hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ca1af61d2409c57a792eaf5f0981ff6c182834d6
| 159
|
py
|
Python
|
py_eegepe/met/__init__.py
|
jrmxn/py_eegepe
|
40d4c20295828f0131b790440be05571df914140
|
[
"MIT"
] | 2
|
2020-04-16T04:21:40.000Z
|
2021-12-21T06:19:40.000Z
|
py_eegepe/met/__init__.py
|
jrmxn/py_eegepe
|
40d4c20295828f0131b790440be05571df914140
|
[
"MIT"
] | null | null | null |
py_eegepe/met/__init__.py
|
jrmxn/py_eegepe
|
40d4c20295828f0131b790440be05571df914140
|
[
"MIT"
] | 1
|
2021-12-21T06:19:55.000Z
|
2021-12-21T06:19:55.000Z
|
from . import met_fir, met_net, met_reg, met_ukf, met_net_arch, met_shared, core
__all__ = [met_fir, met_net, met_reg, met_ukf, met_net_arch, met_shared, core]
| 79.5
| 80
| 0.786164
| 31
| 159
| 3.451613
| 0.354839
| 0.224299
| 0.168224
| 0.224299
| 0.878505
| 0.878505
| 0.878505
| 0.878505
| 0.878505
| 0.878505
| 0
| 0
| 0.113208
| 159
| 2
| 81
| 79.5
| 0.758865
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 10
|
ca36061a922176672360378229f6851f8950c9cd
| 120,308
|
py
|
Python
|
tests/EVM/test_EVMSSTORE.py
|
mroll/manticore
|
d731562f7761ed9437cce406b24c815303de370c
|
[
"Apache-2.0"
] | null | null | null |
tests/EVM/test_EVMSSTORE.py
|
mroll/manticore
|
d731562f7761ed9437cce406b24c815303de370c
|
[
"Apache-2.0"
] | null | null | null |
tests/EVM/test_EVMSSTORE.py
|
mroll/manticore
|
d731562f7761ed9437cce406b24c815303de370c
|
[
"Apache-2.0"
] | null | null | null |
import struct
import unittest
import json
from manticore.platforms import evm
from manticore.core import state
from manticore.core.smtlib import Operators, ConstraintSet
import os
class EVMTest_SSTORE(unittest.TestCase):
_multiprocess_can_split_ = True
maxDiff=None
def _execute(self, new_vm):
last_returned = None
last_exception = None
try:
new_vm.execute()
except evm.Stop, e:
last_exception = "STOP"
except evm.NotEnoughGas:
last_exception = "OOG"
except evm.StackUnderflow:
last_exception = "INSUFICIENT STACK"
except evm.InvalidOpcode:
last_exception = "INVALID"
except evm.SelfDestruct:
last_exception = "SUICIDED"
except evm.Return as e:
last_exception = "RETURN"
last_returned = e.data
except evm.Revert:
last_exception = "REVERT"
return last_exception, last_returned
def test_SSTORE_1(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {115792089237316195423570985008687907853269984665640564039457584007913129639935L: 115792089237316195423570985008687907853269984665640564039457584007913129639935L}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, origin, price, data, caller, value, bytecode, header, gas=gas, global_storage=world.storage)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935L)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935L)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_2(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {0: 115792089237316195423570985008687907853269984665640564039457584007913129639935L}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, origin, price, data, caller, value, bytecode, header, gas=gas, global_storage=world.storage)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935L)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_3(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {1: 115792089237316195423570985008687907853269984665640564039457584007913129639935L}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, origin, price, data, caller, value, bytecode, header, gas=gas, global_storage=world.storage)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935L)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_4(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {57896044618658097711785492504343953926634992332820282019728792003956564819952L: 115792089237316195423570985008687907853269984665640564039457584007913129639935L}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, origin, price, data, caller, value, bytecode, header, gas=gas, global_storage=world.storage)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935L)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952L)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_5(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {3618502788666131106986593281521497120414687020801267626233049500247285301263L: 115792089237316195423570985008687907853269984665640564039457584007913129639935L}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, origin, price, data, caller, value, bytecode, header, gas=gas, global_storage=world.storage)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935L)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263L)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_6(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {16: 115792089237316195423570985008687907853269984665640564039457584007913129639935L}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, origin, price, data, caller, value, bytecode, header, gas=gas, global_storage=world.storage)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935L)
new_vm._push(16)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_7(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {32: 115792089237316195423570985008687907853269984665640564039457584007913129639935L}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, origin, price, data, caller, value, bytecode, header, gas=gas, global_storage=world.storage)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935L)
new_vm._push(32)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_8(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {48: 115792089237316195423570985008687907853269984665640564039457584007913129639935L}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, origin, price, data, caller, value, bytecode, header, gas=gas, global_storage=world.storage)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935L)
new_vm._push(48)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_9(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {6089590155545428825848686802984512581899718912L: 115792089237316195423570985008687907853269984665640564039457584007913129639935L}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, origin, price, data, caller, value, bytecode, header, gas=gas, global_storage=world.storage)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935L)
new_vm._push(6089590155545428825848686802984512581899718912L)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_10(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {115792089237316195423570985008687907853269984665640564039457584007913129639935L: 0}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, origin, price, data, caller, value, bytecode, header, gas=gas, global_storage=world.storage)
new_vm._push(0)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935L)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_11(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {0: 0}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, origin, price, data, caller, value, bytecode, header, gas=gas, global_storage=world.storage)
new_vm._push(0)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_12(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {1: 0}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, origin, price, data, caller, value, bytecode, header, gas=gas, global_storage=world.storage)
new_vm._push(0)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_13(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {57896044618658097711785492504343953926634992332820282019728792003956564819952L: 0}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, origin, price, data, caller, value, bytecode, header, gas=gas, global_storage=world.storage)
new_vm._push(0)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952L)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_14(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {3618502788666131106986593281521497120414687020801267626233049500247285301263L: 0}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, origin, price, data, caller, value, bytecode, header, gas=gas, global_storage=world.storage)
new_vm._push(0)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263L)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_15(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {16: 0}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, origin, price, data, caller, value, bytecode, header, gas=gas, global_storage=world.storage)
new_vm._push(0)
new_vm._push(16)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_16(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {32: 0}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, origin, price, data, caller, value, bytecode, header, gas=gas, global_storage=world.storage)
new_vm._push(0)
new_vm._push(32)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_17(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {48: 0}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, origin, price, data, caller, value, bytecode, header, gas=gas, global_storage=world.storage)
new_vm._push(0)
new_vm._push(48)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_18(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {6089590155545428825848686802984512581899718912L: 0}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, origin, price, data, caller, value, bytecode, header, gas=gas, global_storage=world.storage)
new_vm._push(0)
new_vm._push(6089590155545428825848686802984512581899718912L)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_19(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {115792089237316195423570985008687907853269984665640564039457584007913129639935L: 1}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, origin, price, data, caller, value, bytecode, header, gas=gas, global_storage=world.storage)
new_vm._push(1)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935L)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_20(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {0: 1}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, origin, price, data, caller, value, bytecode, header, gas=gas, global_storage=world.storage)
new_vm._push(1)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_21(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {1: 1}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, origin, price, data, caller, value, bytecode, header, gas=gas, global_storage=world.storage)
new_vm._push(1)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_22(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {57896044618658097711785492504343953926634992332820282019728792003956564819952L: 1}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, origin, price, data, caller, value, bytecode, header, gas=gas, global_storage=world.storage)
new_vm._push(1)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952L)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_23(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {3618502788666131106986593281521497120414687020801267626233049500247285301263L: 1}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, origin, price, data, caller, value, bytecode, header, gas=gas, global_storage=world.storage)
new_vm._push(1)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263L)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_24(self):
    """SSTORE: store value 1 at key 16; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 16
    val = 1
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_25(self):
    """SSTORE: store value 1 at key 32; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 32
    val = 1
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_26(self):
    """SSTORE: store value 1 at key 48; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 48
    val = 1
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_27(self):
    """SSTORE: store value 1 at a 160-bit-range key; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 6089590155545428825848686802984512581899718912
    val = 1
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_28(self):
    """SSTORE: large value at key 2**256-1; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 115792089237316195423570985008687907853269984665640564039457584007913129639935
    val = 57896044618658097711785492504343953926634992332820282019728792003956564819952
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_29(self):
    """SSTORE: large value at key 0; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 0
    val = 57896044618658097711785492504343953926634992332820282019728792003956564819952
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_30(self):
    """SSTORE: large value at key 1; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 1
    val = 57896044618658097711785492504343953926634992332820282019728792003956564819952
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_31(self):
    """SSTORE: key and value are the same large word; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 57896044618658097711785492504343953926634992332820282019728792003956564819952
    val = 57896044618658097711785492504343953926634992332820282019728792003956564819952
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_32(self):
    """SSTORE: large value at a large key; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 3618502788666131106986593281521497120414687020801267626233049500247285301263
    val = 57896044618658097711785492504343953926634992332820282019728792003956564819952
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_33(self):
    """SSTORE: large value at key 16; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 16
    val = 57896044618658097711785492504343953926634992332820282019728792003956564819952
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_34(self):
    """SSTORE: large value at key 32; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 32
    val = 57896044618658097711785492504343953926634992332820282019728792003956564819952
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_35(self):
    """SSTORE: large value at key 48; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 48
    val = 57896044618658097711785492504343953926634992332820282019728792003956564819952
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_36(self):
    """SSTORE: large value at a 160-bit-range key; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 6089590155545428825848686802984512581899718912
    val = 57896044618658097711785492504343953926634992332820282019728792003956564819952
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_37(self):
    """SSTORE: large value at key 2**256-1; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 115792089237316195423570985008687907853269984665640564039457584007913129639935
    val = 3618502788666131106986593281521497120414687020801267626233049500247285301263
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_38(self):
    """SSTORE: large value at key 0; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 0
    val = 3618502788666131106986593281521497120414687020801267626233049500247285301263
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_39(self):
    """SSTORE: large value at key 1; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 1
    val = 3618502788666131106986593281521497120414687020801267626233049500247285301263
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_40(self):
    """SSTORE: large value at a large key; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 57896044618658097711785492504343953926634992332820282019728792003956564819952
    val = 3618502788666131106986593281521497120414687020801267626233049500247285301263
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_41(self):
    """SSTORE: key and value are the same large word; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 3618502788666131106986593281521497120414687020801267626233049500247285301263
    val = 3618502788666131106986593281521497120414687020801267626233049500247285301263
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_42(self):
    """SSTORE: large value at key 16; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 16
    val = 3618502788666131106986593281521497120414687020801267626233049500247285301263
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_43(self):
    """SSTORE: large value at key 32; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 32
    val = 3618502788666131106986593281521497120414687020801267626233049500247285301263
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_44(self):
    """SSTORE: large value at key 48; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 48
    val = 3618502788666131106986593281521497120414687020801267626233049500247285301263
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_45(self):
    """SSTORE: large value at a 160-bit-range key; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 6089590155545428825848686802984512581899718912
    val = 3618502788666131106986593281521497120414687020801267626233049500247285301263
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_46(self):
    """SSTORE: store value 16 at key 2**256-1; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 115792089237316195423570985008687907853269984665640564039457584007913129639935
    val = 16
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_47(self):
    """SSTORE: store value 16 at key 0; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 0
    val = 16
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_48(self):
    """SSTORE: store value 16 at key 1; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 1
    val = 16
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_49(self):
    """SSTORE: store value 16 at a large key; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 57896044618658097711785492504343953926634992332820282019728792003956564819952
    val = 16
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_50(self):
    """SSTORE: store value 16 at a large key; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 3618502788666131106986593281521497120414687020801267626233049500247285301263
    val = 16
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_51(self):
    """SSTORE: store value 16 at key 16; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 16
    val = 16
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_52(self):
    """SSTORE: store value 16 at key 32; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 32
    val = 16
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_53(self):
    """SSTORE: store value 16 at key 48; expect pc==1 and an empty stack."""
    # Constraint store backing the symbolic world state.
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    key = 48
    val = 16
    account = 0x222222222222222222222222222222222222200
    # Account code is a single SSTORE opcode ('U' == 0x55); its storage is
    # pre-seeded with the expected post-state entry.
    world.create_account(address=account, balance=None, code='U',
                         storage={key: val})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    new_vm = evm.EVM(constraints, account, origin, 0,
                     'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                     header, gas=1000000, global_storage=world.storage)
    new_vm._push(val)  # value operand (pushed first, ends up below the key)
    new_vm._push(key)  # key operand (top of stack)
    last_exception, last_returned = self._execute(new_vm)
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)      # advanced past the 1-byte opcode
    self.assertEqual(new_vm.stack, [])  # both operands consumed
def test_SSTORE_54(self):
    """SSTORE: store 16 at a large slot key (pre-seeded); expect clean exit."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    # NOTE: long-literal 'L' suffix dropped; in Python 2 large int
    # literals are longs either way, so the value is unchanged.
    slot = 6089590155545428825848686802984512581899718912
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={slot: 16})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(16)    # value to store
    vm._push(slot)  # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_55(self):
    """SSTORE: store 32 at slot 2**256-1 (pre-seeded); expect clean exit."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    # Maximum 256-bit word as the storage key.
    slot = 115792089237316195423570985008687907853269984665640564039457584007913129639935
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={slot: 32})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(32)    # value to store
    vm._push(slot)  # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_56(self):
    """SSTORE: store 32 at slot 0 (slot pre-seeded with 32); expect clean exit."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={0: 32})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(32)  # value to store
    vm._push(0)   # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_57(self):
    """SSTORE: store 32 at slot 1 (slot pre-seeded with 32); expect clean exit."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={1: 32})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(32)  # value to store
    vm._push(1)   # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_58(self):
    """SSTORE: store 32 at a large mid-range slot (pre-seeded); expect clean exit."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    # Slot key is a value just below 2**255.
    slot = 57896044618658097711785492504343953926634992332820282019728792003956564819952
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={slot: 32})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(32)    # value to store
    vm._push(slot)  # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_59(self):
    """SSTORE: store 32 at another large slot key (pre-seeded); expect clean exit."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    slot = 3618502788666131106986593281521497120414687020801267626233049500247285301263
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={slot: 32})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(32)    # value to store
    vm._push(slot)  # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_60(self):
    """SSTORE: store 32 at slot 16 (slot pre-seeded with 32); expect clean exit."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={16: 32})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(32)  # value to store
    vm._push(16)  # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_61(self):
    """SSTORE: store 32 at slot 32 (slot pre-seeded with 32); expect clean exit."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={32: 32})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(32)  # value to store
    vm._push(32)  # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_62(self):
    """SSTORE: store 32 at slot 48 (slot pre-seeded with 32); expect clean exit."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={48: 32})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(32)  # value to store
    vm._push(48)  # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_63(self):
    """SSTORE: store 32 at a large slot key (pre-seeded); expect clean exit."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    slot = 6089590155545428825848686802984512581899718912
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={slot: 32})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(32)    # value to store
    vm._push(slot)  # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_64(self):
    """SSTORE: store 48 at slot 2**256-1 (pre-seeded); expect clean exit."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    # Maximum 256-bit word as the storage key.
    slot = 115792089237316195423570985008687907853269984665640564039457584007913129639935
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={slot: 48})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(48)    # value to store
    vm._push(slot)  # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_65(self):
    """SSTORE: store 48 at slot 0 (slot pre-seeded with 48); expect clean exit."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={0: 48})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(48)  # value to store
    vm._push(0)   # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_66(self):
    """SSTORE: store 48 at slot 1 (slot pre-seeded with 48); expect clean exit."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={1: 48})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(48)  # value to store
    vm._push(1)   # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_67(self):
    """SSTORE: store 48 at a large mid-range slot (pre-seeded); expect clean exit."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    slot = 57896044618658097711785492504343953926634992332820282019728792003956564819952
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={slot: 48})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(48)    # value to store
    vm._push(slot)  # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_68(self):
    """SSTORE: store 48 at another large slot key (pre-seeded); expect clean exit."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    slot = 3618502788666131106986593281521497120414687020801267626233049500247285301263
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={slot: 48})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(48)    # value to store
    vm._push(slot)  # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_69(self):
    """SSTORE: store 48 at slot 16 (slot pre-seeded with 48); expect clean exit."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={16: 48})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(48)  # value to store
    vm._push(16)  # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_70(self):
    """SSTORE: store 48 at slot 32 (slot pre-seeded with 48); expect clean exit."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={32: 48})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(48)  # value to store
    vm._push(32)  # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_71(self):
    """SSTORE: store 48 at slot 48 (slot pre-seeded with 48); expect clean exit."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={48: 48})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(48)  # value to store
    vm._push(48)  # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_72(self):
    """SSTORE: store 48 at a large slot key (pre-seeded); expect clean exit."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    slot = 6089590155545428825848686802984512581899718912
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={slot: 48})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(48)    # value to store
    vm._push(slot)  # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_73(self):
    """SSTORE: store a large value at slot 2**256-1 (pre-seeded); expect clean exit."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    slot = 115792089237316195423570985008687907853269984665640564039457584007913129639935
    value = 6089590155545428825848686802984512581899718912
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={slot: value})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(value)  # value to store
    vm._push(slot)   # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_74(self):
    """SSTORE: store a large value at slot 0 (pre-seeded); expect clean exit."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    value = 6089590155545428825848686802984512581899718912
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={0: value})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(value)  # value to store
    vm._push(0)      # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_75(self):
    """SSTORE: store a large value at slot 1 (pre-seeded); expect clean exit."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    value = 6089590155545428825848686802984512581899718912
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={1: value})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(value)  # value to store
    vm._push(1)      # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_76(self):
    """SSTORE: store a large value at a large mid-range slot (pre-seeded)."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    slot = 57896044618658097711785492504343953926634992332820282019728792003956564819952
    value = 6089590155545428825848686802984512581899718912
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={slot: value})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(value)  # value to store
    vm._push(slot)   # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_77(self):
    """SSTORE: store a large value at another large slot key (pre-seeded)."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    slot = 3618502788666131106986593281521497120414687020801267626233049500247285301263
    value = 6089590155545428825848686802984512581899718912
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={slot: value})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(value)  # value to store
    vm._push(slot)   # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_78(self):
    """SSTORE: store a large value at slot 16 (pre-seeded); expect clean exit."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    value = 6089590155545428825848686802984512581899718912
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={16: value})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(value)  # value to store
    vm._push(16)     # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_79(self):
    """SSTORE: store a large value at slot 32 (pre-seeded); expect clean exit."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    value = 6089590155545428825848686802984512581899718912
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={32: value})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(value)  # value to store
    vm._push(32)     # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_80(self):
    """SSTORE: store a large value at slot 48 (pre-seeded); expect clean exit."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    value = 6089590155545428825848686802984512581899718912
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={48: value})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(value)  # value to store
    vm._push(48)     # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
def test_SSTORE_81(self):
    """SSTORE: slot key and stored value are the same large integer (pre-seeded)."""
    constraints = ConstraintSet()
    world = evm.EVMWorld(constraints)
    account = 0x222222222222222222222222222222222222200
    big = 6089590155545428825848686802984512581899718912
    # Account code 'U' == chr(0x55), the SSTORE opcode; slot pre-seeded.
    world.create_account(address=account, balance=None, code='U',
                         storage={big: big})
    caller = origin = 0x111111111111111111111111111111111111100
    header = {'coinbase': 0, 'timestamp': 0, 'number': 0,
              'difficulty': 0, 'gaslimit': 0}
    vm = evm.EVM(constraints, account, origin, 0,
                 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', caller, 10000, 'U',
                 header, gas=1000000, global_storage=world.storage)
    vm._push(big)  # value to store
    vm._push(big)  # storage slot (popped first by SSTORE)
    last_exception, last_returned = self._execute(vm)
    # Single-byte program: pc advances to 1, no exception, stack drained.
    self.assertEqual(last_exception, None)
    self.assertEqual(vm.pc, 1)
    self.assertEqual(vm.stack, [])
# Run the generated SSTORE test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 43.07483
| 184
| 0.585422
| 9,588
| 120,308
| 7.207655
| 0.018252
| 0.035308
| 0.021098
| 0.046884
| 0.988409
| 0.988409
| 0.988409
| 0.988409
| 0.988409
| 0.988409
| 0
| 0.272916
| 0.339952
| 120,308
| 2,792
| 185
| 43.090258
| 0.59735
| 0.036357
| 0
| 0.880052
| 0
| 0
| 0.051585
| 0.02099
| 0
| 0
| 0.086058
| 0
| 0.105606
| 0
| null | null | 0
| 0.003042
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
ca4b6ff7b3ad532c8dcfa0981b80c74adcdab0b7
| 9,358
|
py
|
Python
|
userbot/modules/fakeload.py
|
jefa2231/SenturyUbot
|
c9123ffca263265408280b8fbe1418ba82c36dd4
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/fakeload.py
|
jefa2231/SenturyUbot
|
c9123ffca263265408280b8fbe1418ba82c36dd4
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/fakeload.py
|
jefa2231/SenturyUbot
|
c9123ffca263265408280b8fbe1418ba82c36dd4
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# made by @DneZyeK
# Port to UserBot by @MoveAngel
import asyncio
import re
import time
from time import sleep
from userbot import CMD_HELP, ZALG_LIST
from userbot.events import register
@register(outgoing=True, pattern="^.fl(?: |$)(.*)")
async def typewriter(typew):
    """Fake loading-bar animation for the ``.fl`` command.

    Edits the triggering message in place, counting from 0% to 100% while
    drawing a growing progress bar out of Unicode block characters, then
    replaces it with the closing joke message.

    :param typew: outgoing-message event matched by the ``.fl`` pattern
        (only its ``edit`` coroutine is used).
    """
    await typew.edit("`start loading makan tokek goreng!...`")
    # asyncio.sleep, not time.sleep: a blocking sleep inside a coroutine
    # would freeze the whole userbot event loop for the entire animation.
    await asyncio.sleep(4)
    await typew.edit("0%")
    # One full block "█" is reached in six steps through partial glyphs.
    # This reproduces the original hand-written frames exactly (and fixes
    # the copy-paste bug where the 100% frame repeated the 99% glyph).
    partials = "▎▍▌▊▉█"
    for number in range(1, 101):
        blocks, step = divmod(number - 1, len(partials))
        bar = "█" * blocks + partials[step]
        await typew.edit(str(number) + "% " + bar)
        await asyncio.sleep(0.03)
    await asyncio.sleep(1)
    await typew.edit("Tokek Goreng Siap di Santap! Jangan lupa Menu penutup nya [DISINI](http://javbus.com)")
# I did it for two hours :D just ctrl+c - ctrl+v
# Register this module's help text under the "fakeload" key.
CMD_HELP.update(
    {"fakeload": ".fl\nUsage: Tokek Goreng Dan makanan penutup."}
)
| 28.443769
| 107
| 0.535478
| 1,327
| 9,358
| 4.439337
| 0.144687
| 0.174843
| 0.24478
| 0.288576
| 0.749109
| 0.745205
| 0.745205
| 0.745205
| 0.745205
| 0.745205
| 0
| 0.054895
| 0.20966
| 9,358
| 329
| 108
| 28.443769
| 0.622093
| 0.030348
| 0
| 0.628931
| 0
| 0
| 0.163882
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.018868
| 0
| 0.018868
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ca91ff8158dd55038d64a28ff60e2f577c138762
| 102
|
py
|
Python
|
pwmodel/__init__.py
|
guanleustc/pwmodel
|
c1df703889fe53e82f0f1201c7c6ec75d94b99a0
|
[
"Apache-2.0"
] | null | null | null |
pwmodel/__init__.py
|
guanleustc/pwmodel
|
c1df703889fe53e82f0f1201c7c6ec75d94b99a0
|
[
"Apache-2.0"
] | null | null | null |
pwmodel/__init__.py
|
guanleustc/pwmodel
|
c1df703889fe53e82f0f1201c7c6ec75d94b99a0
|
[
"Apache-2.0"
] | null | null | null |
from .models import NGramPw, PcfgPw, HistPw
from . import helper
from .models import fast_fuzzysearch
| 25.5
| 43
| 0.813725
| 14
| 102
| 5.857143
| 0.642857
| 0.243902
| 0.390244
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137255
| 102
| 3
| 44
| 34
| 0.931818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0477a0685f75c7252d34609b0e9bbdfdcdd39c11
| 1,881
|
py
|
Python
|
congress/migrations/0014_auto_20220123_2028.py
|
InsiderUnlocked/Backend
|
efd71d3eb874b9bfdbca89d0aa9e610338f9fa52
|
[
"blessing"
] | 3
|
2022-01-22T06:53:52.000Z
|
2022-02-13T10:16:29.000Z
|
congress/migrations/0014_auto_20220123_2028.py
|
InsiderUnlocked/Backend
|
efd71d3eb874b9bfdbca89d0aa9e610338f9fa52
|
[
"blessing"
] | null | null | null |
congress/migrations/0014_auto_20220123_2028.py
|
InsiderUnlocked/Backend
|
efd71d3eb874b9bfdbca89d0aa9e610338f9fa52
|
[
"blessing"
] | null | null | null |
# Generated by Django 3.2.3 on 2022-01-24 01:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 3.2.3, 2022-01-24).

    Relaxes the counter columns on ``congressperson`` and ``ticker`` to
    nullable IntegerFields defaulting to 0, and makes
    ``congressperson.fullName`` a unique CharField (max_length=1000).
    """

    dependencies = [
        # Applies on top of the previous congress migration.
        ('congress', '0013_auto_20220121_1356'),
    ]

    operations = [
        # congressperson.fullName: long, unique display name.
        migrations.AlterField(
            model_name='congressperson',
            name='fullName',
            field=models.CharField(max_length=1000, unique=True),
        ),
        # congressperson counters: nullable with default 0.
        migrations.AlterField(
            model_name='congressperson',
            name='purchases',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='congressperson',
            name='sales',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        # NOTE: totalTransactions (unlike the other counters) stays
        # non-nullable — presumably intentional; confirm against the model.
        migrations.AlterField(
            model_name='congressperson',
            name='totalTransactions',
            field=models.IntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='congressperson',
            name='totalVolumeTransactions',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        # ticker counters: nullable with default 0.
        migrations.AlterField(
            model_name='ticker',
            name='purchases',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='ticker',
            name='sales',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='ticker',
            name='totalTransactions',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='ticker',
            name='totalVolumeTransactions',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
    ]
| 31.881356
| 72
| 0.574694
| 168
| 1,881
| 6.357143
| 0.27381
| 0.168539
| 0.210674
| 0.244382
| 0.807116
| 0.753745
| 0.669476
| 0.625468
| 0.625468
| 0.625468
| 0
| 0.033102
| 0.30941
| 1,881
| 58
| 73
| 32.431034
| 0.789069
| 0.023923
| 0
| 0.807692
| 1
| 0
| 0.131407
| 0.037623
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.019231
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
04fae57a193008bfe50ec4a9d151b9177eb8c5e2
| 29,307
|
py
|
Python
|
tests/open_alchemy/schemas/validation/property_/relationship/full/test_backref_properties.py
|
MihailMiller/OpenAlchemy
|
55b751c58ca50706ebc46262f50addb7dec34278
|
[
"Apache-2.0"
] | 40
|
2019-11-05T06:50:35.000Z
|
2022-03-09T01:34:57.000Z
|
tests/open_alchemy/schemas/validation/property_/relationship/full/test_backref_properties.py
|
MihailMiller/OpenAlchemy
|
55b751c58ca50706ebc46262f50addb7dec34278
|
[
"Apache-2.0"
] | 178
|
2019-11-03T04:10:38.000Z
|
2022-03-31T00:07:17.000Z
|
tests/open_alchemy/schemas/validation/property_/relationship/full/test_backref_properties.py
|
MihailMiller/OpenAlchemy
|
55b751c58ca50706ebc46262f50addb7dec34278
|
[
"Apache-2.0"
] | 17
|
2019-11-04T07:22:46.000Z
|
2022-03-23T05:29:49.000Z
|
"""Tests for full relationship schema checking."""
import pytest
from open_alchemy.schemas.validation.property_.relationship import full
TESTS = [
pytest.param(
{},
"ref_schema",
{"$ref": "#/components/schemas/RefSchema"},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schemas",
"type": "object",
"properties": {
"id": {"type": "integer"},
"schemas": {
"readOnly": True,
"type": "array",
"items": {"type": "object", "properties": {"ref_schema": {}}},
},
},
}
},
(
False,
"backref property :: items :: properties :: properties cannot contain the "
"property name of the relartionship to avoid circular references",
),
id="many-to-one back reference has property name",
),
pytest.param(
{"properties": {}},
"ref_schema",
{"$ref": "#/components/schemas/RefSchema"},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schemas",
"type": "object",
"properties": {
"id": {"type": "integer"},
"schemas": {
"readOnly": True,
"type": "array",
"items": {"type": "object", "properties": {"id": {}}},
},
},
}
},
(
False,
"backref property :: items :: properties :: could not find id in the model "
"schema",
),
id="many-to-one back reference has property not in schema",
),
pytest.param(
{"properties": {"id": {"type": "integer"}}},
"ref_schema",
{"$ref": "#/components/schemas/RefSchema"},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schemas",
"type": "object",
"properties": {
"id": {"type": "integer"},
"schemas": {
"readOnly": True,
"type": "array",
"items": {"type": "object", "properties": {"id": True}},
},
},
}
},
(
False,
"backref property :: items :: properties :: id :: property values must be "
"dictionaries",
),
id="many-to-one back reference has property not dictionary",
),
pytest.param(
{"properties": {"id": {"type": "integer"}}},
"ref_schema",
{"$ref": "#/components/schemas/RefSchema"},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schemas",
"type": "object",
"properties": {
"id": {"type": "integer"},
"schemas": {
"readOnly": True,
"type": "array",
"items": {"type": "object", "properties": {"id": {}}},
},
},
}
},
(
False,
"backref property :: malformed schema :: Every property requires a type. ",
),
id="many-to-one back reference has property no type",
),
pytest.param(
{"properties": {"id": {"type": "integer"}}},
"ref_schema",
{"$ref": "#/components/schemas/RefSchema"},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schemas",
"type": "object",
"properties": {
"id": {"type": "integer"},
"schemas": {
"readOnly": True,
"type": "array",
"items": {
"type": "object",
"properties": {"id": {"type": "string"}},
},
},
},
}
},
(
False,
"backref property :: items :: properties :: id :: type :: expected "
'"integer", actual is "string"',
),
id="many-to-one back reference has property different type",
),
pytest.param(
{"properties": {"id": {"type": "integer"}}},
"ref_schema",
{"$ref": "#/components/schemas/RefSchema"},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schemas",
"type": "object",
"properties": {
"id": {"type": "integer"},
"schemas": {
"readOnly": True,
"type": "array",
"items": {
"type": "object",
"properties": {"id": {"type": "integer"}},
},
},
},
}
},
(True, None),
id="many-to-one back reference has property",
),
pytest.param(
{"properties": {"id": {"type": "integer"}}},
"ref_schema",
{"$ref": "#/components/schemas/RefSchema"},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schemas",
"type": "object",
"properties": {
"id": {"type": "integer"},
"schemas": {
"readOnly": True,
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {
"$ref": "#/components/schemas/BackReferenceProperty"
}
},
},
},
},
},
"BackReferenceProperty": {"type": "integer"},
},
(True, None),
id="many-to-one back reference has property $ref",
),
pytest.param(
{"properties": {"id": {"type": "integer"}}},
"ref_schema",
{"$ref": "#/components/schemas/RefSchema"},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schemas",
"type": "object",
"properties": {
"id": {"type": "integer"},
"schemas": {
"readOnly": True,
"type": "array",
"items": {
"type": "object",
"properties": {"id": {"allOf": [{"type": "integer"}]}},
},
},
},
}
},
(True, None),
id="many-to-one back reference has property allOf",
),
pytest.param(
{"properties": {"id": {"type": "integer"}}},
"ref_schema",
{"$ref": "#/components/schemas/RefSchema"},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schemas",
"type": "object",
"properties": {
"id": {"type": "integer"},
"schemas": {
"readOnly": True,
"type": "array",
"items": {
"type": "object",
"properties": {"id": {"type": "integer", "format": True}},
},
},
},
}
},
(
False,
"backref property :: malformed schema :: A format value must be of type "
"string. ",
),
id="many-to-one back reference has property format not string",
),
pytest.param(
{"properties": {"id": {"type": "integer", "format": "format 2"}}},
"ref_schema",
{"$ref": "#/components/schemas/RefSchema"},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schemas",
"type": "object",
"properties": {
"id": {"type": "integer"},
"schemas": {
"readOnly": True,
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {"type": "integer", "format": "format 1"}
},
},
},
},
}
},
(
False,
"backref property :: items :: properties :: id :: format :: expected "
'"format 2", actual is "format 1"',
),
id="many-to-one back reference has property format different",
),
pytest.param(
{"properties": {"id": {"type": "integer", "format": "format 1"}}},
"ref_schema",
{"$ref": "#/components/schemas/RefSchema"},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schemas",
"type": "object",
"properties": {
"id": {"type": "integer"},
"schemas": {
"readOnly": True,
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {"type": "integer", "format": "format 1"}
},
},
},
},
}
},
(True, None),
id="many-to-one back reference has property format",
),
pytest.param(
{"properties": {"id": {"type": "integer"}}},
"ref_schema",
{"$ref": "#/components/schemas/RefSchema"},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schemas",
"type": "object",
"properties": {
"id": {"type": "integer"},
"schemas": {
"readOnly": True,
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {"type": "integer", "maxLength": True}
},
},
},
},
}
},
(
False,
"backref property :: malformed schema :: A maxLength value must be of type "
"integer. ",
),
id="many-to-one back reference has property maxLength not string",
),
pytest.param(
{"properties": {"id": {"type": "integer", "maxLength": 2}}},
"ref_schema",
{"$ref": "#/components/schemas/RefSchema"},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schemas",
"type": "object",
"properties": {
"id": {"type": "integer"},
"schemas": {
"readOnly": True,
"type": "array",
"items": {
"type": "object",
"properties": {"id": {"type": "integer", "maxLength": 1}},
},
},
},
}
},
(
False,
"backref property :: items :: properties :: id :: maxLength :: expected "
'"2", actual is "1"',
),
id="many-to-one back reference has property maxLength different",
),
pytest.param(
{"properties": {"id": {"type": "integer", "maxLength": 1}}},
"ref_schema",
{"$ref": "#/components/schemas/RefSchema"},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schemas",
"type": "object",
"properties": {
"id": {"type": "integer"},
"schemas": {
"readOnly": True,
"type": "array",
"items": {
"type": "object",
"properties": {"id": {"type": "integer", "maxLength": 1}},
},
},
},
}
},
(
True,
None,
),
id="many-to-one back reference has property maxLength",
),
pytest.param(
{"properties": {"id": {"type": "integer"}}},
"ref_schema",
{"$ref": "#/components/schemas/RefSchema"},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schemas",
"type": "object",
"properties": {
"id": {"type": "integer"},
"schemas": {
"readOnly": True,
"type": "array",
"items": {
"type": "object",
"properties": {"id": {"type": "integer", "default": True}},
},
},
},
}
},
(
False,
"backref property :: malformed schema :: The default value does not "
"conform to the schema. The value is: True ",
),
id="many-to-one back reference has property default not string",
),
pytest.param(
{"properties": {"id": {"type": "integer", "default": 2}}},
"ref_schema",
{"$ref": "#/components/schemas/RefSchema"},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schemas",
"type": "object",
"properties": {
"id": {"type": "integer"},
"schemas": {
"readOnly": True,
"type": "array",
"items": {
"type": "object",
"properties": {"id": {"type": "integer", "default": 1}},
},
},
},
}
},
(
False,
'backref property :: items :: properties :: id :: default :: expected "2", '
'actual is "1"',
),
id="many-to-one back reference has property default different",
),
pytest.param(
{"properties": {"id": {"type": "integer", "default": 1}}},
"ref_schema",
{"$ref": "#/components/schemas/RefSchema"},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schemas",
"type": "object",
"properties": {
"id": {"type": "integer"},
"schemas": {
"readOnly": True,
"type": "array",
"items": {
"type": "object",
"properties": {"id": {"type": "integer", "default": 1}},
},
},
},
}
},
(
True,
None,
),
id="many-to-one back reference has property default",
),
pytest.param(
{"properties": {"name": {"type": "string"}}},
"ref_schema",
{"$ref": "#/components/schemas/RefSchema"},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schemas",
"type": "object",
"properties": {
"id": {"type": "integer"},
"schemas": {
"readOnly": True,
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {"type": "integer"},
"name": {"type": "string"},
},
},
},
},
}
},
(
False,
"backref property :: items :: properties :: could not find id in the model "
"schema",
),
id="many-to-one back reference has multiple property first not defined",
),
pytest.param(
{"properties": {"id": {"type": "integer"}}},
"ref_schema",
{"$ref": "#/components/schemas/RefSchema"},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schemas",
"type": "object",
"properties": {
"id": {"type": "integer"},
"schemas": {
"readOnly": True,
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {"type": "integer"},
"name": {"type": "string"},
},
},
},
},
}
},
(
False,
"backref property :: items :: properties :: could not find name in the "
"model schema",
),
id="many-to-one back reference has multiple property second not defined",
),
pytest.param(
{"properties": {"id": {"type": "integer"}}},
"ref_schema",
{"$ref": "#/components/schemas/RefSchema"},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schemas",
"type": "object",
"properties": {
"id": {"type": "integer"},
"schemas": {
"readOnly": True,
"type": "array",
"items": {
"allOf": [
{
"type": "object",
"properties": {"id": {"type": "integer"}},
},
{
"type": "object",
"properties": {"name": {"type": "string"}},
},
]
},
},
},
}
},
(
False,
"backref property :: items :: properties :: could not find name in the "
"model schema",
),
id="many-to-one back reference allOF multiple property second not defined",
),
pytest.param(
{"properties": {"id": {"type": "integer"}, "name": {"type": "string"}}},
"ref_schema",
{"$ref": "#/components/schemas/RefSchema"},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schemas",
"type": "object",
"properties": {
"id": {"type": "integer"},
"schemas": {
"readOnly": True,
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {"type": "string"},
"name": {"type": "string"},
},
},
},
},
}
},
(
False,
"backref property :: items :: properties :: id :: type :: expected "
'"integer", actual is "string"',
),
id="many-to-one back reference has multiple property first wrong type",
),
pytest.param(
{"properties": {"id": {"type": "integer"}, "name": {"type": "string"}}},
"ref_schema",
{"$ref": "#/components/schemas/RefSchema"},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schemas",
"type": "object",
"properties": {
"id": {"type": "integer"},
"schemas": {
"readOnly": True,
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {"type": "integer"},
"name": {"type": "integer"},
},
},
},
},
}
},
(
False,
"backref property :: items :: properties :: name :: type :: expected "
'"string", actual is "integer"',
),
id="many-to-one back reference has multiple property second wrong type",
),
pytest.param(
{"properties": {"id": {"type": "integer"}, "name": {"type": "string"}}},
"ref_schema",
{"$ref": "#/components/schemas/RefSchema"},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schemas",
"type": "object",
"properties": {
"id": {"type": "integer"},
"schemas": {
"readOnly": True,
"type": "array",
"items": {
"allOf": [
{
"type": "object",
"properties": {"id": {"type": "integer"}},
},
{
"type": "object",
"properties": {"name": {"type": "integer"}},
},
]
},
},
},
}
},
(
False,
"backref property :: items :: properties :: name :: type :: expected "
'"string", actual is "integer"',
),
id="many-to-one back reference allOf multiple property second wrong type",
),
pytest.param(
{"properties": {"id": {"type": "integer"}, "name": {"type": "string"}}},
"ref_schema",
{"$ref": "#/components/schemas/RefSchema"},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schemas",
"type": "object",
"properties": {
"id": {"type": "integer"},
"schemas": {
"readOnly": True,
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {"type": "integer"},
"name": {"type": "string"},
},
},
},
},
}
},
(True, None),
id="many-to-one back reference has multiple property",
),
pytest.param(
{"properties": {"id": {"type": "integer"}, "name": {"type": "string"}}},
"ref_schema",
{"$ref": "#/components/schemas/RefSchema"},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schemas",
"type": "object",
"properties": {
"id": {"type": "integer"},
"schemas": {
"readOnly": True,
"type": "array",
"items": {
"allOf": [
{
"type": "object",
"properties": {"id": {"type": "integer"}},
},
{
"type": "object",
"properties": {"name": {"type": "string"}},
},
]
},
},
},
}
},
(True, None),
id="many-to-one back reference allOf multiple property",
),
pytest.param(
{"properties": {"id": {"type": "integer"}}},
"ref_schema",
{"$ref": "#/components/schemas/RefSchema"},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schema",
"x-uselist": False,
"type": "object",
"properties": {
"id": {"type": "integer"},
"schema": {
"readOnly": True,
"type": "object",
"properties": {"id": {"type": "string"}},
},
},
}
},
(
False,
'backref property :: properties :: id :: type :: expected "integer", '
'actual is "string"',
),
id="one-to-one back reference property wrong type",
),
pytest.param(
{
"type": "object",
"x-tablename": "schema",
"properties": {"id": {"type": "integer"}},
},
"ref_schema",
{"type": "array", "items": {"$ref": "#/components/schemas/RefSchema"}},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schema",
"type": "object",
"properties": {
"id": {"type": "integer"},
"schema": {
"readOnly": True,
"type": "object",
"properties": {"id": {"type": "string"}},
},
},
}
},
(
False,
'backref property :: properties :: id :: type :: expected "integer", '
'actual is "string"',
),
id="one-to-many back reference property wrong type",
),
pytest.param(
{
"type": "object",
"x-tablename": "schemas",
"properties": {"id": {"type": "integer", "x-primary-key": True}},
},
"ref_schema",
{"type": "array", "items": {"$ref": "#/components/schemas/RefSchema"}},
{
"RefSchema": {
"x-tablename": "ref_schema",
"x-backref": "schemas",
"x-secondary": "schema_ref_schema",
"type": "object",
"properties": {
"id": {"type": "integer", "x-primary-key": True},
"schemas": {
"readOnly": True,
"type": "array",
"items": {
"type": "object",
"properties": {"id": {"type": "string"}},
},
},
},
}
},
(
False,
"backref property :: items :: properties :: id :: type :: expected "
'"integer", actual is "string"',
),
id="many-to-many back reference property wrong type",
),
]
@pytest.mark.parametrize(
    "parent_schema, property_name, property_schema, schemas, expected_result",
    TESTS,
)
@pytest.mark.validate
@pytest.mark.schemas
def test_check(parent_schema, property_name, property_schema, schemas, expected_result):
    """Verify full.check returns the expected result for each TESTS case."""
    assert (
        full.check(schemas, parent_schema, property_name, property_schema)
        == expected_result
    )
| 34.357562
| 88
| 0.340055
| 1,816
| 29,307
| 5.446035
| 0.056167
| 0.107988
| 0.129424
| 0.162791
| 0.911527
| 0.903033
| 0.889181
| 0.85541
| 0.816178
| 0.773711
| 0
| 0.001244
| 0.506261
| 29,307
| 852
| 89
| 34.397887
| 0.682239
| 0.007541
| 0
| 0.645238
| 0
| 0
| 0.334756
| 0.031077
| 0
| 0
| 0
| 0
| 0.00119
| 1
| 0.00119
| false
| 0
| 0.002381
| 0
| 0.003571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b6f3a68ec67d7580988e8b56a652acf82a243e9a
| 32,310
|
py
|
Python
|
sdk/python/pulumi_gcp/notebooks/runtime.py
|
pjbizon/pulumi-gcp
|
0d09cbc1dcf50093a177531f7596c27db11a2e58
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_gcp/notebooks/runtime.py
|
pjbizon/pulumi-gcp
|
0d09cbc1dcf50093a177531f7596c27db11a2e58
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_gcp/notebooks/runtime.py
|
pjbizon/pulumi-gcp
|
0d09cbc1dcf50093a177531f7596c27db11a2e58
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['RuntimeArgs', 'Runtime']
@pulumi.input_type
class RuntimeArgs:
def __init__(__self__, *,
location: pulumi.Input[str],
access_config: Optional[pulumi.Input['RuntimeAccessConfigArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
software_config: Optional[pulumi.Input['RuntimeSoftwareConfigArgs']] = None,
virtual_machine: Optional[pulumi.Input['RuntimeVirtualMachineArgs']] = None):
"""
The set of arguments for constructing a Runtime resource.
:param pulumi.Input[str] location: A reference to the zone where the machine resides.
:param pulumi.Input['RuntimeAccessConfigArgs'] access_config: The config settings for accessing runtime.
Structure is documented below.
:param pulumi.Input[str] name: The name specified for the Notebook instance.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input['RuntimeSoftwareConfigArgs'] software_config: The config settings for software inside the runtime.
Structure is documented below.
:param pulumi.Input['RuntimeVirtualMachineArgs'] virtual_machine: Use a Compute Engine VM image to start the managed notebook instance.
Structure is documented below.
"""
pulumi.set(__self__, "location", location)
if access_config is not None:
pulumi.set(__self__, "access_config", access_config)
if name is not None:
pulumi.set(__self__, "name", name)
if project is not None:
pulumi.set(__self__, "project", project)
if software_config is not None:
pulumi.set(__self__, "software_config", software_config)
if virtual_machine is not None:
pulumi.set(__self__, "virtual_machine", virtual_machine)
@property
@pulumi.getter
def location(self) -> pulumi.Input[str]:
"""
A reference to the zone where the machine resides.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: pulumi.Input[str]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="accessConfig")
def access_config(self) -> Optional[pulumi.Input['RuntimeAccessConfigArgs']]:
"""
The config settings for accessing runtime.
Structure is documented below.
"""
return pulumi.get(self, "access_config")
@access_config.setter
def access_config(self, value: Optional[pulumi.Input['RuntimeAccessConfigArgs']]):
pulumi.set(self, "access_config", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name specified for the Notebook instance.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="softwareConfig")
def software_config(self) -> Optional[pulumi.Input['RuntimeSoftwareConfigArgs']]:
"""
The config settings for software inside the runtime.
Structure is documented below.
"""
return pulumi.get(self, "software_config")
@software_config.setter
def software_config(self, value: Optional[pulumi.Input['RuntimeSoftwareConfigArgs']]):
pulumi.set(self, "software_config", value)
@property
@pulumi.getter(name="virtualMachine")
def virtual_machine(self) -> Optional[pulumi.Input['RuntimeVirtualMachineArgs']]:
"""
Use a Compute Engine VM image to start the managed notebook instance.
Structure is documented below.
"""
return pulumi.get(self, "virtual_machine")
@virtual_machine.setter
def virtual_machine(self, value: Optional[pulumi.Input['RuntimeVirtualMachineArgs']]):
pulumi.set(self, "virtual_machine", value)
@pulumi.input_type
class _RuntimeState:
def __init__(__self__, *,
access_config: Optional[pulumi.Input['RuntimeAccessConfigArgs']] = None,
health_state: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
metrics: Optional[pulumi.Input[Sequence[pulumi.Input['RuntimeMetricArgs']]]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
software_config: Optional[pulumi.Input['RuntimeSoftwareConfigArgs']] = None,
state: Optional[pulumi.Input[str]] = None,
virtual_machine: Optional[pulumi.Input['RuntimeVirtualMachineArgs']] = None):
"""
Input properties used for looking up and filtering Runtime resources.
:param pulumi.Input['RuntimeAccessConfigArgs'] access_config: The config settings for accessing runtime.
Structure is documented below.
:param pulumi.Input[str] health_state: The health state of this runtime. For a list of possible output values, see
'https://cloud.google.com/vertex-ai/docs/workbench/ reference/rest/v1/projects.locations.runtimes#healthstate'.
:param pulumi.Input[str] location: A reference to the zone where the machine resides.
:param pulumi.Input[Sequence[pulumi.Input['RuntimeMetricArgs']]] metrics: Contains Runtime daemon metrics such as Service status and JupyterLab status
:param pulumi.Input[str] name: The name specified for the Notebook instance.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input['RuntimeSoftwareConfigArgs'] software_config: The config settings for software inside the runtime.
Structure is documented below.
:param pulumi.Input[str] state: The state of this runtime.
:param pulumi.Input['RuntimeVirtualMachineArgs'] virtual_machine: Use a Compute Engine VM image to start the managed notebook instance.
Structure is documented below.
"""
if access_config is not None:
pulumi.set(__self__, "access_config", access_config)
if health_state is not None:
pulumi.set(__self__, "health_state", health_state)
if location is not None:
pulumi.set(__self__, "location", location)
if metrics is not None:
pulumi.set(__self__, "metrics", metrics)
if name is not None:
pulumi.set(__self__, "name", name)
if project is not None:
pulumi.set(__self__, "project", project)
if software_config is not None:
pulumi.set(__self__, "software_config", software_config)
if state is not None:
pulumi.set(__self__, "state", state)
if virtual_machine is not None:
pulumi.set(__self__, "virtual_machine", virtual_machine)
@property
@pulumi.getter(name="accessConfig")
def access_config(self) -> Optional[pulumi.Input['RuntimeAccessConfigArgs']]:
"""
The config settings for accessing runtime.
Structure is documented below.
"""
return pulumi.get(self, "access_config")
@access_config.setter
def access_config(self, value: Optional[pulumi.Input['RuntimeAccessConfigArgs']]):
pulumi.set(self, "access_config", value)
@property
@pulumi.getter(name="healthState")
def health_state(self) -> Optional[pulumi.Input[str]]:
"""
The health state of this runtime. For a list of possible output values, see
'https://cloud.google.com/vertex-ai/docs/workbench/ reference/rest/v1/projects.locations.runtimes#healthstate'.
"""
return pulumi.get(self, "health_state")
@health_state.setter
def health_state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "health_state", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
A reference to the zone where the machine resides.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def metrics(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RuntimeMetricArgs']]]]:
"""
Contains Runtime daemon metrics such as Service status and JupyterLab status
"""
return pulumi.get(self, "metrics")
@metrics.setter
def metrics(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RuntimeMetricArgs']]]]):
pulumi.set(self, "metrics", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name specified for the Notebook instance.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="softwareConfig")
def software_config(self) -> Optional[pulumi.Input['RuntimeSoftwareConfigArgs']]:
"""
The config settings for software inside the runtime.
Structure is documented below.
"""
return pulumi.get(self, "software_config")
@software_config.setter
def software_config(self, value: Optional[pulumi.Input['RuntimeSoftwareConfigArgs']]):
pulumi.set(self, "software_config", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[str]]:
"""
The state of this runtime.
"""
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "state", value)
@property
@pulumi.getter(name="virtualMachine")
def virtual_machine(self) -> Optional[pulumi.Input['RuntimeVirtualMachineArgs']]:
"""
Use a Compute Engine VM image to start the managed notebook instance.
Structure is documented below.
"""
return pulumi.get(self, "virtual_machine")
@virtual_machine.setter
def virtual_machine(self, value: Optional[pulumi.Input['RuntimeVirtualMachineArgs']]):
pulumi.set(self, "virtual_machine", value)
class Runtime(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_config: Optional[pulumi.Input[pulumi.InputType['RuntimeAccessConfigArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
software_config: Optional[pulumi.Input[pulumi.InputType['RuntimeSoftwareConfigArgs']]] = None,
virtual_machine: Optional[pulumi.Input[pulumi.InputType['RuntimeVirtualMachineArgs']]] = None,
__props__=None):
"""
A Cloud AI Platform Notebook runtime.
> **Note:** Due to limitations of the Notebooks Runtime API, many fields
in this resource do not properly detect drift. These fields will also not
appear in state once imported.
To get more information about Runtime, see:
* [API documentation](https://cloud.google.com/ai-platform/notebooks/docs/reference/rest)
* How-to Guides
* [Official Documentation](https://cloud.google.com/ai-platform-notebooks)
## Example Usage
### Notebook Runtime Basic
```python
import pulumi
import pulumi_gcp as gcp
runtime = gcp.notebooks.Runtime("runtime",
access_config=gcp.notebooks.RuntimeAccessConfigArgs(
access_type="SINGLE_USER",
runtime_owner="admin@hashicorptest.com",
),
location="us-central1",
virtual_machine=gcp.notebooks.RuntimeVirtualMachineArgs(
virtual_machine_config=gcp.notebooks.RuntimeVirtualMachineVirtualMachineConfigArgs(
data_disk=gcp.notebooks.RuntimeVirtualMachineVirtualMachineConfigDataDiskArgs(
initialize_params=gcp.notebooks.RuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsArgs(
disk_size_gb=100,
disk_type="PD_STANDARD",
),
),
machine_type="n1-standard-4",
),
))
```
### Notebook Runtime Basic Gpu
```python
import pulumi
import pulumi_gcp as gcp
runtime_gpu = gcp.notebooks.Runtime("runtimeGpu",
access_config=gcp.notebooks.RuntimeAccessConfigArgs(
access_type="SINGLE_USER",
runtime_owner="admin@hashicorptest.com",
),
location="us-central1",
software_config=gcp.notebooks.RuntimeSoftwareConfigArgs(
install_gpu_driver=True,
),
virtual_machine=gcp.notebooks.RuntimeVirtualMachineArgs(
virtual_machine_config=gcp.notebooks.RuntimeVirtualMachineVirtualMachineConfigArgs(
accelerator_config=gcp.notebooks.RuntimeVirtualMachineVirtualMachineConfigAcceleratorConfigArgs(
core_count=1,
type="NVIDIA_TESLA_V100",
),
data_disk=gcp.notebooks.RuntimeVirtualMachineVirtualMachineConfigDataDiskArgs(
initialize_params=gcp.notebooks.RuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsArgs(
disk_size_gb=100,
disk_type="PD_STANDARD",
),
),
machine_type="n1-standard-4",
),
))
```
### Notebook Runtime Basic Container
```python
import pulumi
import pulumi_gcp as gcp
runtime_container = gcp.notebooks.Runtime("runtimeContainer",
access_config=gcp.notebooks.RuntimeAccessConfigArgs(
access_type="SINGLE_USER",
runtime_owner="admin@hashicorptest.com",
),
location="us-central1",
virtual_machine=gcp.notebooks.RuntimeVirtualMachineArgs(
virtual_machine_config=gcp.notebooks.RuntimeVirtualMachineVirtualMachineConfigArgs(
container_images=[
gcp.notebooks.RuntimeVirtualMachineVirtualMachineConfigContainerImageArgs(
repository="gcr.io/deeplearning-platform-release/base-cpu",
tag="latest",
),
gcp.notebooks.RuntimeVirtualMachineVirtualMachineConfigContainerImageArgs(
repository="gcr.io/deeplearning-platform-release/beam-notebooks",
tag="latest",
),
],
data_disk=gcp.notebooks.RuntimeVirtualMachineVirtualMachineConfigDataDiskArgs(
initialize_params=gcp.notebooks.RuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsArgs(
disk_size_gb=100,
disk_type="PD_STANDARD",
),
),
machine_type="n1-standard-4",
),
))
```
## Import
Runtime can be imported using any of these accepted formats
```sh
$ pulumi import gcp:notebooks/runtime:Runtime default projects/{{project}}/locations/{{location}}/runtimes/{{name}}
```
```sh
$ pulumi import gcp:notebooks/runtime:Runtime default {{project}}/{{location}}/{{name}}
```
```sh
$ pulumi import gcp:notebooks/runtime:Runtime default {{location}}/{{name}}
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['RuntimeAccessConfigArgs']] access_config: The config settings for accessing runtime.
Structure is documented below.
:param pulumi.Input[str] location: A reference to the zone where the machine resides.
:param pulumi.Input[str] name: The name specified for the Notebook instance.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[pulumi.InputType['RuntimeSoftwareConfigArgs']] software_config: The config settings for software inside the runtime.
Structure is documented below.
:param pulumi.Input[pulumi.InputType['RuntimeVirtualMachineArgs']] virtual_machine: Use a Compute Engine VM image to start the managed notebook instance.
Structure is documented below.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: RuntimeArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A Cloud AI Platform Notebook runtime.
> **Note:** Due to limitations of the Notebooks Runtime API, many fields
in this resource do not properly detect drift. These fields will also not
appear in state once imported.
To get more information about Runtime, see:
* [API documentation](https://cloud.google.com/ai-platform/notebooks/docs/reference/rest)
* How-to Guides
* [Official Documentation](https://cloud.google.com/ai-platform-notebooks)
## Example Usage
### Notebook Runtime Basic
```python
import pulumi
import pulumi_gcp as gcp
runtime = gcp.notebooks.Runtime("runtime",
access_config=gcp.notebooks.RuntimeAccessConfigArgs(
access_type="SINGLE_USER",
runtime_owner="admin@hashicorptest.com",
),
location="us-central1",
virtual_machine=gcp.notebooks.RuntimeVirtualMachineArgs(
virtual_machine_config=gcp.notebooks.RuntimeVirtualMachineVirtualMachineConfigArgs(
data_disk=gcp.notebooks.RuntimeVirtualMachineVirtualMachineConfigDataDiskArgs(
initialize_params=gcp.notebooks.RuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsArgs(
disk_size_gb=100,
disk_type="PD_STANDARD",
),
),
machine_type="n1-standard-4",
),
))
```
### Notebook Runtime Basic Gpu
```python
import pulumi
import pulumi_gcp as gcp
runtime_gpu = gcp.notebooks.Runtime("runtimeGpu",
access_config=gcp.notebooks.RuntimeAccessConfigArgs(
access_type="SINGLE_USER",
runtime_owner="admin@hashicorptest.com",
),
location="us-central1",
software_config=gcp.notebooks.RuntimeSoftwareConfigArgs(
install_gpu_driver=True,
),
virtual_machine=gcp.notebooks.RuntimeVirtualMachineArgs(
virtual_machine_config=gcp.notebooks.RuntimeVirtualMachineVirtualMachineConfigArgs(
accelerator_config=gcp.notebooks.RuntimeVirtualMachineVirtualMachineConfigAcceleratorConfigArgs(
core_count=1,
type="NVIDIA_TESLA_V100",
),
data_disk=gcp.notebooks.RuntimeVirtualMachineVirtualMachineConfigDataDiskArgs(
initialize_params=gcp.notebooks.RuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsArgs(
disk_size_gb=100,
disk_type="PD_STANDARD",
),
),
machine_type="n1-standard-4",
),
))
```
### Notebook Runtime Basic Container
```python
import pulumi
import pulumi_gcp as gcp
runtime_container = gcp.notebooks.Runtime("runtimeContainer",
access_config=gcp.notebooks.RuntimeAccessConfigArgs(
access_type="SINGLE_USER",
runtime_owner="admin@hashicorptest.com",
),
location="us-central1",
virtual_machine=gcp.notebooks.RuntimeVirtualMachineArgs(
virtual_machine_config=gcp.notebooks.RuntimeVirtualMachineVirtualMachineConfigArgs(
container_images=[
gcp.notebooks.RuntimeVirtualMachineVirtualMachineConfigContainerImageArgs(
repository="gcr.io/deeplearning-platform-release/base-cpu",
tag="latest",
),
gcp.notebooks.RuntimeVirtualMachineVirtualMachineConfigContainerImageArgs(
repository="gcr.io/deeplearning-platform-release/beam-notebooks",
tag="latest",
),
],
data_disk=gcp.notebooks.RuntimeVirtualMachineVirtualMachineConfigDataDiskArgs(
initialize_params=gcp.notebooks.RuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsArgs(
disk_size_gb=100,
disk_type="PD_STANDARD",
),
),
machine_type="n1-standard-4",
),
))
```
## Import
Runtime can be imported using any of these accepted formats
```sh
$ pulumi import gcp:notebooks/runtime:Runtime default projects/{{project}}/locations/{{location}}/runtimes/{{name}}
```
```sh
$ pulumi import gcp:notebooks/runtime:Runtime default {{project}}/{{location}}/{{name}}
```
```sh
$ pulumi import gcp:notebooks/runtime:Runtime default {{location}}/{{name}}
```
:param str resource_name: The name of the resource.
:param RuntimeArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RuntimeArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_config: Optional[pulumi.Input[pulumi.InputType['RuntimeAccessConfigArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
software_config: Optional[pulumi.Input[pulumi.InputType['RuntimeSoftwareConfigArgs']]] = None,
virtual_machine: Optional[pulumi.Input[pulumi.InputType['RuntimeVirtualMachineArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RuntimeArgs.__new__(RuntimeArgs)
__props__.__dict__["access_config"] = access_config
if location is None and not opts.urn:
raise TypeError("Missing required property 'location'")
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["project"] = project
__props__.__dict__["software_config"] = software_config
__props__.__dict__["virtual_machine"] = virtual_machine
__props__.__dict__["health_state"] = None
__props__.__dict__["metrics"] = None
__props__.__dict__["state"] = None
super(Runtime, __self__).__init__(
'gcp:notebooks/runtime:Runtime',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
access_config: Optional[pulumi.Input[pulumi.InputType['RuntimeAccessConfigArgs']]] = None,
health_state: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
metrics: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RuntimeMetricArgs']]]]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
software_config: Optional[pulumi.Input[pulumi.InputType['RuntimeSoftwareConfigArgs']]] = None,
state: Optional[pulumi.Input[str]] = None,
virtual_machine: Optional[pulumi.Input[pulumi.InputType['RuntimeVirtualMachineArgs']]] = None) -> 'Runtime':
"""
Get an existing Runtime resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['RuntimeAccessConfigArgs']] access_config: The config settings for accessing runtime.
Structure is documented below.
:param pulumi.Input[str] health_state: The health state of this runtime. For a list of possible output values, see
'https://cloud.google.com/vertex-ai/docs/workbench/ reference/rest/v1/projects.locations.runtimes#healthstate'.
:param pulumi.Input[str] location: A reference to the zone where the machine resides.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RuntimeMetricArgs']]]] metrics: Contains Runtime daemon metrics such as Service status and JupyterLab status
:param pulumi.Input[str] name: The name specified for the Notebook instance.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[pulumi.InputType['RuntimeSoftwareConfigArgs']] software_config: The config settings for software inside the runtime.
Structure is documented below.
:param pulumi.Input[str] state: The state of this runtime.
:param pulumi.Input[pulumi.InputType['RuntimeVirtualMachineArgs']] virtual_machine: Use a Compute Engine VM image to start the managed notebook instance.
Structure is documented below.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _RuntimeState.__new__(_RuntimeState)
__props__.__dict__["access_config"] = access_config
__props__.__dict__["health_state"] = health_state
__props__.__dict__["location"] = location
__props__.__dict__["metrics"] = metrics
__props__.__dict__["name"] = name
__props__.__dict__["project"] = project
__props__.__dict__["software_config"] = software_config
__props__.__dict__["state"] = state
__props__.__dict__["virtual_machine"] = virtual_machine
return Runtime(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="accessConfig")
def access_config(self) -> pulumi.Output[Optional['outputs.RuntimeAccessConfig']]:
"""
The config settings for accessing runtime.
Structure is documented below.
"""
return pulumi.get(self, "access_config")
@property
@pulumi.getter(name="healthState")
def health_state(self) -> pulumi.Output[str]:
"""
The health state of this runtime. For a list of possible output values, see
'https://cloud.google.com/vertex-ai/docs/workbench/ reference/rest/v1/projects.locations.runtimes#healthstate'.
"""
return pulumi.get(self, "health_state")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
A reference to the zone where the machine resides.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def metrics(self) -> pulumi.Output[Sequence['outputs.RuntimeMetric']]:
"""
Contains Runtime daemon metrics such as Service status and JupyterLab status
"""
return pulumi.get(self, "metrics")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name specified for the Notebook instance.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@property
@pulumi.getter(name="softwareConfig")
def software_config(self) -> pulumi.Output['outputs.RuntimeSoftwareConfig']:
"""
The config settings for software inside the runtime.
Structure is documented below.
"""
return pulumi.get(self, "software_config")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
The state of this runtime.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="virtualMachine")
def virtual_machine(self) -> pulumi.Output[Optional['outputs.RuntimeVirtualMachine']]:
"""
Use a Compute Engine VM image to start the managed notebook instance.
Structure is documented below.
"""
return pulumi.get(self, "virtual_machine")
| 43.899457
| 176
| 0.627205
| 3,194
| 32,310
| 6.166249
| 0.085786
| 0.059203
| 0.060777
| 0.035745
| 0.899924
| 0.884793
| 0.860015
| 0.838132
| 0.829398
| 0.815842
| 0
| 0.002102
| 0.278675
| 32,310
| 735
| 177
| 43.959184
| 0.842959
| 0.479882
| 0
| 0.701754
| 1
| 0
| 0.137936
| 0.056839
| 0
| 0
| 0
| 0
| 0
| 1
| 0.161404
| false
| 0.003509
| 0.024561
| 0
| 0.284211
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f3e45bbd5136c95bce872c2460b4822ee7744881
| 3,415
|
py
|
Python
|
tests/test_provider_ellisdon_oss_azuredevops.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
tests/test_provider_ellisdon_oss_azuredevops.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
tests/test_provider_ellisdon_oss_azuredevops.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# tests/test_provider_ellisdon-oss_azuredevops.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:13:00 UTC)
def test_provider_import():
import terrascript.provider.ellisdon_oss.azuredevops
def test_resource_import():
from terrascript.resource.ellisdon_oss.azuredevops import (
azuredevops_build_definition,
)
from terrascript.resource.ellisdon_oss.azuredevops import azuredevops_extension
from terrascript.resource.ellisdon_oss.azuredevops import azuredevops_project
from terrascript.resource.ellisdon_oss.azuredevops import (
azuredevops_release_definition,
)
from terrascript.resource.ellisdon_oss.azuredevops import (
azuredevops_release_environment,
)
from terrascript.resource.ellisdon_oss.azuredevops import azuredevops_release_task
from terrascript.resource.ellisdon_oss.azuredevops import azuredevops_release_tasks
from terrascript.resource.ellisdon_oss.azuredevops import (
azuredevops_release_variables,
)
from terrascript.resource.ellisdon_oss.azuredevops import (
azuredevops_service_endpoint,
)
from terrascript.resource.ellisdon_oss.azuredevops import azuredevops_service_hook
from terrascript.resource.ellisdon_oss.azuredevops import azuredevops_task_group
from terrascript.resource.ellisdon_oss.azuredevops import azuredevops_variable_group
def test_datasource_import():
from terrascript.data.ellisdon_oss.azuredevops import azuredevops_agent_queue
from terrascript.data.ellisdon_oss.azuredevops import azuredevops_build_definition
from terrascript.data.ellisdon_oss.azuredevops import azuredevops_group
from terrascript.data.ellisdon_oss.azuredevops import azuredevops_project
from terrascript.data.ellisdon_oss.azuredevops import azuredevops_release_definition
from terrascript.data.ellisdon_oss.azuredevops import (
azuredevops_release_definition_environments,
)
from terrascript.data.ellisdon_oss.azuredevops import (
azuredevops_release_definitions,
)
from terrascript.data.ellisdon_oss.azuredevops import (
azuredevops_release_environment,
)
from terrascript.data.ellisdon_oss.azuredevops import (
azuredevops_release_stage_variables,
)
from terrascript.data.ellisdon_oss.azuredevops import azuredevops_release_tasks
from terrascript.data.ellisdon_oss.azuredevops import azuredevops_service_endpoint
from terrascript.data.ellisdon_oss.azuredevops import azuredevops_source_repository
from terrascript.data.ellisdon_oss.azuredevops import azuredevops_task_group
from terrascript.data.ellisdon_oss.azuredevops import azuredevops_user
from terrascript.data.ellisdon_oss.azuredevops import azuredevops_variable_group
from terrascript.data.ellisdon_oss.azuredevops import azuredevops_variable_groups
from terrascript.data.ellisdon_oss.azuredevops import azuredevops_workflow_task
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.ellisdon_oss.azuredevops
#
# t = terrascript.provider.ellisdon_oss.azuredevops.azuredevops()
# s = str(t)
#
# assert 'https://github.com/ellisdon-oss/terraform-provider-azuredevops' in s
# assert '0.0.2' in s
| 33.811881
| 88
| 0.802343
| 383
| 3,415
| 6.895561
| 0.211488
| 0.141613
| 0.274896
| 0.307459
| 0.811056
| 0.795532
| 0.759939
| 0.755017
| 0.614919
| 0.056797
| 0
| 0.005128
| 0.143485
| 3,415
| 100
| 89
| 34.15
| 0.897778
| 0.156662
| 0
| 0.215686
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01
| 0
| 1
| 0.058824
| true
| 0
| 0.647059
| 0
| 0.705882
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 10
|
6d1afebe0ae7729e600f9a8815cf99382e3eb696
| 121
|
py
|
Python
|
homework3/rabindra_cloudmesh_ex2.py
|
futuresystems/465-rahulbindra
|
bcf2ad53a362b894f9547b1af006f1d7bdab76a6
|
[
"Apache-2.0"
] | 1
|
2015-04-12T06:50:15.000Z
|
2015-04-12T06:50:15.000Z
|
homework3/rabindra_cloudmesh_ex2.py
|
futuresystems/465-rahulbindra
|
bcf2ad53a362b894f9547b1af006f1d7bdab76a6
|
[
"Apache-2.0"
] | null | null | null |
homework3/rabindra_cloudmesh_ex2.py
|
futuresystems/465-rahulbindra
|
bcf2ad53a362b894f9547b1af006f1d7bdab76a6
|
[
"Apache-2.0"
] | null | null | null |
import cloudmesh
from pprint import pprint
print cloudmesh.shell("cloud on india")
print cloudmesh.shell("cloud list")
| 17.285714
| 39
| 0.793388
| 17
| 121
| 5.647059
| 0.588235
| 0.291667
| 0.395833
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123967
| 121
| 6
| 40
| 20.166667
| 0.90566
| 0
| 0
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 0.75
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 8
|
6d365786d642e18aa770c2c197db874520aeb990
| 3,823
|
py
|
Python
|
skrmt/covariance/tests/test_metrics.py
|
parthe/scikit-rmt
|
fd1d48dd027e3ba5ca3fb3b355ca3de012407286
|
[
"BSD-3-Clause"
] | 8
|
2021-05-06T18:34:19.000Z
|
2022-02-10T11:21:59.000Z
|
skrmt/covariance/tests/test_metrics.py
|
parthe/scikit-rmt
|
fd1d48dd027e3ba5ca3fb3b355ca3de012407286
|
[
"BSD-3-Clause"
] | 1
|
2022-02-02T19:48:36.000Z
|
2022-02-03T09:54:22.000Z
|
skrmt/covariance/tests/test_metrics.py
|
parthe/scikit-rmt
|
fd1d48dd027e3ba5ca3fb3b355ca3de012407286
|
[
"BSD-3-Clause"
] | 2
|
2021-06-17T03:09:14.000Z
|
2022-02-02T19:50:14.000Z
|
'''Metrics Test module
Testing Metrics module
'''
import numpy as np
import pytest
from skrmt.covariance import sample_estimator
from skrmt.covariance import fsopt_estimator
from skrmt.covariance import loss_frobenius, loss_mv, prial_mv
def test_prial_sample():
'''Testing prial evaluated in sample covariance matrix
'''
# population covariance matrix
sigma = np.array([[3.00407916, -1.46190757, 1.50140806, 1.50933526, 0.27036442],
[-1.46190757, 5.61441061, -0.51939653, -2.76492235, 1.38225566],
[1.50140806, -0.51939653, 2.3068582, 1.41248896, 0.84740175],
[1.50933526, -2.76492235, 1.41248896, 6.57182938, 0.73407095],
[0.27036442, 1.38225566, 0.84740175, 0.73407095, 9.50282265]])
p_size, n_size = sigma.shape[0], 3*sigma.shape[0]
# input data matrix
mtx = np.random.multivariate_normal(np.random.randn(p_size), sigma, size=n_size)
sigma_sample = sample_estimator(mtx)
sigma_fsopt = fsopt_estimator(mtx, sigma)
exp_sample = loss_mv(sigma_tilde=sigma_sample, sigma=sigma)
exp_sigma_tilde = loss_mv(sigma_tilde=sigma_sample, sigma=sigma)
exp_fsopt = loss_mv(sigma_tilde=sigma_fsopt, sigma=sigma)
prial = prial_mv(exp_sample=exp_sample, exp_sigma_tilde=exp_sigma_tilde, exp_fsopt=exp_fsopt)
assert prial == 0.0
def test_prial_fsopt():
'''Testing prial evaluated in finite-sample optimal covariance matrix
'''
# population covariance matrix
sigma = np.array([[3.00407916, -1.46190757, 1.50140806, 1.50933526, 0.27036442],
[-1.46190757, 5.61441061, -0.51939653, -2.76492235, 1.38225566],
[1.50140806, -0.51939653, 2.3068582, 1.41248896, 0.84740175],
[1.50933526, -2.76492235, 1.41248896, 6.57182938, 0.73407095],
[0.27036442, 1.38225566, 0.84740175, 0.73407095, 9.50282265]])
p_size, n_size = sigma.shape[0], 3*sigma.shape[0]
# input data matrix
mtx = np.random.multivariate_normal(np.random.randn(p_size), sigma, size=n_size)
sigma_sample = sample_estimator(mtx)
sigma_fsopt = fsopt_estimator(mtx, sigma)
exp_sample = loss_mv(sigma_tilde=sigma_sample, sigma=sigma)
exp_sigma_tilde = loss_mv(sigma_tilde=sigma_fsopt, sigma=sigma)
exp_fsopt = loss_mv(sigma_tilde=sigma_fsopt, sigma=sigma)
prial = prial_mv(exp_sample=exp_sample, exp_sigma_tilde=exp_sigma_tilde, exp_fsopt=exp_fsopt)
assert prial == 1.0
def test_loss_frobenius():
    """Frobenius loss of a matrix measured against itself is exactly zero."""
    # Any fixed symmetric matrix works; the loss compares it with itself.
    sigma = np.array(
        [[3.00407916, -1.46190757, 1.50140806, 1.50933526, 0.27036442],
         [-1.46190757, 5.61441061, -0.51939653, -2.76492235, 1.38225566],
         [1.50140806, -0.51939653, 2.3068582, 1.41248896, 0.84740175],
         [1.50933526, -2.76492235, 1.41248896, 6.57182938, 0.73407095],
         [0.27036442, 1.38225566, 0.84740175, 0.73407095, 9.50282265]]
    )
    assert loss_frobenius(sigma, sigma) == 0
def test_exceptions():
    """loss_mv raises ValueError when either argument is a singular matrix."""
    singular = np.zeros((5, 5))
    invertible = np.array(
        [[3.00407916, -1.46190757, 1.50140806, 1.50933526, 0.27036442],
         [-1.46190757, 5.61441061, -0.51939653, -2.76492235, 1.38225566],
         [1.50140806, -0.51939653, 2.3068582, 1.41248896, 0.84740175],
         [1.50933526, -2.76492235, 1.41248896, 6.57182938, 0.73407095],
         [0.27036442, 1.38225566, 0.84740175, 0.73407095, 9.50282265]]
    )
    # The singular matrix must be rejected in either argument position.
    for first, second in ((singular, invertible), (invertible, singular)):
        with pytest.raises(ValueError):
            loss_mv(first, second)
| 40.242105
| 97
| 0.654198
| 518
| 3,823
| 4.679537
| 0.162162
| 0.049505
| 0.033003
| 0.039604
| 0.813531
| 0.785479
| 0.759076
| 0.759076
| 0.757013
| 0.754538
| 0
| 0.306944
| 0.220246
| 3,823
| 94
| 98
| 40.670213
| 0.506206
| 0.11797
| 0
| 0.660377
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.056604
| 1
| 0.075472
| false
| 0
| 0.09434
| 0
| 0.169811
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6d57a8d2892ed0564e965fd301e1159e32d860c3
| 108
|
py
|
Python
|
statreg/model/__init__.py
|
mipt-npm/statreg-py
|
45bd76599fb139ffcc44493d817ed672954e84a1
|
[
"Apache-2.0"
] | null | null | null |
statreg/model/__init__.py
|
mipt-npm/statreg-py
|
45bd76599fb139ffcc44493d817ed672954e84a1
|
[
"Apache-2.0"
] | null | null | null |
statreg/model/__init__.py
|
mipt-npm/statreg-py
|
45bd76599fb139ffcc44493d817ed672954e84a1
|
[
"Apache-2.0"
] | null | null | null |
"""Public API of the model subpackage: re-exports the Gauss-error unfolders."""
from .gauss_error import GaussErrorMatrixUnfolder
from .gauss_error import GaussErrorUnfolder

# Declare the re-exported names so `from ... import *` picks them up.
# (Previously `__all__` was left empty, which hid the re-exports from
# star-imports even though they are clearly the package's public API.)
__all__ = ["GaussErrorMatrixUnfolder", "GaussErrorUnfolder"]
| 21.6
| 49
| 0.842593
| 11
| 108
| 7.727273
| 0.636364
| 0.211765
| 0.329412
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 108
| 5
| 50
| 21.6
| 0.885417
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ed9067859d5d99305bb39d1f331f204b38760bb6
| 22,750
|
py
|
Python
|
booking/tests.py
|
naritotakizawa/django-booking-sample
|
769650d2c574473cc3e56d7fb7135631ce54f4de
|
[
"MIT"
] | 4
|
2020-01-01T14:23:08.000Z
|
2021-05-24T12:11:35.000Z
|
booking/tests.py
|
naritotakizawa/django-booking-sample
|
769650d2c574473cc3e56d7fb7135631ce54f4de
|
[
"MIT"
] | 6
|
2021-04-08T21:17:48.000Z
|
2022-01-13T02:03:22.000Z
|
booking/tests.py
|
naritotakizawa/django-booking-sample
|
769650d2c574473cc3e56d7fb7135631ce54f4de
|
[
"MIT"
] | 1
|
2021-06-23T05:42:15.000Z
|
2021-06-23T05:42:15.000Z
|
import datetime
from django.shortcuts import resolve_url, get_object_or_404
from django.test import TestCase
from django.template.exceptions import TemplateDoesNotExist
from django.utils import timezone
from .models import Schedule, Staff
# Markers asserted against the rendered calendar pages in the tests below:
batu = '×'  # "x" mark — shown for a slot that already has a booking
maru = '○'  # circle — shown for a slot that is open for booking
line = '-'  # dash — filler cell; always present in calendar responses
class StoreListViewTests(TestCase):
    """Tests for the store list page."""
    fixtures = ['initial']
    def test_get(self):
        """All three fixture stores are rendered in the store list."""
        res = self.client.get(resolve_url('booking:store_list'))
        expected = ['<Store: 店舗A>', '<Store: 店舗B>', '<Store: 店舗C>']
        self.assertQuerysetEqual(res.context['store_list'], expected)
class StaffListViewTests(TestCase):
    """Tests for the per-store staff list page."""
    fixtures = ['initial']
    def _staff_list_for(self, store_pk):
        """Fetch the staff list page of a store and return its context queryset."""
        res = self.client.get(resolve_url('booking:staff_list', pk=store_pk))
        return res.context['staff_list']
    def test_store_a(self):
        """Store A lists its two staff members."""
        expected = ['<Staff: 店舗A - じゃば>', '<Staff: 店舗A - ぱいそん>']
        self.assertQuerysetEqual(self._staff_list_for(1), expected)
    def test_store_b(self):
        """Store B lists its single staff member."""
        self.assertQuerysetEqual(self._staff_list_for(2), ['<Staff: 店舗B - じゃんご>'])
    def test_store_c(self):
        """Store C has no staff, so the list is empty."""
        self.assertQuerysetEqual(self._staff_list_for(3), [])
class StaffCalendarViewTests(TestCase):
    """Tests for the weekly staff calendar page.

    The calendar renders '-' filler, '○' for open slots and '×' for booked
    slots; the tests check which of those markers appear for schedules placed
    at various dates and hours.
    """
    fixtures = ['initial']

    def _add_schedule(self, days, hour, staff_pk=1):
        """Create a one-hour booking `days` from now at `hour`:00.

        Returns (staff, start) so callers can build URLs from either.
        """
        staff = get_object_or_404(Staff, pk=staff_pk)
        start = timezone.localtime() + datetime.timedelta(days=days)
        start = start.replace(hour=hour, minute=0, second=0)
        end = start + datetime.timedelta(hours=1)
        Schedule.objects.create(staff=staff, start=start, end=end, name='テスト')
        return staff, start

    def _get_calendar(self, staff_pk=1, **url_kwargs):
        """GET the calendar page for a staff member (optionally at a date)."""
        return self.client.get(resolve_url('booking:calendar', pk=staff_pk, **url_kwargs))

    def test_no_schedule(self):
        """Without schedules: header and date range render, and no × appears."""
        start = timezone.localtime()
        end = start + datetime.timedelta(days=6)
        response = self._get_calendar()
        self.assertContains(response, '店舗A店 ぱいそん')
        self.assertContains(
            response,
            f'{start.year}年{start.month}月{start.day}日 - {end.year}年{end.month}月{end.day}日',
        )
        self.assertContains(response, line)
        self.assertContains(response, maru)
        self.assertNotContains(response, batu)

    def test_one_schedule_next_day_9(self):
        """A booking tomorrow at 9:00 falls inside the calendar, so × shows."""
        staff, _ = self._add_schedule(days=1, hour=9)
        response = self._get_calendar(staff.pk)
        self.assertContains(response, line)
        self.assertContains(response, maru)
        self.assertContains(response, batu)

    def test_one_schedule_next_day_8(self):
        """8:00 is outside the displayed hours, so no × shows."""
        staff, _ = self._add_schedule(days=1, hour=8)
        response = self._get_calendar(staff.pk)
        self.assertContains(response, line)
        self.assertContains(response, maru)
        self.assertNotContains(response, batu)

    def test_one_schedule_next_day_17(self):
        """17:00 is still inside the displayed hours, so × shows."""
        staff, _ = self._add_schedule(days=1, hour=17)
        response = self._get_calendar(staff.pk)
        self.assertContains(response, line)
        self.assertContains(response, maru)
        self.assertContains(response, batu)

    def test_one_schedule_next_day_18(self):
        """18:00 is outside the displayed hours, so no × shows."""
        staff, _ = self._add_schedule(days=1, hour=18)
        response = self._get_calendar(staff.pk)
        self.assertContains(response, line)
        self.assertContains(response, maru)
        self.assertNotContains(response, batu)

    def test_one_schedule_before_day_9(self):
        """The calendar starts today, so yesterday's booking is not shown."""
        staff, _ = self._add_schedule(days=-1, hour=9)
        response = self._get_calendar(staff.pk)
        self.assertContains(response, line)
        self.assertContains(response, maru)
        self.assertNotContains(response, batu)

    def test_one_schedule_next_week_9(self):
        """A booking 7 days ahead is outside the default week, so no ×."""
        staff, _ = self._add_schedule(days=7, hour=9)
        response = self._get_calendar(staff.pk)
        self.assertContains(response, line)
        self.assertContains(response, maru)
        self.assertNotContains(response, batu)

    def test_one_schedule_next_week_9_and_move(self):
        """Moving the calendar to next week makes that week's booking visible."""
        staff, start = self._add_schedule(days=7, hour=9)
        response = self._get_calendar(staff.pk, year=start.year, month=start.month, day=start.day)
        self.assertContains(response, line)
        self.assertContains(response, maru)
        self.assertContains(response, batu)
        end = start + datetime.timedelta(days=6)
        self.assertContains(response, '店舗A店 ぱいそん')
        self.assertContains(
            response,
            f'{start.year}年{start.month}月{start.day}日 - {end.year}年{end.month}月{end.day}日',
        )
class BookingViewTests(TestCase):
    """Tests for the booking (reservation) form view."""
    fixtures = ['initial']
    def test_get(self):
        """The booking page renders the staff header and the chosen slot."""
        now = timezone.localtime()
        response = self.client.get(resolve_url('booking:booking', pk=1, year=now.year, month=now.month, day=now.day, hour=9))
        self.assertContains(response, '店舗A店 ぱいそん')
        self.assertContains(response, f'{now.year}年{now.month}月{now.day}日 9時に予約')
    def test_post(self):
        """After a successful booking the calendar shows a × (booked) mark."""
        now = timezone.localtime() + datetime.timedelta(days=1)
        response = self.client.post(
            resolve_url('booking:booking', pk=1, year=now.year, month=now.month, day=now.day, hour=9),
            {'name': 'テスト'},
            follow=True
        )
        # A successful booking produces no flash messages.
        messages = list(response.context['messages'])
        self.assertEqual(messages, [])
        self.assertContains(response, batu)
    def test_post_exists_data(self):
        """Booking a slot that was just taken shows an apology message."""
        now = timezone.localtime().replace(hour=9, minute=0, second=0, microsecond=0)
        end = now + datetime.timedelta(hours=1)
        staff = get_object_or_404(Staff, pk=1)
        # Pre-fill today's 9:00 slot so the POST below collides with it.
        Schedule.objects.create(staff=staff, start=now, end=end, name='埋めた')
        response = self.client.post(
            resolve_url('booking:booking', pk=1, year=now.year, month=now.month, day=now.day, hour=9),
            {'name': 'これは入らない'},
            follow=True
        )
        messages = list(response.context['messages'])
        self.assertEqual(str(messages[0]), 'すみません、入れ違いで予約がありました。別の日時はどうですか。')
class MyPageViewTests(TestCase):
    """Tests for the logged-in user's My Page (staff and upcoming bookings)."""
    fixtures = ['initial']
    def test_anonymous(self):
        """Anonymous users are redirected to the login page."""
        response = self.client.get(resolve_url('booking:my_page'))
        self.assertRedirects(response, '/login/?next=%2Fmypage%2F')
    def test_login_admin(self):
        """A superuser who is not store staff sees empty staff/schedule lists."""
        self.client.login(username='admin', password='admin123')
        response = self.client.get(resolve_url('booking:my_page'))
        self.assertQuerysetEqual(response.context['staff_list'], [])
        self.assertQuerysetEqual(response.context['schedule_list'], [])
        self.assertContains(response, 'adminのMyPage')
    def test_login_tanaka(self):
        """Logging in as Tanaka shows his staff records."""
        self.client.login(username='tanakataro', password='helloworld123')
        response = self.client.get(resolve_url('booking:my_page'))
        self.assertQuerysetEqual(response.context['staff_list'], ['<Staff: 店舗B - じゃんご>', '<Staff: 店舗A - ぱいそん>'])
        self.assertQuerysetEqual(response.context['schedule_list'], [])
        self.assertContains(response, 'tanakataroのMyPage')
    def test_login_tanaka_with_schedule(self):
        """Only Tanaka's own upcoming bookings appear on his My Page."""
        staff1 = get_object_or_404(Staff, pk=1)
        staff2 = get_object_or_404(Staff, pk=2)
        staff3 = get_object_or_404(Staff, pk=3)
        now = timezone.localtime()
        s1 = Schedule.objects.create(staff=staff1, start=now - datetime.timedelta(hours=1), end=now, name='テスト1') # past booking: not shown
        s2 = Schedule.objects.create(staff=staff1, start=now + datetime.timedelta(hours=1), end=now, name='テスト2') # upcoming, own staff: shown
        s3 = Schedule.objects.create(staff=staff2, start=now + datetime.timedelta(hours=1), end=now, name='テスト3') # upcoming, own staff: shown
        s4 = Schedule.objects.create(staff=staff3, start=now + datetime.timedelta(hours=1), end=now, name='テスト4') # staff3 belongs to another user
        self.client.login(username='tanakataro', password='helloworld123')
        response = self.client.get(resolve_url('booking:my_page'))
        self.assertEqual(list(response.context['schedule_list']), [s2, s3])
    def test_login_yosida_with_schedule(self):
        """Only Yoshida's own upcoming bookings appear on his My Page."""
        staff1 = get_object_or_404(Staff, pk=1)
        staff2 = get_object_or_404(Staff, pk=2)
        staff3 = get_object_or_404(Staff, pk=3)
        now = timezone.localtime()
        s1 = Schedule.objects.create(staff=staff1, start=now - datetime.timedelta(hours=1), end=now, name='テスト1')
        s2 = Schedule.objects.create(staff=staff1, start=now + datetime.timedelta(hours=1), end=now, name='テスト2')
        s3 = Schedule.objects.create(staff=staff2, start=now + datetime.timedelta(hours=1), end=now, name='テスト3')
        s4 = Schedule.objects.create(staff=staff3, start=now + datetime.timedelta(hours=1), end=now, name='テスト4') # Yoshida's booking
        self.client.login(username='yosidaziro', password='helloworld123')
        response = self.client.get(resolve_url('booking:my_page'))
        self.assertEqual(list(response.context['schedule_list']), [s4])
        self.assertContains(response, 'yosidaziroのMyPage')
class MyPageWithPkViewTests(TestCase):
    """Access-control tests for viewing a specific user's My Page by pk."""
    fixtures = ['initial']
    def test_anonymous(self):
        """Anonymous users get 403."""
        response = self.client.get(resolve_url('booking:my_page_with_pk', pk=2))
        self.assertEqual(response.status_code, 403)
    def test_login_admin(self):
        """A superuser can view any user's My Page."""
        self.client.login(username='admin', password='admin123')
        response = self.client.get(resolve_url('booking:my_page_with_pk', pk=2))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'tanakataroのMyPage')
    def test_login_tanaka(self):
        """A user can view their own My Page."""
        self.client.login(username='tanakataro', password='helloworld123')
        response = self.client.get(resolve_url('booking:my_page_with_pk', pk=2))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'tanakataroのMyPage')
    def test_login_yosida(self):
        """A user cannot view another user's My Page (403)."""
        self.client.login(username='yosidaziro', password='helloworld123')
        response = self.client.get(resolve_url('booking:my_page_with_pk', pk=2))
        self.assertEqual(response.status_code, 403)
    def test_not_exist_user_as_admin(self):
        """A superuser visiting a non-existent user's page gets 404.

        BUG FIX: this method and the one below were both named
        ``test_not_exist_user``; the second definition silently shadowed the
        first, so the 404 case was never executed. Renamed to unique names so
        unittest discovery runs both.
        """
        self.client.login(username='admin', password='admin123')
        response = self.client.get(resolve_url('booking:my_page_with_pk', pk=10000))
        self.assertEqual(response.status_code, 404)
    def test_not_exist_user_as_normal_user(self):
        """A normal user visiting a non-existent user's page gets 403."""
        self.client.login(username='tanakataro', password='helloworld123')
        response = self.client.get(resolve_url('booking:my_page_with_pk', pk=10000))
        self.assertEqual(response.status_code, 403)
class MyPageCalendarViewTests(TestCase):
    """Access-control tests for the staff member's own calendar page."""
    fixtures = ['initial']
    def test_anonymous(self):
        """Anonymous users get 403."""
        response = self.client.get(resolve_url('booking:my_page_calendar', pk=1))
        self.assertEqual(response.status_code, 403)
    def test_login_admin(self):
        """A superuser can view any staff member's calendar."""
        self.client.login(username='admin', password='admin123')
        response = self.client.get(resolve_url('booking:my_page_calendar', pk=1))
        self.assertEqual(response.status_code, 200)
    def test_login_tanaka(self):
        """A staff member can view their own calendar (with header and markers)."""
        self.client.login(username='tanakataro', password='helloworld123')
        response = self.client.get(resolve_url('booking:my_page_calendar', pk=1))
        self.assertEqual(response.status_code, 200)
        start = timezone.localtime()
        end = start + datetime.timedelta(days=6)
        self.assertContains(response, '店舗A店 ぱいそん')
        self.assertContains(response, f'{start.year}年{start.month}月{start.day}日 - {end.year}年{end.month}月{end.day}日')
        self.assertContains(response, line)
        self.assertContains(response, maru)
        # No schedules exist, so no booked (×) marks appear.
        self.assertNotContains(response, batu)
    def test_login_yosida(self):
        """A staff member cannot view another staff member's calendar (403)."""
        self.client.login(username='yosidaziro', password='helloworld123')
        response = self.client.get(resolve_url('booking:my_page_calendar', pk=1))
        self.assertEqual(response.status_code, 403)
class MyPageDayDetailViewTests(TestCase):
    """Tests for the per-day booking detail page on a staff member's My Page."""
    fixtures = ['initial']
    def test_no_schedule(self):
        """The store/staff header and the selected date render correctly."""
        self.client.login(username='tanakataro', password='helloworld123')
        staff = get_object_or_404(Staff, pk=1)
        now = timezone.localtime().replace(hour=9, minute=0, second=0)
        response = self.client.get(resolve_url('booking:my_page_day_detail', pk=staff.pk, year=now.year, month=now.month, day=now.day))
        self.assertContains(response, '店舗A店 ぱいそん')
        self.assertContains(response, f'{now.year}年{now.month}月{now.day}日の予約一覧')
    def test_one_schedule_9(self):
        """A booking at 9:00 (inside business hours) is listed."""
        self.client.login(username='tanakataro', password='helloworld123')
        staff = get_object_or_404(Staff, pk=1)
        now = timezone.localtime().replace(hour=9, minute=0, second=0)
        Schedule.objects.create(staff=staff, start=now, end=now, name='テスト')
        response = self.client.get(resolve_url('booking:my_page_day_detail', pk=staff.pk, year=now.year, month=now.month, day=now.day))
        self.assertContains(response, 'テスト')
    def test_one_schedule_23(self):
        """A booking at 23:00 (outside business hours) is not listed."""
        self.client.login(username='tanakataro', password='helloworld123')
        staff = get_object_or_404(Staff, pk=1)
        now = timezone.localtime().replace(hour=23, minute=0, second=0)
        Schedule.objects.create(staff=staff, start=now, end=now, name='テスト')
        response = self.client.get(resolve_url('booking:my_page_day_detail', pk=staff.pk, year=now.year, month=now.month, day=now.day))
        self.assertNotContains(response, 'テスト')
class MyPageScheduleViewTests(TestCase):
    """Tests for viewing and updating a single booking from My Page."""
    fixtures = ['initial']
    def test_anonymous(self):
        """Anonymous users get 403."""
        now = timezone.now()
        staff = get_object_or_404(Staff, pk=1)
        s1 = Schedule.objects.create(staff=staff, start=now, end=now, name='テスト')
        response = self.client.get(resolve_url('booking:my_page_schedule', pk=s1.pk))
        self.assertEqual(response.status_code, 403)
    def test_login_admin(self):
        """A superuser can view the detail page of any booking."""
        self.client.login(username='admin', password='admin123')
        now = timezone.now()
        staff = get_object_or_404(Staff, pk=1)
        s1 = Schedule.objects.create(staff=staff, start=now, end=now, name='テスト')
        response = self.client.get(resolve_url('booking:my_page_schedule', pk=s1.pk))
        self.assertContains(response, '店舗A店 ぱいそん')
    def test_login_tanaka(self):
        """A staff member can view the detail page of their own booking."""
        self.client.login(username='tanakataro', password='helloworld123')
        now = timezone.now()
        staff = get_object_or_404(Staff, pk=1)
        s1 = Schedule.objects.create(staff=staff, start=now, end=now, name='テスト')
        response = self.client.get(resolve_url('booking:my_page_schedule', pk=s1.pk))
        self.assertContains(response, '店舗A店 ぱいそん')
    def test_login_yosida(self):
        """A staff member cannot view someone else's booking detail (403)."""
        self.client.login(username='yosidaziro', password='helloworld123')
        now = timezone.now()
        staff = get_object_or_404(Staff, pk=1)
        s1 = Schedule.objects.create(staff=staff, start=now, end=now, name='テスト')
        response = self.client.get(resolve_url('booking:my_page_schedule', pk=s1.pk))
        self.assertEqual(response.status_code, 403)
    def test_post(self):
        """Updating a booking via POST keeps it in the schedule list."""
        self.client.login(username='tanakataro', password='helloworld123')
        now = timezone.now() + datetime.timedelta(days=1)
        staff = get_object_or_404(Staff, pk=1)
        s1 = Schedule.objects.create(staff=staff, start=now, end=now, name='テスト')
        # Format expected by the update form's datetime fields.
        now_str = now.strftime('%Y-%m-%d %H:%M:%S')
        response = self.client.post(
            resolve_url('booking:my_page_schedule', pk=s1.pk),
            {'name': '更新しました', 'start': now_str, 'end': now_str},
            follow=True
        )
        self.assertEqual(list(response.context['schedule_list']), [s1])
class MyPageScheduleDeleteViewTests(TestCase):
    """Tests for deleting a booking from My Page."""
    fixtures = ['initial']
    def test_get(self):
        """GET is not supported by the delete view, so no confirmation template
        exists and TemplateDoesNotExist is raised."""
        self.client.login(username='tanakataro', password='helloworld123')
        now = timezone.now() + datetime.timedelta(days=1)
        staff = get_object_or_404(Staff, pk=1)
        s1 = Schedule.objects.create(staff=staff, start=now, end=now, name='テスト')
        with self.assertRaises(TemplateDoesNotExist):
            response = self.client.get(resolve_url('booking:my_page_schedule_delete', pk=s1.pk),)
    def test_post(self):
        """After deletion via POST the booking disappears from the list."""
        self.client.login(username='tanakataro', password='helloworld123')
        now = timezone.now() + datetime.timedelta(days=1)
        staff = get_object_or_404(Staff, pk=1)
        s1 = Schedule.objects.create(staff=staff, start=now, end=now, name='テスト')
        response = self.client.post(
            resolve_url('booking:my_page_schedule_delete', pk=s1.pk),
            follow=True
        )
        self.assertEqual(list(response.context['schedule_list']), [])
class MyPageHolidayAddViewTests(TestCase):
    """Access-control tests for adding a holiday (blocking a slot) from My Page."""
    fixtures = ['initial']
    def test_anonymous(self):
        """Anonymous users get 403."""
        now = timezone.now()
        staff = get_object_or_404(Staff, pk=1)
        response = self.client.post(
            resolve_url('booking:my_page_holiday_add', pk=staff.pk, year=now.year, month=now.month, day=now.day, hour=9),
            follow=True,
        )
        self.assertEqual(response.status_code, 403)
    def test_login_admin(self):
        """A superuser may add a holiday for any staff member."""
        self.client.login(username='admin', password='admin123')
        now = timezone.now()
        staff = get_object_or_404(Staff, pk=1)
        response = self.client.post(
            resolve_url('booking:my_page_holiday_add', pk=staff.pk, year=now.year, month=now.month, day=now.day, hour=9),
            follow=True,
        )
        self.assertContains(response, '休暇(システムによる追加)')
        self.assertEqual(response.status_code, 200)
    def test_login_tanaka(self):
        """A staff member may add a holiday for themselves."""
        self.client.login(username='tanakataro', password='helloworld123')
        now = timezone.now()
        staff = get_object_or_404(Staff, pk=1)
        response = self.client.post(
            resolve_url('booking:my_page_holiday_add', pk=staff.pk, year=now.year, month=now.month, day=now.day, hour=9),
            follow=True,
        )
        self.assertContains(response, '休暇(システムによる追加)')
        self.assertEqual(response.status_code, 200)
    def test_login_yosida(self):
        """A staff member may not add a holiday for someone else (403)."""
        self.client.login(username='yosidaziro', password='helloworld123')
        now = timezone.now()
        staff = get_object_or_404(Staff, pk=1)
        response = self.client.post(
            resolve_url('booking:my_page_holiday_add', pk=staff.pk, year=now.year, month=now.month, day=now.day, hour=9),
            follow=True,
        )
        self.assertEqual(response.status_code, 403)
    def test_get(self):
        """The view only accepts POST; GET returns 405 Method Not Allowed."""
        self.client.login(username='admin', password='admin123')
        now = timezone.now()
        staff = get_object_or_404(Staff, pk=1)
        response = self.client.get(
            resolve_url('booking:my_page_holiday_add', pk=staff.pk, year=now.year, month=now.month, day=now.day, hour=9),
            follow=True,
        )
        self.assertEqual(response.status_code, 405)
| 45.318725
| 136
| 0.648044
| 2,674
| 22,750
| 5.385565
| 0.094989
| 0.048608
| 0.056246
| 0.053955
| 0.842719
| 0.828484
| 0.817304
| 0.798
| 0.778557
| 0.735921
| 0
| 0.025057
| 0.215868
| 22,750
| 501
| 137
| 45.409182
| 0.781602
| 0.056703
| 0
| 0.756906
| 0
| 0.008287
| 0.120775
| 0.047203
| 0
| 0
| 0
| 0
| 0.226519
| 1
| 0.124309
| false
| 0.069061
| 0.016575
| 0
| 0.201657
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
edd2dc1d4e2c2868670ee4b7dd5aff4b4eb25a0d
| 17,813
|
py
|
Python
|
bioptim/dynamics/dynamics_functions.py
|
vennand/BiorbdOptim
|
9c741da6366e0c9d5d727da6297a17a599759eb1
|
[
"MIT"
] | null | null | null |
bioptim/dynamics/dynamics_functions.py
|
vennand/BiorbdOptim
|
9c741da6366e0c9d5d727da6297a17a599759eb1
|
[
"MIT"
] | null | null | null |
bioptim/dynamics/dynamics_functions.py
|
vennand/BiorbdOptim
|
9c741da6366e0c9d5d727da6297a17a599759eb1
|
[
"MIT"
] | null | null | null |
from casadi import vertcat, MX
import biorbd
class DynamicsFunctions:
"""
Different dynamics types
"""
@staticmethod
def custom(states, controls, parameters, nlp):
qdot, qddot = nlp.problem_type["dynamic"](states, controls, parameters, nlp)
return vertcat(qdot, qddot)
    @staticmethod
    def forward_dynamics_torque_driven(states, controls, parameters, nlp):
        """
        Forward dynamics (q, qdot, tau -> qddot) driven by joint torques (controls),
        with optional external forces applied per shooting node.
        :param states: States. (MX.sym from CasADi)
        :param controls: Controls. (MX.sym from CasADi)
        :param parameters: The MX associated to the parameters
        :param nlp: An OptimalControlProgram class.
        :return: Vertcat of derived states; when external forces are present,
            an MX with one column per shooting node. (MX.sym from CasADi)
        """
        DynamicsFunctions.apply_parameters(parameters, nlp)
        q, qdot, tau = DynamicsFunctions.dispatch_q_qdot_tau_data(states, controls, nlp)
        q_dot = nlp.model.computeQdot(q, qdot).to_mx()
        qdot_reduced = nlp.mapping["q"].reduce.map(q_dot)
        if nlp.external_forces:
            # One derivative column per node, each evaluated with its own external force.
            # NOTE(review): assumes len(nlp.external_forces) matches nlp.ns — confirm.
            dxdt = MX(nlp.nx, nlp.ns)
            for i, f_ext in enumerate(nlp.external_forces):
                qddot = nlp.model.ForwardDynamics(q, qdot, tau, f_ext).to_mx()
                qddot_reduced = nlp.mapping["q_dot"].reduce.map(qddot)
                dxdt[:, i] = vertcat(qdot_reduced, qddot_reduced)
        else:
            qddot = nlp.model.ForwardDynamics(q, qdot, tau).to_mx()
            qddot_reduced = nlp.mapping["q_dot"].reduce.map(qddot)
            dxdt = vertcat(qdot_reduced, qddot_reduced)
        return dxdt
    @staticmethod
    def forward_dynamics_torque_driven_with_contact(states, controls, parameters, nlp):
        """
        Forward dynamics (q, qdot, tau -> qddot) with contact constraints,
        driven by joint torques (controls).
        :param states: States. (MX.sym from CasADi)
        :param controls: Controls. (MX.sym from CasADi)
        :param parameters: The MX associated to the parameters
        :param nlp: An OptimalControlProgram class.
        :return: Vertcat of derived states. (MX.sym from CasADi)
        """
        DynamicsFunctions.apply_parameters(parameters, nlp)
        q, qdot, tau = DynamicsFunctions.dispatch_q_qdot_tau_data(states, controls, nlp)
        # Constrained forward dynamics enforces the model's contact constraints.
        qddot = biorbd.Model.ForwardDynamicsConstraintsDirect(nlp.model, q, qdot, tau).to_mx()
        q_dot = nlp.model.computeQdot(q, qdot).to_mx()
        qdot_reduced = nlp.mapping["q"].reduce.map(q_dot)
        qddot_reduced = nlp.mapping["q_dot"].reduce.map(qddot)
        return vertcat(qdot_reduced, qddot_reduced)
    @staticmethod
    def forces_from_forward_dynamics_with_contact_for_torque_driven_problem(states, controls, parameters, nlp):
        """
        Returns contact forces computed from constrained forward dynamics
        (see forward_dynamics_torque_driven_with_contact).
        :param states: States. (MX.sym from CasADi)
        :param controls: Controls. (MX.sym from CasADi)
        :param parameters: The MX associated to the parameters
        :param nlp: An OptimalControlProgram class.
        :return: Contact forces. (MX.sym from CasADi)
        """
        DynamicsFunctions.apply_parameters(parameters, nlp)
        q, qdot, tau = DynamicsFunctions.dispatch_q_qdot_tau_data(states, controls, nlp)
        cs = nlp.model.getConstraints()
        # Solving with the constraint set populates `cs` with the contact forces.
        biorbd.Model.ForwardDynamicsConstraintsDirect(nlp.model, q, qdot, tau, cs)
        return cs.getForce().to_mx()
    @staticmethod
    def forces_from_forward_dynamics_with_contact_for_torque_activation_driven_problem(
        states, controls, parameters, nlp
    ):
        """
        Returns contact forces computed from constrained forward dynamics when the
        controls are torque activations (converted to torques via the model).
        :param states: States. (MX.sym from CasADi)
        :param controls: Controls. (MX.sym from CasADi)
        :param parameters: The MX associated to the parameters
        :param nlp: An OptimalControlProgram class.
        :return: Contact forces. (MX.sym from CasADi)
        """
        DynamicsFunctions.apply_parameters(parameters, nlp)
        q, qdot, torque_act = DynamicsFunctions.dispatch_q_qdot_tau_data(states, controls, nlp)
        # Convert torque activations into actual joint torques.
        tau = nlp.model.torque(torque_act, q, qdot).to_mx()
        cs = nlp.model.getConstraints()
        # Solving with the constraint set populates `cs` with the contact forces.
        biorbd.Model.ForwardDynamicsConstraintsDirect(nlp.model, q, qdot, tau, cs)
        return cs.getForce().to_mx()
    @staticmethod
    def forward_dynamics_torque_activations_driven(states, controls, parameters, nlp):
        """
        Forward dynamics driven by torque activations (controls), which are first
        converted to joint torques by the model.
        :param states: States. (MX.sym from CasADi)
        :param controls: Controls (torque activations). (MX.sym from CasADi)
        :param parameters: The MX associated to the parameters
        :param nlp: An OptimalControlProgram class.
        :return: Vertcat of derived states. (MX.sym from CasADi)
        """
        DynamicsFunctions.apply_parameters(parameters, nlp)
        q, qdot, torque_act = DynamicsFunctions.dispatch_q_qdot_tau_data(states, controls, nlp)
        tau = nlp.model.torque(torque_act, q, qdot).to_mx()
        qddot = nlp.model.ForwardDynamics(q, qdot, tau).to_mx()
        q_dot = nlp.model.computeQdot(q, qdot).to_mx()
        qdot_reduced = nlp.mapping["q"].reduce.map(q_dot)
        qddot_reduced = nlp.mapping["q_dot"].reduce.map(qddot)
        return vertcat(qdot_reduced, qddot_reduced)
    @staticmethod
    def forward_dynamics_torque_activations_driven_with_contact(states, controls, parameters, nlp):
        """
        Forward dynamics with contact constraints, driven by torque activations
        (controls) converted to joint torques by the model.
        :param states: States. (MX.sym from CasADi)
        :param controls: Controls (torque activations). (MX.sym from CasADi)
        :param parameters: The MX associated to the parameters
        :param nlp: An OptimalControlProgram class.
        :return: Vertcat of derived states. (MX.sym from CasADi)
        """
        DynamicsFunctions.apply_parameters(parameters, nlp)
        q, qdot, torque_act = DynamicsFunctions.dispatch_q_qdot_tau_data(states, controls, nlp)
        tau = nlp.model.torque(torque_act, q, qdot).to_mx()
        # NOTE(review): bound-method call; sibling methods use the equivalent
        # biorbd.Model.ForwardDynamicsConstraintsDirect(nlp.model, ...) form.
        qddot = nlp.model.ForwardDynamicsConstraintsDirect(q, qdot, tau).to_mx()
        q_dot = nlp.model.computeQdot(q, qdot).to_mx()
        qdot_reduced = nlp.mapping["q"].reduce.map(q_dot)
        qddot_reduced = nlp.mapping["q_dot"].reduce.map(qddot)
        return vertcat(qdot_reduced, qddot_reduced)
    @staticmethod
    def forward_dynamics_torque_muscle_driven(states, controls, parameters, nlp):
        """
        Forward dynamics (q, qdot, tau -> qddot) without external forces, driven by
        residual joint torques plus muscle activations (controls).
        :param states: States. (MX.sym from CasADi)
        :param controls: Controls (residual torques then muscle activations). (MX.sym from CasADi)
        :param parameters: The MX associated to the parameters
        :param nlp: An OptimalControlProgram class.
        :return: Vertcat of derived states. (MX.sym from CasADi)
        """
        DynamicsFunctions.apply_parameters(parameters, nlp)
        q, qdot, residual_tau = DynamicsFunctions.dispatch_q_qdot_tau_data(states, controls, nlp)
        muscles_states = biorbd.VecBiorbdMuscleState(nlp.shape["muscle"])
        # Muscle activations are the controls that follow the torque entries.
        muscles_activations = controls[nlp.shape["tau"] :]
        for k in range(nlp.shape["muscle"]):
            muscles_states[k].setActivation(muscles_activations[k])
        muscles_tau = nlp.model.muscularJointTorque(muscles_states, q, qdot).to_mx()
        # Total joint torque = muscle-generated torque + residual actuator torque.
        tau = muscles_tau + residual_tau
        qddot = biorbd.Model.ForwardDynamics(nlp.model, q, qdot, tau).to_mx()
        q_dot = nlp.model.computeQdot(q, qdot).to_mx()
        qdot_reduced = nlp.mapping["q"].reduce.map(q_dot)
        qddot_reduced = nlp.mapping["q_dot"].reduce.map(qddot)
        return vertcat(qdot_reduced, qddot_reduced)
    @staticmethod
    def forward_dynamics_muscle_activations_and_torque_driven_with_contact(states, controls, parameters, nlp):
        """
        Forward dynamics with contact constraints, driven by residual joint torques
        plus muscle activations (controls).
        :param states: States. (MX.sym from CasADi)
        :param controls: Controls (residual torques then muscle activations). (MX.sym from CasADi)
        :param parameters: The MX associated to the parameters
        :param nlp: An OptimalControlProgram class.
        :return: Vertcat of derived states. (MX.sym from CasADi)
        """
        DynamicsFunctions.apply_parameters(parameters, nlp)
        q, qdot, residual_tau = DynamicsFunctions.dispatch_q_qdot_tau_data(states, controls, nlp)
        muscles_states = biorbd.VecBiorbdMuscleState(nlp.shape["muscle"])
        # Muscle activations are the controls that follow the torque entries.
        muscles_activations = controls[nlp.shape["tau"] :]
        for k in range(nlp.shape["muscle"]):
            muscles_states[k].setActivation(muscles_activations[k])
        muscles_tau = nlp.model.muscularJointTorque(muscles_states, q, qdot).to_mx()
        # Total joint torque = muscle-generated torque + residual actuator torque.
        tau = muscles_tau + residual_tau
        qddot = biorbd.Model.ForwardDynamicsConstraintsDirect(nlp.model, q, qdot, tau).to_mx()
        q_dot = nlp.model.computeQdot(q, qdot).to_mx()
        qdot_reduced = nlp.mapping["q"].reduce.map(q_dot)
        qddot_reduced = nlp.mapping["q_dot"].reduce.map(qddot)
        return vertcat(qdot_reduced, qddot_reduced)
    @staticmethod
    def forces_from_forward_dynamics_muscle_activations_and_torque_driven_with_contact(
        states, controls, parameters, nlp
    ):
        """
        Returns contact forces computed from constrained forward dynamics when the
        controls are residual torques plus muscle activations.
        :param states: States. (MX.sym from CasADi)
        :param controls: Controls (residual torques then muscle activations). (MX.sym from CasADi)
        :param parameters: The MX associated to the parameters
        :param nlp: An OptimalControlProgram class.
        :return: Contact forces. (MX.sym from CasADi)
        """
        DynamicsFunctions.apply_parameters(parameters, nlp)
        q, qdot, residual_tau = DynamicsFunctions.dispatch_q_qdot_tau_data(states, controls, nlp)
        muscles_states = biorbd.VecBiorbdMuscleState(nlp.shape["muscle"])
        # Muscle activations are the controls that follow the torque entries.
        muscles_activations = controls[nlp.shape["tau"] :]
        for k in range(nlp.shape["muscle"]):
            muscles_states[k].setActivation(muscles_activations[k])
        muscles_tau = nlp.model.muscularJointTorque(muscles_states, q, qdot).to_mx()
        tau = muscles_tau + residual_tau
        cs = nlp.model.getConstraints()
        # Solving with the constraint set populates `cs` with the contact forces.
        biorbd.Model.ForwardDynamicsConstraintsDirect(nlp.model, q, qdot, tau, cs)
        return cs.getForce().to_mx()
    @staticmethod
    def forward_dynamics_muscle_activations_driven(states, controls, parameters, nlp):
        """
        Forward dynamics driven purely by muscle activations (the controls).
        :param states: States (q then qdot). (MX.sym from CasADi)
        :param controls: Controls (muscle activations). (MX.sym from CasADi)
        :param parameters: The MX associated to the parameters
        :param nlp: An OptimalControlProgram class.
        :return: Vertcat of derived states. (MX.sym from CasADi)
        """
        DynamicsFunctions.apply_parameters(parameters, nlp)
        # Split the state vector into generalized coordinates and velocities.
        nq = nlp.mapping["q"].reduce.len
        q = nlp.mapping["q"].expand.map(states[:nq])
        qdot = nlp.mapping["q_dot"].expand.map(states[nq:])
        muscles_states = biorbd.VecBiorbdMuscleState(nlp.shape["muscle"])
        muscles_activations = controls
        for k in range(nlp.shape["muscle"]):
            muscles_states[k].setActivation(muscles_activations[k])
        muscles_tau = nlp.model.muscularJointTorque(muscles_states, q, qdot).to_mx()
        # NOTE(review): this uses the constrained forward dynamics even though the
        # method name carries no "with_contact" suffix — confirm this is intended.
        qddot = biorbd.Model.ForwardDynamicsConstraintsDirect(nlp.model, q, qdot, muscles_tau).to_mx()
        q_dot = nlp.model.computeQdot(q, qdot).to_mx()
        qdot_reduced = nlp.mapping["q"].reduce.map(q_dot)
        qddot_reduced = nlp.mapping["q_dot"].reduce.map(qddot)
        return vertcat(qdot_reduced, qddot_reduced)
@staticmethod
def forward_dynamics_muscle_excitations_driven(states, controls, parameters, nlp):
    """
    Forward dynamics (q, qdot, qddot -> tau) without external forces driven by muscle excitation (controls).
    :param states: States (q, q_dot, then muscle activations). (MX.sym from CasADi)
    :param controls: Controls (muscle excitations only). (MX.sym from CasADi)
    :param parameters: Parameters applied to the model before the dynamics. (MX.sym from CasADi)
    :param nlp: An OptimalControlProgram class.
    :return: Vertcat of derived states. (MX.sym from CasADi)
    """
    DynamicsFunctions.apply_parameters(parameters, nlp)
    nq = nlp.mapping["q"].reduce.len
    q = nlp.mapping["q"].expand.map(states[:nq])
    # NOTE(review): states[nq:] also carries the muscle activation states; this assumes the
    # q_dot mapping only picks its own indices and ignores the trailing entries — confirm.
    qdot = nlp.mapping["q_dot"].expand.map(states[nq:])
    muscles_states = biorbd.VecBiorbdMuscleState(nlp.shape["muscle"])
    muscles_excitation = controls
    # Muscle activations are states, stacked after q and q_dot.
    muscles_activations = states[nlp.shape["q"] + nlp.shape["q_dot"] :]
    for k in range(nlp.shape["muscle"]):
        muscles_states[k].setExcitation(muscles_excitation[k])
        muscles_states[k].setActivation(muscles_activations[k])
    # Activation dynamics: derivative of the activations given the excitations.
    muscles_activations_dot = nlp.model.activationDot(muscles_states).to_mx()
    muscles_tau = nlp.model.muscularJointTorque(muscles_states, q, qdot).to_mx()
    # NOTE(review): contact-constrained solver used in a dynamics documented as
    # "without external forces" — confirm plain ForwardDynamics was not intended.
    qddot = biorbd.Model.ForwardDynamicsConstraintsDirect(nlp.model, q, qdot, muscles_tau).to_mx()
    q_dot = nlp.model.computeQdot(q, qdot).to_mx()
    qdot_reduced = nlp.mapping["q"].reduce.map(q_dot)
    qddot_reduced = nlp.mapping["q_dot"].reduce.map(qddot)
    return vertcat(qdot_reduced, qddot_reduced, muscles_activations_dot)
@staticmethod
def forward_dynamics_muscle_excitations_and_torque_driven(states, controls, parameters, nlp):
    """
    Forward dynamics (q, qdot, qddot -> tau) without external forces driven by muscle excitation
    and joint torques (controls).
    :param states: States (q, q_dot, then muscle activations). (MX.sym from CasADi)
    :param controls: Controls (residual torques then muscle excitations). (MX.sym from CasADi)
    :param parameters: Parameters applied to the model before the dynamics. (MX.sym from CasADi)
    :param nlp: An OptimalControlProgram class.
    :return: Vertcat of derived states. (MX.sym from CasADi)
    """
    DynamicsFunctions.apply_parameters(parameters, nlp)
    q, qdot, residual_tau = DynamicsFunctions.dispatch_q_qdot_tau_data(states, controls, nlp)
    muscles_states = biorbd.VecBiorbdMuscleState(nlp.shape["muscle"])
    # Excitations follow the residual torques in the control vector.
    muscles_excitation = controls[nlp.shape["tau"] :]
    # Muscle activations are states, stacked after q and q_dot.
    muscles_activations = states[nlp.shape["q"] + nlp.shape["q_dot"] :]
    for k in range(nlp.shape["muscle"]):
        muscles_states[k].setExcitation(muscles_excitation[k])
        muscles_states[k].setActivation(muscles_activations[k])
    muscles_activations_dot = nlp.model.activationDot(muscles_states).to_mx()
    muscles_tau = nlp.model.muscularJointTorque(muscles_states, q, qdot).to_mx()
    # Total joint torque: muscle-generated plus residual actuator torque.
    tau = muscles_tau + residual_tau
    # NOTE(review): this "without external forces" dynamics calls the contact-constrained
    # solver ForwardDynamicsConstraintsDirect — confirm plain ForwardDynamics was not intended.
    qddot = biorbd.Model.ForwardDynamicsConstraintsDirect(nlp.model, q, qdot, tau).to_mx()
    q_dot = nlp.model.computeQdot(q, qdot).to_mx()
    qdot_reduced = nlp.mapping["q"].reduce.map(q_dot)
    qddot_reduced = nlp.mapping["q_dot"].reduce.map(qddot)
    return vertcat(qdot_reduced, qddot_reduced, muscles_activations_dot)
@staticmethod
def forward_dynamics_muscle_excitations_and_torque_driven_with_contact(states, controls, parameters, nlp):
    """
    Forward dynamics (q, qdot, qddot -> tau) with contact force driven by muscle excitation and
    joint torques (controls).
    :param states: States. (MX.sym from CasADi)
    :param controls: Controls. (MX.sym from CasADi)
    :param parameters: Parameters applied to the model before the dynamics. (MX.sym from CasADi)
    :param nlp: An OptimalControlProgram class.
    :return: Vertcat of derived states. (MX.sym from CasADi)
    """
    DynamicsFunctions.apply_parameters(parameters, nlp)
    q, qdot, residual_tau = DynamicsFunctions.dispatch_q_qdot_tau_data(states, controls, nlp)

    # Controls hold [residual torques, muscle excitations]; activations live in the
    # state vector after q and q_dot.
    n_muscles = nlp.shape["muscle"]
    excitations = controls[nlp.shape["tau"] :]
    activations = states[nlp.shape["q"] + nlp.shape["q_dot"] :]
    muscle_states = biorbd.VecBiorbdMuscleState(n_muscles)
    for idx in range(n_muscles):
        muscle_states[idx].setExcitation(excitations[idx])
        muscle_states[idx].setActivation(activations[idx])

    # Activation dynamics and total joint torque (muscle + residual actuator).
    activations_dot = nlp.model.activationDot(muscle_states).to_mx()
    total_tau = nlp.model.muscularJointTorque(muscle_states, q, qdot).to_mx() + residual_tau

    qddot = biorbd.Model.ForwardDynamicsConstraintsDirect(nlp.model, q, qdot, total_tau).to_mx()
    q_dot = nlp.model.computeQdot(q, qdot).to_mx()

    return vertcat(
        nlp.mapping["q"].reduce.map(q_dot),
        nlp.mapping["q_dot"].reduce.map(qddot),
        activations_dot,
    )
@staticmethod
def forces_from_forward_dynamics_muscle_excitations_and_torque_driven_with_contact(
    states, controls, parameters, nlp
):
    """
    Returns contact forces computed from forward dynamics with contact force
    (forward_dynamics_muscle_excitations_and_torque_driven_with_contact)
    :param states: States. (MX.sym from CasADi)
    :param controls: Controls. (MX.sym from CasADi)
    :param parameters: Parameters applied to the model before the dynamics. (MX.sym from CasADi)
    :param nlp: An OptimalControlProgram class.
    :return: Contact forces. (MX.sym from CasADi)
    """
    DynamicsFunctions.apply_parameters(parameters, nlp)
    q, qdot, residual_tau = DynamicsFunctions.dispatch_q_qdot_tau_data(states, controls, nlp)

    # Controls hold [residual torques, muscle excitations]; activations are the
    # trailing entries of the state vector.
    n_muscles = nlp.shape["muscle"]
    excitations = controls[nlp.shape["tau"] :]
    activations = states[nlp.shape["q"] + nlp.shape["q_dot"] :]
    muscle_states = biorbd.VecBiorbdMuscleState(n_muscles)
    for idx in range(n_muscles):
        muscle_states[idx].setExcitation(excitations[idx])
        muscle_states[idx].setActivation(activations[idx])

    total_tau = nlp.model.muscularJointTorque(muscle_states, q, qdot).to_mx() + residual_tau

    # Solving the contact-constrained dynamics fills the constraint set, from
    # which the contact forces are read back.
    contact_set = nlp.model.getConstraints()
    biorbd.Model.ForwardDynamicsConstraintsDirect(nlp.model, q, qdot, total_tau, contact_set)
    return contact_set.getForce().to_mx()
@staticmethod
def dispatch_q_qdot_tau_data(states, controls, nlp):
"""
Returns q, qdot, tau (unreduced by a potential symmetry) and qdot_reduced
from states, controls and mapping through nlp to condense this code.
:param states: States. (MX.sym from CasADi)
:param controls: Controls. (MX.sym from CasADi)
:param nlp: An OptimalControlProgram class.
:return: q -> Generalized coordinates positions. (MX.sym from CasADi),
qdot -> Generalized coordinates velocities. (MX.sym from CasADi) and
tau -> Joint torques. (MX.sym from CasADi)
"""
nq = nlp.mapping["q"].reduce.len
q = nlp.mapping["q"].expand.map(states[:nq])
qdot = nlp.mapping["q_dot"].expand.map(states[nq:])
tau = nlp.mapping["tau"].expand.map(controls[: nlp.shape["tau"]])
return q, qdot, tau
@staticmethod
def apply_parameters(mx, nlp):
for key in nlp.parameters_to_optimize:
param = nlp.parameters_to_optimize[key]
# Call the pre dynamics function
if param["func"]:
param["func"](nlp.model, mx, **param["extra_params"])
| 46.267532
| 120
| 0.679897
| 2,146
| 17,813
| 5.450606
| 0.054054
| 0.029922
| 0.029238
| 0.04873
| 0.925024
| 0.918868
| 0.911088
| 0.904249
| 0.901086
| 0.890656
| 0
| 0
| 0.213383
| 17,813
| 384
| 121
| 46.388021
| 0.834784
| 0.219952
| 0
| 0.814815
| 0
| 0
| 0.01956
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078704
| false
| 0
| 0.009259
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
edd6133ec21d64fe6ebd245320a6e855dce7552d
| 76
|
py
|
Python
|
__init__.py
|
neurodatascience/UKBB_scripts
|
4e8b74685028d76f2e45aa83c649c3eeb4134ec2
|
[
"Apache-2.0"
] | 1
|
2020-08-28T08:47:33.000Z
|
2020-08-28T08:47:33.000Z
|
__init__.py
|
neurodatascience/UKBB_scripts
|
4e8b74685028d76f2e45aa83c649c3eeb4134ec2
|
[
"Apache-2.0"
] | 1
|
2020-01-22T21:31:34.000Z
|
2020-01-22T21:31:34.000Z
|
__init__.py
|
neurodatascience/UKBB_scripts
|
4e8b74685028d76f2e45aa83c649c3eeb4134ec2
|
[
"Apache-2.0"
] | 2
|
2020-01-22T18:45:09.000Z
|
2020-08-28T08:49:00.000Z
|
import UKBB_scripts.REL_DICT
import UKBB_scripts.ukbb_structure_conversion
| 19
| 45
| 0.907895
| 11
| 76
| 5.818182
| 0.636364
| 0.3125
| 0.53125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065789
| 76
| 3
| 46
| 25.333333
| 0.901408
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
61015321a49fb76d16a59f10a69c21f0beb08e6e
| 91
|
py
|
Python
|
aernetworking/utils.py
|
Aermoss/AerNetworking
|
a5334e2762be9a32ff1146b2c3b0435a07f9b49e
|
[
"MIT"
] | 1
|
2021-09-12T11:43:15.000Z
|
2021-09-12T11:43:15.000Z
|
aernetworking/utils.py
|
Aermoss/AerNetworking
|
a5334e2762be9a32ff1146b2c3b0435a07f9b49e
|
[
"MIT"
] | null | null | null |
aernetworking/utils.py
|
Aermoss/AerNetworking
|
a5334e2762be9a32ff1146b2c3b0435a07f9b49e
|
[
"MIT"
] | null | null | null |
import socket
def get_local_ip():
    """Return the local machine's IPv4 address, resolved from its own hostname."""
    hostname = socket.gethostname()
    return socket.gethostbyname(hostname)
| 22.75
| 53
| 0.758242
| 11
| 91
| 6.090909
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 91
| 4
| 53
| 22.75
| 0.858974
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
b61f4e6cf3bc674f3901ca5169d9684d35c6d7fa
| 9,436
|
py
|
Python
|
server/lib/python/cartodb_services/test/refactor/service/test_mapzen_geocoder_config.py
|
digideskio/dataservices-api
|
246ec135dbeaa3f9a52717fdac50a4ab040ce22b
|
[
"BSD-3-Clause"
] | 22
|
2016-03-11T17:33:31.000Z
|
2021-02-22T04:00:43.000Z
|
server/lib/python/cartodb_services/test/refactor/service/test_mapzen_geocoder_config.py
|
digideskio/dataservices-api
|
246ec135dbeaa3f9a52717fdac50a4ab040ce22b
|
[
"BSD-3-Clause"
] | 338
|
2016-02-16T16:13:13.000Z
|
2022-03-30T15:50:17.000Z
|
server/lib/python/cartodb_services/test/refactor/service/test_mapzen_geocoder_config.py
|
CartoDB/dataservices-api
|
d0f28cc002ef11df9f371d5d1fd2d0901c245f97
|
[
"BSD-3-Clause"
] | 14
|
2016-09-22T15:29:33.000Z
|
2021-02-08T03:46:40.000Z
|
from unittest import TestCase
from mockredis import MockRedis
from datetime import datetime
from cartodb_services.refactor.service.mapzen_geocoder_config import *
from cartodb_services.refactor.storage.redis_config import *
from cartodb_services.refactor.storage.mem_config import InMemoryConfigStorage
class TestMapzenGeocoderUserConfig(TestCase):
    """Tests for MapzenGeocoderConfigBuilder with user-level config only (no organization)."""

    def setUp(self):
        # In-memory stand-ins for the server config and the user/org Redis storages.
        self._redis_connection = MockRedis()
        self._server_config = InMemoryConfigStorage({"server_conf": {"environment": "testing"},
                                                     "mapzen_conf":
                                                     {"geocoder":
                                                      {"api_key": "search-xxxxxxx", "monthly_quota": 1500000, "service":{"base_url":"http://base"}}
                                                      }, "logger_conf": {}})
        self._username = 'test_user'
        self._user_key = "rails:users:{0}".format(self._username)
        self._user_config = RedisUserConfigStorageBuilder(self._redis_connection,
                                                          self._username).get()
        # No organization in this test class (org name is None).
        self._org_config = RedisOrgConfigStorageBuilder(self._redis_connection,
                                                        None).get()
        self._set_default_config_values()

    def test_config_values_are_ok(self):
        config = MapzenGeocoderConfigBuilder(self._server_config,
                                             self._user_config,
                                             self._org_config,
                                             self._username,
                                             None).get()
        assert config.geocoding_quota == 100
        assert config.soft_geocoding_limit == False
        assert config.period_end_date == datetime.strptime('2016-12-31 00:00:00', "%Y-%m-%d %H:%M:%S")
        assert config.service_type == 'geocoder_mapzen'
        assert config.provider == 'mapzen'
        assert config.is_high_resolution == True
        assert config.cost_per_hit == 0
        assert config.mapzen_api_key == 'search-xxxxxxx'
        assert config.username == 'test_user'
        assert config.organization is None

    def test_quota_should_be_0_if_redis_value_is_0(self):
        self._redis_connection.hset(self._user_key, 'geocoding_quota', '0')
        config = MapzenGeocoderConfigBuilder(self._server_config,
                                             self._user_config,
                                             self._org_config,
                                             self._username,
                                             None).get()
        assert config.geocoding_quota == 0

    def test_quota_should_be_0_if_redis_value_is_empty_string(self):
        # An empty quota string must be treated as 0, not as missing/default.
        self._redis_connection.hset(self._user_key, 'geocoding_quota', '')
        config = MapzenGeocoderConfigBuilder(self._server_config,
                                             self._user_config,
                                             self._org_config,
                                             self._username,
                                             None).get()
        assert config.geocoding_quota == 0

    def test_soft_limit_should_be_true(self):
        self._redis_connection.hset(self._user_key, 'soft_geocoding_limit', 'true')
        config = MapzenGeocoderConfigBuilder(self._server_config,
                                             self._user_config,
                                             self._org_config,
                                             self._username,
                                             None).get()
        assert config.soft_geocoding_limit == True

    def test_soft_limit_should_be_false_if_is_empty_string(self):
        self._redis_connection.hset(self._user_key, 'soft_geocoding_limit', '')
        config = MapzenGeocoderConfigBuilder(self._server_config,
                                             self._user_config,
                                             self._org_config,
                                             self._username,
                                             None).get()
        assert config.soft_geocoding_limit == False

    def _set_default_config_values(self):
        # Baseline Redis values every test starts from; individual tests override them.
        self._redis_connection.hset(self._user_key, 'geocoding_quota', '100')
        self._redis_connection.hset(self._user_key, 'soft_geocoding_limit', 'false')
        self._redis_connection.hset(self._user_key, 'period_end_date', '2016-12-31 00:00:00')

    def test_config_service_values(self):
        config = MapzenGeocoderConfigBuilder(self._server_config,
                                             self._user_config,
                                             self._org_config,
                                             self._username,
                                             None).get()
        assert config.service_params == {"base_url":"http://base"}
class TestMapzenGeocoderOrgConfig(TestCase):
    """Tests for MapzenGeocoderConfigBuilder when an organization config overlays the user config."""

    def setUp(self):
        # In-memory stand-ins for the server config and the user/org Redis storages.
        self._redis_connection = MockRedis()
        self._server_config = InMemoryConfigStorage({"server_conf": {"environment": "testing"},
                                                     "mapzen_conf":
                                                     {"geocoder":
                                                      {"api_key": "search-xxxxxxx", "monthly_quota": 1500000}
                                                      }, "logger_conf": {}})
        self._username = 'test_user'
        self._organization = 'test_org'
        self._user_key = "rails:users:{0}".format(self._username)
        self._org_key = "rails:orgs:{0}".format(self._organization)
        self._user_config = RedisUserConfigStorageBuilder(self._redis_connection,
                                                          self._username).get()
        self._org_config = RedisOrgConfigStorageBuilder(self._redis_connection,
                                                        self._organization).get()
        self._set_default_config_values()

    def test_config_org_values_are_ok(self):
        config = MapzenGeocoderConfigBuilder(self._server_config,
                                             self._user_config,
                                             self._org_config,
                                             self._username,
                                             self._organization).get()
        # Org quota (200) takes precedence over the user quota (100).
        assert config.geocoding_quota == 200
        assert config.soft_geocoding_limit == False
        assert config.period_end_date == datetime.strptime('2016-12-31 00:00:00', "%Y-%m-%d %H:%M:%S")
        assert config.service_type == 'geocoder_mapzen'
        assert config.provider == 'mapzen'
        assert config.is_high_resolution == True
        assert config.cost_per_hit == 0
        assert config.mapzen_api_key == 'search-xxxxxxx'
        assert config.username == 'test_user'
        # Bug fix: the original used `is 'test_org'`, an identity comparison with a
        # string literal that only passes due to CPython interning (and raises a
        # SyntaxWarning on Python >= 3.8). Equality is the intended check.
        assert config.organization == 'test_org'

    def test_quota_should_be_0_if_redis_value_is_0(self):
        self._redis_connection.hset(self._org_key, 'geocoding_quota', '0')
        config = MapzenGeocoderConfigBuilder(self._server_config,
                                             self._user_config,
                                             self._org_config,
                                             self._username,
                                             self._organization).get()
        assert config.geocoding_quota == 0

    def test_quota_should_use_user_quota_value_if_redis_value_is_empty_string(self):
        # An empty org quota falls back to the user quota (100).
        self._redis_connection.hset(self._org_key, 'geocoding_quota', '')
        config = MapzenGeocoderConfigBuilder(self._server_config,
                                             self._user_config,
                                             self._org_config,
                                             self._username,
                                             self._organization).get()
        assert config.geocoding_quota == 100

    def test_quota_should_be_0_if_both_user_and_org_have_empty_string(self):
        self._redis_connection.hset(self._user_key, 'geocoding_quota', '')
        self._redis_connection.hset(self._org_key, 'geocoding_quota', '')
        config = MapzenGeocoderConfigBuilder(self._server_config,
                                             self._user_config,
                                             self._org_config,
                                             self._username,
                                             self._organization).get()
        assert config.geocoding_quota == 0

    def _set_default_config_values(self):
        # Baseline Redis values every test starts from; individual tests override them.
        self._redis_connection.hset(self._user_key, 'geocoding_quota', '100')
        self._redis_connection.hset(self._user_key, 'soft_geocoding_limit', 'false')
        self._redis_connection.hset(self._user_key, 'period_end_date', '2016-12-15 00:00:00')
        self._redis_connection.hset(self._org_key, 'geocoding_quota', '200')
        self._redis_connection.hset(self._org_key, 'period_end_date', '2016-12-31 00:00:00')

    def test_config_default_service_values(self):
        # No "service" entry in the server conf -> service_params defaults to {}.
        config = MapzenGeocoderConfigBuilder(self._server_config,
                                             self._user_config,
                                             self._org_config,
                                             self._username,
                                             self._organization).get()
        assert config.service_params == {}
| 55.181287
| 147
| 0.532323
| 853
| 9,436
| 5.45721
| 0.118406
| 0.070892
| 0.089796
| 0.079055
| 0.906122
| 0.900537
| 0.889366
| 0.844468
| 0.844468
| 0.816327
| 0
| 0.020546
| 0.386181
| 9,436
| 170
| 148
| 55.505882
| 0.783149
| 0
| 0
| 0.768212
| 0
| 0
| 0.085947
| 0
| 0
| 0
| 0
| 0
| 0.192053
| 1
| 0.099338
| false
| 0
| 0.039735
| 0
| 0.152318
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fcd2b012845b6cc7aab1b0acc7aaa534dcf8eadd
| 4,995
|
py
|
Python
|
gym-foodhunting/gym_foodhunting/__init__.py
|
yoshi-ono/PyLIS
|
c6e1df91f4f7828837a4ce0bb04e29adeb450ff3
|
[
"Apache-2.0"
] | 18
|
2019-05-15T08:31:01.000Z
|
2022-01-22T13:21:54.000Z
|
gym-foodhunting/gym_foodhunting/__init__.py
|
yoshi-ono/PyLIS
|
c6e1df91f4f7828837a4ce0bb04e29adeb450ff3
|
[
"Apache-2.0"
] | 2
|
2019-05-08T08:39:58.000Z
|
2021-07-28T09:14:27.000Z
|
gym-foodhunting/gym_foodhunting/__init__.py
|
yoshi-ono/PyLIS
|
c6e1df91f4f7828837a4ce0bb04e29adeb450ff3
|
[
"Apache-2.0"
] | 7
|
2019-11-24T02:28:11.000Z
|
2022-03-14T17:08:51.000Z
|
from gym.envs.registration import register
from gym_foodhunting.foodhunting.gym_foodhunting import R2D2, R2D2Simple, R2D2Discrete, HSR, HSRSimple, HSRDiscrete
# FoodHunting environment registrations (R2D2 and HSR robots).
#
# Every environment shares the same entry point and object_size; only the id,
# the robot model, rendering, step limits and scenario geometry vary. The
# registrations are therefore driven by a single table instead of fourteen
# near-identical register() calls, so adding a variant is a one-line change.

_ENTRY_POINT = 'gym_foodhunting.foodhunting:FoodHuntingEnv'

# (id, robot_model, render, max_steps, num_foods, num_fakes,
#  object_radius_scale, object_radius_offset, object_angle_scale)
_ENV_SPECS = [
    # FoodHunting R2D2
    ('FoodHunting-v0', R2D2Simple, False, 50, 2, 0, 1.0, 1.0, 1.0),
    ('FoodHuntingGUI-v0', R2D2Simple, True, 50, 2, 0, 1.0, 1.0, 1.0),
    ('FoodHuntingDiscrete-v0', R2D2Discrete, False, 50, 2, 0, 1.0, 1.0, 1.0),
    ('FoodHuntingDiscreteGUI-v0', R2D2Discrete, True, 50, 2, 0, 1.0, 1.0, 1.0),
    # FoodHunting HSR
    ('FoodHuntingHSR-v0', HSRSimple, False, 50, 2, 0, 1.0, 1.0, 1.0),
    ('FoodHuntingHSRGUI-v0', HSRSimple, True, 50, 2, 0, 1.0, 1.0, 1.0),
    ('FoodHuntingHSR-v1', HSRSimple, False, 50, 1, 1, 0.0, 1.5, 0.25),
    ('FoodHuntingHSRGUI-v1', HSRSimple, True, 50, 1, 0, 0.0, 1.5, 0.25),
    ('FoodHuntingHSRDiscrete-v0', HSRDiscrete, False, 50, 2, 0, 1.0, 1.0, 1.0),
    ('FoodHuntingHSRDiscreteGUI-v0', HSRDiscrete, True, 50, 2, 0, 1.0, 1.0, 1.0),
    ('FoodHuntingHSRDiscrete-v1', HSRDiscrete, False, 50, 1, 1, 0.0, 1.5, 0.25),
    ('FoodHuntingHSRDiscreteGUI-v1', HSRDiscrete, True, 50, 1, 1, 0.0, 1.5, 0.25),
    ('FoodHuntingHSRTestGUI-v0', HSR, True, 10000, 5, 5, 1.0, 1.0, 1.0),
    ('FoodHuntingHSRTestGUI-v1', HSR, True, 10000, 1, 1, 0.0, 1.5, 0.25),
]

for _id, _model, _render, _max_steps, _foods, _fakes, _r_scale, _r_offset, _a_scale in _ENV_SPECS:
    register(
        id=_id,
        entry_point=_ENTRY_POINT,
        # max_episode_steps mirrors the env's own max_steps so gym truncates in sync.
        max_episode_steps=_max_steps,
        kwargs={
            'render': _render,
            'robot_model': _model,
            'max_steps': _max_steps,
            'num_foods': _foods,
            'num_fakes': _fakes,
            'object_size': 0.5,
            'object_radius_scale': _r_scale,
            'object_radius_offset': _r_offset,
            'object_angle_scale': _a_scale,
        },
    )
| 48.495146
| 210
| 0.722122
| 695
| 4,995
| 4.884892
| 0.07482
| 0.065979
| 0.037113
| 0.098969
| 0.865096
| 0.865096
| 0.865096
| 0.865096
| 0.865096
| 0.865096
| 0
| 0.055027
| 0.115916
| 4,995
| 102
| 211
| 48.970588
| 0.713768
| 0.006406
| 0
| 0.488372
| 0
| 0
| 0.496371
| 0.159073
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.023256
| 0
| 0.023256
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fcfcc4924a1eea0625c1cba1b47e0c8a3a98edd0
| 11,668
|
py
|
Python
|
terraform_compliance/steps/then/it_must_contain_something.py
|
Miouge1/cli
|
c40a7901f237c5606f3f7afb3c8ad3ef69e716a5
|
[
"MIT"
] | 866
|
2017-04-03T09:05:27.000Z
|
2021-01-01T07:43:32.000Z
|
terraform_compliance/steps/then/it_must_contain_something.py
|
Miouge1/cli
|
c40a7901f237c5606f3f7afb3c8ad3ef69e716a5
|
[
"MIT"
] | 405
|
2017-04-02T16:51:44.000Z
|
2021-01-04T10:45:05.000Z
|
terraform_compliance/steps/then/it_must_contain_something.py
|
Miouge1/cli
|
c40a7901f237c5606f3f7afb3c8ad3ef69e716a5
|
[
"MIT"
] | 110
|
2017-04-03T08:53:02.000Z
|
2020-12-21T14:09:34.000Z
|
# -*- coding: utf-8 -*-
from terraform_compliance.common.helper import (
seek_key_in_dict, # importing this purely because the unit tests require it to exist in global scope
Null
)
from terraform_compliance.common.error_handling import Error
def it_must_contain_something(_step_obj, something, inherited_values=Null, child=False):
    """Assert that every stashed resource/provider exposes the property `something`.

    On success, narrows the context stash to the found property values and records
    the property name; on failure raises via Error. `child=True` marks a recursive
    call on a nested list, which must not error when nothing is found.
    """
    match = _step_obj.context.match
    seek_key_in_dict, seek_regex_key_in_dict_values = match.seek_key_in_dict, match.seek_regex_key_in_dict_values

    prop_list = []

    # An explicit inherited_values (from a recursive child call) replaces the stash.
    _step_obj.context.stash = inherited_values if inherited_values is not Null else _step_obj.context.stash

    if _step_obj.context.type in ('resource', 'data'):
        for resource in _step_obj.context.stash:
            # Normalize bare values into the {values, address, type} resource shape.
            if not isinstance(resource, dict) \
                    or 'values' not in resource \
                    or 'address' not in resource \
                    or 'type' not in resource:
                resource = {'values': resource,
                            'address': resource,
                            'type': _step_obj.context.name}

            # not going to use match.get here because the following line is an edge case
            values = resource.get('values', resource.get('expressions', {}))
            if not values:
                values = seek_key_in_dict(resource, something)

            found_values = []
            found_key = Null  # this could also become a list
            resource_passed = False
            # set this to True if you get anything from the resource, don't set it to False if you get empty values as there could be other values as well

            if isinstance(values, dict):
                found_key = match.get(values, something, Null)
                if found_key is not Null:
                    found_key = [{something: found_key}]
                else:
                    found_key = seek_key_in_dict(values, something)

                for kv_pair in found_key:
                    # kv_pair must be in {something: found_key} format.
                    if not isinstance(kv_pair, dict):
                        continue  # should raise exception
                    # ignore the values that correspond to Null
                    # Following line could be problematic, how to determine if something is set to be empty or not set? Behavior is provider dependent.
                    # For now, allow '' and don't allow [] as per user cases.
                    if match.get(kv_pair, something) not in ([],):
                        found_values.append(match.get(kv_pair, something))
                        resource_passed = True

            elif isinstance(values, list):
                for value in values:
                    if isinstance(value, dict):
                        # First search in the keys
                        found_key = seek_key_in_dict(value, something)
                        # The following is an edge case that covers things like aws asg tags (https://www.terraform.io/docs/providers/aws/r/autoscaling_group.html)
                        # Then search in the values with 'key'
                        if not found_key:
                            found_key = seek_regex_key_in_dict_values(value, 'key', something)
                            if found_key:
                                found_key = found_key[0]
                                # not going to use match.get here because the following line is an edge case
                                found_values.extend(value.get('value'))
                                resource_passed = True
                                continue
                    elif isinstance(value, list):
                        # Recurse into nested lists; child=True suppresses errors there.
                        _, temp_found_values = it_must_contain_something(_step_obj, something, value, child=True)
                        prop_list.extend(temp_found_values)
                        resource_passed = True
                    elif isinstance(value, (str, bool, int, float)):
                        if match.equals(value, something):
                            found_values.append(value)
                            resource_passed = True

            if found_key is not Null and len(found_key):
                for found_key_instance in found_key:
                    if isinstance(found_key_instance, dict):
                        if match.get(found_key_instance, something, Null) not in (Null, [], '', {}):
                            found_values.append(match.get(found_key_instance, something))
                            resource_passed = True

            # Unwrap terraform plan 'constant_value' wrappers.
            for i, found_val in enumerate(found_values):
                if isinstance(found_val, dict) and 'constant_value' in found_val:
                    found_values[i] = found_val['constant_value']

            for found_val in found_values:
                prop_list.append({'address': resource['address'],
                                  'values': found_val,
                                  'type': _step_obj.context.name})

            # do not check prop list here because every resource should contain it.
            if not resource_passed and not child:  # if nothing was found in this resource, don't error if you're a child
                Error(_step_obj, '{} ({}) does not have {} property.'.format(resource['address'],
                                                                             resource.get('type', ''),
                                                                             something))

        if prop_list:
            _step_obj.context.stash = prop_list
            _step_obj.context.property_name = something
            return something, prop_list

    elif _step_obj.context.type == 'provider':
        prop_list = []
        for provider_data in _step_obj.context.stash:
            values = seek_key_in_dict(provider_data, something)

            if values:
                prop_list.extend(values)
                _step_obj.context.property_name = something
                _step_obj.context.address = '{}.{}'.format(provider_data.get('name', _step_obj.context.addresses),
                                                           provider_data.get('alias', "\b"))
            else:
                Error(_step_obj, '{} {} does not have {} property.'.format(_step_obj.context.addresses,
                                                                           _step_obj.context.type,
                                                                           something))
        if prop_list:
            _step_obj.context.stash = prop_list
            return True

    # Reached when nothing matched at all: fail the step.
    Error(_step_obj, '{} {} does not have {} property.'.format(_step_obj.context.addresses,
                                                               _step_obj.context.type,
                                                               something))
def it_must_not_contain_something(_step_obj, something, inherited_values=Null):
match = _step_obj.context.match
seek_key_in_dict, seek_regex_key_in_dict_values = match.seek_key_in_dict, match.seek_regex_key_in_dict_values
prop_list = []
_step_obj.context.stash = inherited_values if inherited_values is not Null else _step_obj.context.stash
if _step_obj.context.type in ('resource', 'data'):
for resource in _step_obj.context.stash:
if not isinstance(resource, dict) \
or 'values' not in resource \
or 'address' not in resource \
or 'type' not in resource:
resource = {'values': resource,
'address': resource,
'type': _step_obj.context.name}
values = resource.get('values', resource.get('expressions', {}))
if not values:
values = seek_key_in_dict(resource, something)
found_values = []
found_key = Null
resource_passed = False
# set this to True if you get anything from the resource, don't set it to False if you get empty values as there could be other values as well
if isinstance(values, dict):
found_key = match.get(values, something, Null)
if found_key is not Null:
found_key = [{something: found_key}]
else:
found_key = seek_key_in_dict(values, something)
for kv_pair in found_key:
# kv_pair must be in {something: found_key} format.
if not isinstance(kv_pair, dict):
continue # could raise an exception
# ignore the values that correspond to Null
# Following line could be problematic, how to determine if something is set to be empty or not set? Behavior is provider dependent.
# For now, allow '' and don't allow [] as per user cases.
if match.get(kv_pair, something) not in ([],):
found_values.append(match.get(kv_pair, something))
resource_passed = True
elif isinstance(values, list):
for value in values:
if isinstance(value, dict):
# First search in the keys
found_key = seek_key_in_dict(value, something)
# Then search in the values with 'key'
if not found_key:
found_key = seek_regex_key_in_dict_values(value, 'key', something)
if found_key:
found_key = found_key[0]
found_values.extend(value.get('value'))
resource_passed = True
continue
elif isinstance(value, list):
_, temp_found_values = it_must_contain_something(_step_obj, something, value, child=True)
prop_list.extend(temp_found_values)
resource_passed = True
elif isinstance(value, (str, bool, int, float)):
if match.equals(value, something):
found_values.append(value)
resource_passed = True
if found_key is not Null and len(found_key):
for found_key_instance in found_key:
if isinstance(found_key_instance, dict):
if match.get(found_key_instance, something, Null) not in (Null, [], '', {}):
found_values.append(match.get(found_key_instance, something))
resource_passed = True
for i, found_val in enumerate(found_values):
if isinstance(found_val, dict) and 'constant_value' in found_val:
found_values[i] = found_val['constant_value']
if resource_passed:
Error(_step_obj, '{} property exists in {} ({}).'.format(something, resource['address'], resource.get('type', '')))
elif _step_obj.context.type == 'provider':
for provider_data in _step_obj.context.stash:
values = seek_key_in_dict(provider_data, something)
if values:
Error(_step_obj, '{} {} does not have {} property.'.format(_step_obj.context.addresses,
_step_obj.context.type,
something))
| 50.077253
| 163
| 0.52434
| 1,252
| 11,668
| 4.646166
| 0.126198
| 0.057762
| 0.069795
| 0.029053
| 0.862816
| 0.849407
| 0.827059
| 0.826027
| 0.802132
| 0.802132
| 0
| 0.000432
| 0.404268
| 11,668
| 232
| 164
| 50.293103
| 0.836426
| 0.134813
| 0
| 0.872727
| 0
| 0
| 0.043297
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012121
| false
| 0.084848
| 0.012121
| 0
| 0.036364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
1e00557e5fdea75a7a2c2a7bfca405f20f1f82c1
| 60,259
|
py
|
Python
|
atomsci/ddm/utils/data_curation_functions.py
|
vgutta/AMPL
|
46759aa84fd6acfc14facad0e14cb05a43d2e309
|
[
"MIT"
] | 77
|
2019-11-17T01:15:36.000Z
|
2021-10-19T07:51:03.000Z
|
atomsci/ddm/utils/data_curation_functions.py
|
vgutta/AMPL
|
46759aa84fd6acfc14facad0e14cb05a43d2e309
|
[
"MIT"
] | 39
|
2019-12-16T22:21:54.000Z
|
2021-09-30T16:31:12.000Z
|
atomsci/ddm/utils/data_curation_functions.py
|
vgutta/AMPL
|
46759aa84fd6acfc14facad0e14cb05a43d2e309
|
[
"MIT"
] | 41
|
2019-11-24T03:40:32.000Z
|
2021-08-17T22:06:07.000Z
|
"""
data_curation_functions.py
Extract Kevin's functions for curation of public datasets
Modify them to match Jonathan's curation methods in notebook
01/30/2020
"""
import os
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib_venn import venn3
import seaborn as sns
import pdb
from atomsci.ddm.utils.struct_utils import base_smiles_from_smiles
import atomsci.ddm.utils.datastore_functions as dsf
#from atomsci.ddm.utils import datastore_functions as dsf
from atomsci.ddm.utils import curate_data as curate
import atomsci.ddm.utils.struct_utils as struct_utils
import atomsci.ddm.utils.curate_data as curate_data, imp
def set_data_root(dir):
    """Record the root data directory in module-level globals.

    Stores *dir* in the global ``data_root`` and derives the global
    ``data_dirs`` mapping, which points 'DTC' and 'Excape' at the 'dtc'
    and 'excape' subdirectories of the root.

    Args:
        dir (str): Root data directory containing folders 'dtc' and 'excape'.

    Returns:
        None
    """
    global data_root, data_dirs
    data_root = dir
    data_dirs = {
        'DTC': '%s/dtc' % data_root,
        'Excape': '%s/excape' % data_root,
    }
# Maps raw measurement types to the log-scale response column names used
# downstream (e.g. IC50 values are modeled as pIC50).
log_var_map = {
    'IC50': 'pIC50',
    'AC50': 'pIC50',
    'Solubility': 'logSolubility',
    'CL': 'logCL'
}
# Public datasets handled by this module: maps target name to a dict of
# {measurement type: dataset file prefix}.
pub_dsets = dict(
    CYP2D6 = dict(IC50='cyp2d6'),
    CYP3A4 = dict(IC50='cyp3a4'),
    JAK1 = dict(IC50="jak1"),
    JAK2 = dict(IC50="jak2"),
    JAK3 = dict(IC50="jak3"),
)
# ----------------------------------------------------------------------------------------------------------------------
# Generic functions for all datasets
# ----------------------------------------------------------------------------------------------------------------------
# Note: Functions freq_table and labeled_freq_table have been moved to ddm.utils.curate_data module.
# ----------------------------------------------------------------------------------------------------------------------
def standardize_relations(dset_df, db='DTC'):
    """ Standardizes censoring operators

    Standardize the censoring operators to =, < or >, and remove any rows whose
    operators don't map to a standard one: ">" and ">=" map to ">", "<" and "<="
    map to "<", and "=" maps to "=". Missing relations are treated as "=".
    There is a special case for db='ChEMBL' that strips the extra "'"s around
    relationship symbols. Assumes relationship columns are 'Standard Relation'
    and 'standard_relation' for ChEMBL and DTC respectively.

    Args:
        dset_df (DataFrame): Input DataFrame. Must contain either 'Standard Relation'
            or 'standard_relation'

        db (str): Source database. Must be either 'DTC' or 'ChEMBL'

    Returns:
        DataFrame: Dataframe with the standardized relationship symbols

    Raises:
        KeyError: If db is not 'DTC' or 'ChEMBL'.
    """
    relation_cols = dict(ChEMBL='Standard Relation', DTC='standard_relation')
    rel_col = relation_cols[db]
    # Assign the filled column back instead of fillna(inplace=True) on the
    # column selection: in-place fillna on a selection can operate on a
    # temporary copy and silently fail to update dset_df (chained assignment).
    dset_df[rel_col] = dset_df[rel_col].fillna('=')
    ops = dset_df[rel_col].values
    if db == 'ChEMBL':
        # Remove annoying quotes around operators
        ops = [op.strip("'") for op in ops]
    op_dict = {
        ">": ">",
        ">=": ">",
        "<": "<",
        "<=": "<",
        "=": "="
    }
    # Unrecognized relations map to the sentinel '@' and are dropped below.
    ops = np.array([op_dict.get(op, "@") for op in ops])
    dset_df[rel_col] = ops
    dset_df = dset_df[dset_df[rel_col] != "@"]
    return dset_df
# ----------------------------------------------------------------------------------------------------------------------
# DTC-specific curation functions
# ----------------------------------------------------------------------------------------------------------------------
def upload_file_dtc_raw_data(dset_name, title, description, tags,
                       functional_area,
                       target, target_type, activity, assay_category,file_path,
                       data_origin='journal', species='human',
                       force_update=False):
    """Upload a raw DTC data file to the datastore.

    Stores the file at *file_path* in the public bucket under
    '<dset_name>.csv' with a standard set of key/value metadata, listing
    https://doi.org/10.1016/j.chembiol.2017.11.009 as the doi and
    'compound_id' as the id column. If the dataset key already exists in
    the datastore, the upload is skipped unless force_update is set.

    Args:
        dset_name (str): Name of the dataset. Should not include a file extension.
        title (str): Title of the file (human friendly format).
        description (str): Long text describing the file (background/use notes).
        tags (list): Must be a list of strings.
        functional_area (str): The functional area.
        target (str): The target.
        target_type (str): The target type of the dataset.
        activity (str): The activity of the dataset.
        assay_category (str): The assay category of the dataset.
        file_path (str): The filepath of the dataset.
        data_origin (str): The origin of the dataset e.g. journal.
        species (str): The species of the dataset e.g. human, rat, dog.
        force_update (bool): Overwrite existing datasets in the datastore.

    Returns:
        str: datastore OID of the uploaded dataset.
    """
    bucket_name = 'public'
    filename = f'{dset_name}.csv'
    dataset_key = 'dskey_' + filename
    # Key/value metadata attached to the datastore entry.
    kv = {
        'file_category': 'experimental',
        'activity': activity,
        'assay_category': assay_category,
        'assay_endpoint': 'multiple values',
        'curation_level': 'raw',
        'data_origin': data_origin,
        'functional_area': functional_area,
        'matrix': 'multiple values',
        'journal_doi': 'https://doi.org/10.1016/j.chembiol.2017.11.009',
        'sample_type': 'in_vitro',
        'species': species,
        'target': target,
        'target_type': target_type,
        'id_col': 'compound_id',
    }
    client = dsf.config_client()
    # Skip the existence check entirely when force_update is requested.
    if not force_update and dsf.dataset_key_exists(dataset_key, bucket_name, client):
        uploaded_file = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket_name, client,
                                                           return_metadata=True)
        print("Raw dataset %s is already in datastore, skipping upload." % dataset_key)
    else:
        uploaded_file = dsf.upload_file_to_DS(
            bucket=bucket_name, filepath=file_path, filename=filename,
            title=title, description=description, tags=tags, key_values=kv,
            client=None, dataset_key=dataset_key, override_check=False,
            return_metadata=True)
        print("Uploaded raw dataset with key %s" % dataset_key)
    return uploaded_file['dataset_oid']
def filter_dtc_data(orig_df,geneNames):
    """Filter DTC rows down to usable IC50 measurements for given genes.

    This is specific to the Drug Target Commons database. Rows are kept
    only when all of the following hold:

        gene_names is in geneNames
        standard_inchi_key is not missing
        standard_type == 'IC50'
        standard_units == 'NM'
        standard_value is not missing
        compound_id is not missing
        wildtype_or_mutant != 'mutated'

    Args:
        orig_df (DataFrame): Input DataFrame. Must contain the columns
            gene_names, standard_inchi_key, standard_type, standard_units,
            standard_value, compound_id, and wildtype_or_mutant.
        geneNames (list): Gene names to keep, e.g. ['JAK1', 'JAK2'].

    Returns:
        DataFrame: The filtered rows of orig_df.
    """
    keep = orig_df.gene_names.isin(geneNames)
    keep &= orig_df.standard_inchi_key.notna()
    keep &= orig_df.standard_type == 'IC50'
    keep &= orig_df.standard_units == 'NM'
    keep &= orig_df.standard_value.notna()
    keep &= orig_df.compound_id.notna()
    keep &= orig_df.wildtype_or_mutant != 'mutated'
    return orig_df[keep]
def ic50topic50(x) :
    """Convert an IC50 in nM to a pIC50.

    pIC50 = -log10(IC50 in molar); the input is assumed to be in
    nanomolar, hence the division by 1e9.

    Args:
        x (float): An IC50 value in nM.

    Returns:
        float: The pIC50.
    """
    # Removed a leftover debug print(x): this function is applied per-row
    # via Series.apply elsewhere in this module, and the print flooded
    # stdout for every element of the column.
    return -np.log10((x/1000000000.0))
def down_select(df,kv_lst) :
    """Keep only the rows matching every (column, value) constraint.

    Given a DataFrame and a list of (column, value) tuples, successively
    filters the frame so that only rows where df[column] == value for
    every pair remain.

    Args:
        df (DataFrame): An input DataFrame.
        kv_lst (list): A list of (column, value) tuples.

    Returns:
        DataFrame: Rows satisfying all of the constraints.
    """
    for column, required in kv_lst:
        df = df[df[column] == required]
    return df
def get_smiles_dtc_data(nm_df,targ_lst,save_smiles_df):
    """Select per-target DTC IC50 data and merge in SMILES strings.

    nm_df must be a DataFrame from DTC with the following columns: gene_names,
    standard_type, standard_value, standard_inchi_key, and standard_relation.
    For each target in targ_lst, rows with standard_type == 'IC50' and
    standard_value > 0 are kept — censored relations are included; the
    '='-only subset is computed solely for the printed counts. pIC50 values
    are calculated into the 'PIC50' column and SMILES strings are merged in
    from save_smiles_df on standard_inchi_key.

    Args:
        nm_df (DataFrame): Input DataFrame.
        targ_lst (list): A list of targets.
        save_smiles_df (DataFrame): A DataFrame with the columns
            'standard_inchi_key' and 'smiles'.

    Returns:
        list, Series: A list of per-target merged DataFrames and the
        standard_inchi_key values shared between all targets.
    """
    save_df={}
    for targ in targ_lst :
        # '='-only filter vs. all-relations filter (includes censored data).
        # Variable names 'jak1_*' are historical; they hold data for targ.
        lst1= [ ('gene_names',targ),('standard_type','IC50'),('standard_relation','=') ]
        lst1_tmp= [ ('gene_names',targ),('standard_type','IC50')]
        jak1_df=down_select(nm_df,lst1)
        jak1_df_tmp=down_select(nm_df,lst1_tmp)
        print(targ,"distinct compounds = only",jak1_df['standard_inchi_key'].nunique())
        print(targ,"distinct compounds <,>,=",jak1_df_tmp['standard_inchi_key'].nunique())
        ## we convert to log values so make sure there are no 0 values
        save_df[targ]=jak1_df_tmp[jak1_df_tmp['standard_value']>0]
    # Intersect inchi keys across all targets, starting from the first one.
    prev_targ=targ_lst[0]
    shared_inchi_keys=save_df[prev_targ]['standard_inchi_key']
    for it in range(1,len(targ_lst),1) :
        curr_targ=targ_lst[it]
        df=save_df[curr_targ]
        shared_inchi_keys=df[df['standard_inchi_key'].isin(shared_inchi_keys)]['standard_inchi_key']
    print("num shared compounds",shared_inchi_keys.nunique())
    # Restrict each per-target frame to the shared compounds.
    lst=[]
    for targ in targ_lst :
        df=save_df[targ]
        #print(aurka_df.shape,aurkb_df.shape, shared_inchi_keys.shape)
        lst.append(df[df['standard_inchi_key'].isin(shared_inchi_keys)])
    shared_df=pd.concat(lst)
    # Add pIC50 values
    print('Add pIC50 values.')
    print(shared_df['standard_value'])
    shared_df['PIC50']=shared_df['standard_value'].apply(ic50topic50)
    # Merge in SMILES strings
    print('Merge in SMILES strings.')
    smiles_lst=[]
    for targ in targ_lst :
        df=save_df[targ]
        # NOTE(review): this mutates the frames stored in save_df in place.
        df['PIC50']=df['standard_value'].apply(ic50topic50)
        smiles_df=df.merge(save_smiles_df,on='standard_inchi_key',suffixes=('_'+targ,'_'))
        #the file puts the SMILES string in quotes, which need to be removed
        smiles_df['smiles']=smiles_df['smiles'].str.replace('"','')
        smiles_df['rdkit_smiles']=smiles_df['smiles'].apply(struct_utils.base_smiles_from_smiles)
        # NOTE(review): this second quote-stripping pass looks redundant after
        # the one above; kept as-is.
        smiles_df['smiles']=smiles_df['smiles'].str.replace('"','')
        print(smiles_df.shape)
        print(smiles_df['standard_inchi_key'].nunique())
        smiles_lst.append(smiles_df)
    return smiles_lst, shared_inchi_keys
def get_smiles_4dtc_data(nm_df,targ_lst,save_smiles_df):
    """Select per-target DTC IC50 data, merge SMILES, and build a description.

    nm_df must be a DataFrame from DTC with the following columns: gene_names,
    standard_type, standard_value, standard_inchi_key, and standard_relation.
    For each target in targ_lst, rows with standard_type == 'IC50' are kept —
    censored relations are included, and (unlike get_smiles_dtc_data) no
    standard_value > 0 filter is applied. pIC50 values are calculated into
    the 'PIC50' column and SMILES strings are merged in from save_smiles_df.

    Args:
        nm_df (DataFrame): Input DataFrame.
        targ_lst (list): A list of targets.
        save_smiles_df (DataFrame): A DataFrame with the columns
            'standard_inchi_key' and 'smiles'.

    Returns:
        list, Series, str: A list of per-target merged DataFrames, the
        standard_inchi_key values shared between all targets, and a text
        description of the per-target compound counts.
    """
    save_df={}
    description_str = ""
    for targ in targ_lst :
        # '='-only filter vs. all-relations filter (includes censored data).
        # Variable names 'jak1_*' are historical; they hold data for targ.
        lst1= [ ('gene_names',targ),('standard_type','IC50'),('standard_relation','=') ]
        lst1_tmp= [ ('gene_names',targ),('standard_type','IC50')]
        jak1_df=down_select(nm_df,lst1)
        jak1_df_tmp=down_select(nm_df,lst1_tmp)
        print(targ,"distinct compounds = only",jak1_df['standard_inchi_key'].nunique())
        print(targ,"distinct compounds <,>,=",jak1_df_tmp['standard_inchi_key'].nunique())
        # Accumulate the printed counts into a human-readable description.
        description = '''
# '''+targ+" distinct compounds = only: "+str(jak1_df['standard_inchi_key'].nunique())+'''
# '''+targ+" distinct compounds <,>,=: "+str(jak1_df_tmp['standard_inchi_key'].nunique())
        description_str += description
        #to ignore censored data
        #save_df[targ]=jak1_df
        #to include censored data
        save_df[targ]=jak1_df_tmp
    # Intersect inchi keys across all targets, starting from the first one.
    prev_targ=targ_lst[0]
    shared_inchi_keys=save_df[prev_targ]['standard_inchi_key']
    for it in range(1,len(targ_lst),1) :
        curr_targ=targ_lst[it]
        df=save_df[curr_targ]
        shared_inchi_keys=df[df['standard_inchi_key'].isin(shared_inchi_keys)]['standard_inchi_key']
    print("num shared compounds",shared_inchi_keys.nunique())
    # Restrict each per-target frame to the shared compounds.
    lst=[]
    for targ in targ_lst :
        df=save_df[targ]
        #print(aurka_df.shape,aurkb_df.shape, shared_inchi_keys.shape)
        lst.append(df[df['standard_inchi_key'].isin(shared_inchi_keys)])
    shared_df=pd.concat(lst)
    # Add pIC50 values
    print('Add pIC50 values.')
    shared_df['PIC50']=shared_df['standard_value'].apply(ic50topic50)
    # Merge in SMILES strings
    print('Merge in SMILES strings.')
    smiles_lst=[]
    for targ in targ_lst :
        df=save_df[targ]
        # NOTE(review): this mutates the frames stored in save_df in place.
        df['PIC50']=df['standard_value'].apply(ic50topic50)
        smiles_df=df.merge(save_smiles_df,on='standard_inchi_key',suffixes=('_'+targ,'_'))
        #the file puts the SMILES string in quotes, which need to be removed
        smiles_df['smiles']=smiles_df['smiles'].str.replace('"','')
        smiles_df['rdkit_smiles']=smiles_df['smiles'].apply(struct_utils.base_smiles_from_smiles)
        # NOTE(review): this second quote-stripping pass looks redundant after
        # the one above; kept as-is.
        smiles_df['smiles']=smiles_df['smiles'].str.replace('"','')
        print("Shape of dataframe:", smiles_df.shape)
        print("Number of unique standard_inchi_key:", smiles_df['standard_inchi_key'].nunique())
        smiles_lst.append(smiles_df)
    return smiles_lst, shared_inchi_keys, description_str
def upload_df_dtc_smiles(dset_name, title, description, tags,
                       functional_area,
                       target, target_type, activity, assay_category,smiles_df,orig_fileID,
                       data_origin='journal', species='human',
                       force_update=False):
    """Upload a DTC SMILES DataFrame to the datastore.

    Stores smiles_df in the public bucket as '<dset_name>_dtc_smiles.csv'
    with standard DTC metadata, listing
    https://doi.org/10.1016/j.chembiol.2017.11.009 as the doi and
    'compound_id' as the id column. If the dataset key already exists in
    the datastore, the upload is skipped unless force_update is set.

    Args:
        dset_name (str): Name of the dataset. Should not include a file extension.
        title (str): Title of the file (human friendly format).
        description (str): Long text describing the file (background/use notes).
        tags (list): Must be a list of strings.
        functional_area (str): The functional area.
        target (str): The target.
        target_type (str): The target type of the dataset.
        activity (str): The activity of the dataset.
        assay_category (str): The assay category of the dataset.
        smiles_df (DataFrame): DataFrame containing SMILES to be uploaded.
        orig_fileID (str): Source file id used to generate smiles_df.
        data_origin (str): The origin of the dataset e.g. journal.
        species (str): The species of the dataset e.g. human, rat, dog.
        force_update (bool): Overwrite existing datasets in the datastore.

    Returns:
        str: datastore OID of the uploaded dataset.
    """
    bucket_name = 'public'
    filename = f'{dset_name}_dtc_smiles.csv'
    dataset_key = 'dskey_' + filename
    # Key/value metadata attached to the datastore entry.
    kv = {
        'file_category': 'experimental',
        'activity': activity,
        # NOTE: 'assay_category' arguably should be 'kinase_activity'.
        'assay_category': assay_category,
        'assay_endpoint': 'pic50',
        'curation_level': 'raw',
        'data_origin': data_origin,
        'functional_area': functional_area,
        'matrix': 'multiple values',
        'journal_doi': 'https://doi.org/10.1016/j.chembiol.2017.11.009',
        'sample_type': 'in_vitro',
        'species': species,
        'target': target,
        'target_type': target_type,
        'id_col': 'compound_id',
        'source_file_id': orig_fileID,
    }
    client = dsf.config_client()
    # Skip the existence check entirely when force_update is requested.
    if not force_update and dsf.dataset_key_exists(dataset_key, bucket_name, client):
        uploaded_file = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket_name, client,
                                                           return_metadata=True)
        print("Raw dataset %s is already in datastore, skipping upload." % dataset_key)
    else:
        uploaded_file = dsf.upload_df_to_DS(bucket=bucket_name, filename=filename, df=smiles_df,
                                            title=title, description=description, tags=tags,
                                            key_values=kv, client=None, dataset_key=dataset_key,
                                            override_check=False, return_metadata=True)
        print("Uploaded raw dataset with key %s" % dataset_key)
    return uploaded_file['dataset_oid']
def atom_curation(targ_lst, smiles_lst, shared_inchi_keys):
    """Apply the ATOM standard curation step to each per-target DataFrame.

    For each target, keeps only uncensored ('=') measurements, then averages
    replicate assays, removes duplicates, and drops cases with large variance
    between replicates via curate_data.average_and_remove_duplicates. Rows
    with infinite values or missing rdkit_smiles are removed afterwards.

    Args:
        targ_lst (list): A list of targets.
        smiles_lst (list): A list of DataFrames, one per target. Each must
            contain the columns gene_names, standard_type, standard_relation,
            standard_inchi_key, PIC50, and rdkit_smiles.
        shared_inchi_keys (Series): Inchi keys used in this dataset; its
            nunique() is the baseline for the dropped-compound counts.

    Returns:
        list, list: A list of curated DataFrames and a list of the number of
        compounds dropped during the curation process for each target.
    """
    # Reload in case curate_data was edited in an interactive session.
    imp.reload(curate_data)
    # Curation parameters passed to average_and_remove_duplicates; see that
    # function for their exact semantics.
    tolerance=10
    column='PIC50'; #'standard_value'
    list_bad_duplicates='No'
    max_std=1
    curated_lst=[]
    num_dropped_lst=[]
    #print(targ_lst)
    #print(smiles_lst)
    for it in range(len(targ_lst)) :
        data=smiles_lst[it]
        # Keep only uncensored ('=') measurements.
        data = data[data.standard_relation.str.strip() == '=']
        print("gene_names",data.gene_names.unique())
        print("standard_type",data.standard_type.unique())
        print("standard_relation",data.standard_relation.unique())
        print("before",data.shape)
        curated_df=curate_data.average_and_remove_duplicates (column, tolerance, list_bad_duplicates, data, max_std, compound_id='standard_inchi_key',smiles_col='rdkit_smiles')
        # (Yaru) Remove inf in curated_df
        # NOTE(review): .any(1) uses a positional axis argument that newer
        # pandas versions reject; .any(axis=1) is the future-proof form.
        curated_df = curated_df[~curated_df.isin([np.inf]).any(1)]
        # (Yaru) Remove nan on rdkit_smiles
        curated_df = curated_df.dropna(subset=['rdkit_smiles'])
        curated_lst.append(curated_df)
        # Dropped count is relative to the number of distinct shared inchi
        # keys, not to the size of this target's input frame.
        prev_cmpd_cnt=shared_inchi_keys.nunique()
        num_dropped=prev_cmpd_cnt-curated_df.shape[0]
        num_dropped_lst.append(num_dropped)
        print("After",curated_df.shape, "# of dropped compounds",num_dropped)
    return curated_lst,num_dropped_lst
def upload_df_dtc_mleqonly(dset_name, title, description, tags,
                       functional_area,
                       target, target_type, activity, assay_category,data_df,dtc_smiles_fileID,
                       data_origin='journal', species='human',
                       force_update=False):
    """Upload a curated (mleqonly) DTC DataFrame to the datastore.

    Stores data_df — expected to contain 'rdkit_smiles' and 'VALUE_NUM_mean',
    as produced by atomsci.ddm.utils.curate_data.average_and_remove_duplicates —
    in the public bucket as '<dset_name>_dtc_mleqonly.csv' with ml_ready
    regression metadata, listing
    https://doi.org/10.1016/j.chembiol.2017.11.009 as the doi and
    'compound_id' as the id column. If the dataset key already exists in the
    datastore, the upload is skipped unless force_update is set.

    Args:
        dset_name (str): Name of the dataset. Should not include a file extension.
        title (str): Title of the file (human friendly format).
        description (str): Long text describing the file (background/use notes).
        tags (list): Must be a list of strings.
        functional_area (str): The functional area.
        target (str): The target.
        target_type (str): The target type of the dataset.
        activity (str): The activity of the dataset.
        assay_category (str): The assay category of the dataset.
        data_df (DataFrame): DataFrame to be uploaded.
        dtc_smiles_fileID (str): Source file id used to generate data_df.
        data_origin (str): The origin of the dataset e.g. journal.
        species (str): The species of the dataset e.g. human, rat, dog.
        force_update (bool): Overwrite existing datasets in the datastore.

    Returns:
        str: datastore OID of the uploaded dataset.
    """
    bucket_name = 'public'
    filename = f'{dset_name}_dtc_mleqonly.csv'
    dataset_key = 'dskey_' + filename
    # Key/value metadata attached to the datastore entry.
    kv = {
        'file_category': 'experimental',
        'activity': activity,
        # NOTE: 'assay_category' arguably should be 'kinase_activity'.
        'assay_category': assay_category,
        'assay_endpoint': 'pic50',
        'curation_level': 'ml_ready',
        'data_origin': data_origin,
        'functional_area': functional_area,
        'matrix': 'multiple values',
        'journal_doi': 'https://doi.org/10.1016/j.chembiol.2017.11.009',
        'sample_type': 'in_vitro',
        'species': species,
        'target': target,
        'target_type': target_type,
        'id_col': 'compound_id',
        'response_col': 'VALUE_NUM_mean',
        'prediction_type': 'regression',
        'smiles_col': 'rdkit_smiles',
        'units': 'unitless',
        'source_file_id': dtc_smiles_fileID,
    }
    client = dsf.config_client()
    # Skip the existence check entirely when force_update is requested.
    if not force_update and dsf.dataset_key_exists(dataset_key, bucket_name, client):
        uploaded_file = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket_name, client,
                                                           return_metadata=True)
        print("Raw dataset %s is already in datastore, skipping upload." % dataset_key)
    else:
        uploaded_file = dsf.upload_df_to_DS(bucket=bucket_name, filename=filename, df=data_df,
                                            title=title, description=description, tags=tags,
                                            key_values=kv, client=None, dataset_key=dataset_key,
                                            override_check=False, return_metadata=True)
        print("Uploaded raw dataset with key %s" % dataset_key)
    return uploaded_file['dataset_oid']
def upload_df_dtc_mleqonly_class(dset_name, title, description, tags,
                       functional_area,
                       target, target_type, activity, assay_category,data_df,dtc_mleqonly_fileID,
                       data_origin='journal', species='human',
                       force_update=False):
    """Upload a binary-classification (mleqonly) DTC DataFrame to the datastore.

    Stores data_df — expected to contain 'rdkit_smiles' and 'binary_class',
    i.e. aggregated data thresholded into a binary classification dataset —
    in the public bucket as '<dset_name>_dtc_mleqonly_class.csv' with
    ml_ready classification metadata (classes 'inactive'/'active'), listing
    https://doi.org/10.1016/j.chembiol.2017.11.009 as the doi and
    'compound_id' as the id column. If the dataset key already exists in the
    datastore, the upload is skipped unless force_update is set.

    Args:
        dset_name (str): Name of the dataset. Should not include a file extension.
        title (str): Title of the file (human friendly format).
        description (str): Long text describing the file (background/use notes).
        tags (list): Must be a list of strings.
        functional_area (str): The functional area.
        target (str): The target.
        target_type (str): The target type of the dataset.
        activity (str): The activity of the dataset.
        assay_category (str): The assay category of the dataset.
        data_df (DataFrame): DataFrame to be uploaded.
        dtc_mleqonly_fileID (str): Source file id used to generate data_df.
        data_origin (str): The origin of the dataset e.g. journal.
        species (str): The species of the dataset e.g. human, rat, dog.
        force_update (bool): Overwrite existing datasets in the datastore.

    Returns:
        str: datastore OID of the uploaded dataset.
    """
    bucket_name = 'public'
    filename = f'{dset_name}_dtc_mleqonly_class.csv'
    dataset_key = 'dskey_' + filename
    # Key/value metadata attached to the datastore entry.
    kv = {
        'file_category': 'experimental',
        'activity': activity,
        # NOTE: 'assay_category' arguably should be 'kinase_activity'.
        'assay_category': assay_category,
        'assay_endpoint': 'pic50',
        'curation_level': 'ml_ready',
        'data_origin': data_origin,
        'functional_area': functional_area,
        'matrix': 'multiple values',
        'journal_doi': 'https://doi.org/10.1016/j.chembiol.2017.11.009',
        'sample_type': 'in_vitro',
        'species': species,
        'target': target,
        'target_type': target_type,
        'id_col': 'compound_id',
        'response_col': 'binary_class',
        'prediction_type': 'classification',
        'num_classes': 2,
        'class_names': ['inactive','active'],
        'smiles_col': 'rdkit_smiles',
        'units': 'unitless',
        'source_file_id': dtc_mleqonly_fileID,
    }
    client = dsf.config_client()
    # Skip the existence check entirely when force_update is requested.
    if not force_update and dsf.dataset_key_exists(dataset_key, bucket_name, client):
        uploaded_file = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket_name, client,
                                                           return_metadata=True)
        print("Raw dataset %s is already in datastore, skipping upload." % dataset_key)
    else:
        uploaded_file = dsf.upload_df_to_DS(bucket=bucket_name, filename=filename, df=data_df,
                                            title=title, description=description, tags=tags,
                                            key_values=kv, client=None, dataset_key=dataset_key,
                                            override_check=False, return_metadata=True)
        print("Uploaded raw dataset with key %s" % dataset_key)
    return uploaded_file['dataset_oid']
def upload_df_dtc_base_smiles_all(dset_name, title, description, tags,
                       functional_area,
                       target, target_type, activity, assay_category,data_df,dtc_mleqonly_fileID,
                       data_origin='journal', species='human',
                       force_update=False):
    """Upload a base-SMILES DTC DataFrame to the datastore.

    Stores data_df in the public bucket as
    '<dset_name>_dtc_base_smiles_all.csv' with ml_ready regression metadata
    (response column 'PIC50', SMILES column 'base_rdkit_smiles'), listing
    https://doi.org/10.1016/j.chembiol.2017.11.009 as the doi and
    'compound_id' as the id column. If the dataset key already exists in the
    datastore, the upload is skipped unless force_update is set.

    Args:
        dset_name (str): Name of the dataset. Should not include a file extension.
        title (str): Title of the file (human friendly format).
        description (str): Long text describing the file (background/use notes).
        tags (list): Must be a list of strings.
        functional_area (str): The functional area.
        target (str): The target.
        target_type (str): The target type of the dataset.
        activity (str): The activity of the dataset.
        assay_category (str): The assay category of the dataset.
        data_df (DataFrame): DataFrame to be uploaded.
        dtc_mleqonly_fileID (str): Source file id used to generate data_df.
        data_origin (str): The origin of the dataset e.g. journal.
        species (str): The species of the dataset e.g. human, rat, dog.
        force_update (bool): Overwrite existing datasets in the datastore.

    Returns:
        str: datastore OID of the uploaded dataset.
    """
    bucket_name = 'public'
    filename = f'{dset_name}_dtc_base_smiles_all.csv'
    dataset_key = 'dskey_' + filename
    # Key/value metadata attached to the datastore entry.
    kv = {
        'file_category': 'experimental',
        'activity': activity,
        # NOTE: 'assay_category' arguably should be 'kinase_activity'.
        'assay_category': assay_category,
        'assay_endpoint': 'pic50',
        'curation_level': 'ml_ready',
        'data_origin': data_origin,
        'functional_area': functional_area,
        'matrix': 'multiple values',
        'journal_doi': 'https://doi.org/10.1016/j.chembiol.2017.11.009',
        'sample_type': 'in_vitro',
        'species': species,
        'target': target,
        'target_type': target_type,
        'id_col': 'compound_id',
        'response_col': 'PIC50',
        'prediction_type': 'regression',
        'smiles_col': 'base_rdkit_smiles',
        'units': 'unitless',
        'source_file_id': dtc_mleqonly_fileID,
    }
    client = dsf.config_client()
    # Skip the existence check entirely when force_update is requested.
    if not force_update and dsf.dataset_key_exists(dataset_key, bucket_name, client):
        uploaded_file = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket_name, client,
                                                           return_metadata=True)
        print("Raw dataset %s is already in datastore, skipping upload." % dataset_key)
    else:
        uploaded_file = dsf.upload_df_to_DS(bucket=bucket_name, filename=filename, df=data_df,
                                            title=title, description=description, tags=tags,
                                            key_values=kv, client=None, dataset_key=dataset_key,
                                            override_check=False, return_metadata=True)
        print("Uploaded raw dataset with key %s" % dataset_key)
    return uploaded_file['dataset_oid']
def upload_file_dtc_smiles_regr_all(dset_name, title, description, tags,
                       functional_area,
                       target, target_type, activity, assay_category,file_path,dtc_smiles_fileID,
                       smiles_column, data_origin='journal', species='human',
                       force_update=False):
    """Upload a regression DTC data file to the datastore.

    Stores the file at *file_path* in the public bucket as
    '<dset_name>_dtc_smiles_regr_all.csv' with ml_ready regression metadata
    (response column 'PIC50', SMILES column smiles_column), listing
    https://doi.org/10.1016/j.chembiol.2017.11.009 as the doi and
    'compound_id' as the id column. If the dataset key already exists in the
    datastore, the upload is skipped unless force_update is set.

    Args:
        dset_name (str): Name of the dataset. Should not include a file extension.
        title (str): Title of the file (human friendly format).
        description (str): Long text describing the file (background/use notes).
        tags (list): Must be a list of strings.
        functional_area (str): The functional area.
        target (str): The target.
        target_type (str): The target type of the dataset.
        activity (str): The activity of the dataset.
        assay_category (str): The assay category of the dataset.
        file_path (str): The filepath of the dataset.
        dtc_smiles_fileID (str): Source file id used to generate the dataset.
        smiles_column (str): Column containing SMILES.
        data_origin (str): The origin of the dataset e.g. journal.
        species (str): The species of the dataset e.g. human, rat, dog.
        force_update (bool): Overwrite existing datasets in the datastore.

    Returns:
        str: datastore OID of the uploaded dataset.
    """
    bucket_name = 'public'
    filename = f'{dset_name}_dtc_smiles_regr_all.csv'
    dataset_key = 'dskey_' + filename
    # Key/value metadata attached to the datastore entry.
    kv = {
        'file_category': 'experimental',
        'activity': activity,
        # NOTE: 'assay_category' arguably should be 'kinase_activity'.
        'assay_category': assay_category,
        'assay_endpoint': 'pic50',
        'curation_level': 'ml_ready',
        'data_origin': data_origin,
        'functional_area': functional_area,
        'matrix': 'multiple values',
        'journal_doi': 'https://doi.org/10.1016/j.chembiol.2017.11.009',
        'sample_type': 'in_vitro',
        'species': species,
        'target': target,
        'target_type': target_type,
        'id_col': 'compound_id',
        'response_col': 'PIC50',
        'prediction_type': 'regression',
        'smiles_col': smiles_column,
        'units': 'unitless',
        'source_file_id': dtc_smiles_fileID,
    }
    client = dsf.config_client()
    # Skip the existence check entirely when force_update is requested.
    if not force_update and dsf.dataset_key_exists(dataset_key, bucket_name, client):
        uploaded_file = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket_name, client,
                                                           return_metadata=True)
        print("Raw dataset %s is already in datastore, skipping upload." % dataset_key)
    else:
        uploaded_file = dsf.upload_file_to_DS(
            bucket=bucket_name, filepath=file_path, filename=filename,
            title=title, description=description, tags=tags, key_values=kv,
            client=None, dataset_key=dataset_key, override_check=False,
            return_metadata=True)
        print("Uploaded raw dataset with key %s" % dataset_key)
    return uploaded_file['dataset_oid']
def upload_df_dtc_smiles_regr_all_class(dset_name, title, description, tags,
                                        functional_area,
                                        target, target_type, activity, assay_category, data_df,
                                        dtc_smiles_regr_all_fileID,
                                        smiles_column, data_origin='journal', species='human',
                                        force_update=False):
    """Upload binary DTC classification data to the datastore.

    The DataFrame is assumed to hold a two-class ('active'/'inactive')
    classification dataset derived from DTC, with 'compound_id' as the id
    column and 'PIC50' as the response column. The file is placed in the
    public bucket and cites https://doi.org/10.1016/j.chembiol.2017.11.009
    as the journal DOI.

    Args:
        dset_name (str): Name of the dataset, without a file extension.
        title (str): Human-friendly title for the file.
        description (str): Long free-text description (background/use notes).
        tags (list): List of string tags.
        functional_area (str): The functional area.
        target (str): The target.
        target_type (str): The target type of the dataset.
        activity (str): The activity of the dataset.
        assay_category (str): The assay category of the dataset.
        data_df (DataFrame): DataFrame to be uploaded.
        dtc_smiles_regr_all_fileID (str): Source file id used to generate data_df.
        smiles_column (str): Column containing SMILES.
        data_origin (str): Origin of the dataset, e.g. 'journal'.
        species (str): Species of the dataset, e.g. human, rat, dog.
        force_update (bool): Overwrite an existing dataset in the datastore.

    Returns:
        str: datastore OID of the uploaded dataset.
    """
    bucket = 'public'
    filename = '%s_dtc_smiles_regr_all_class.csv' % dset_name
    dataset_key = 'dskey_' + filename
    # Key/value metadata describing the ML-ready classification dataset.
    kv = {'file_category': 'experimental',
          'activity': activity,
          'assay_category': assay_category,  # seems like this should be called 'kinase_activity'
          'assay_endpoint': 'pic50',
          'curation_level': 'ml_ready',
          'data_origin': data_origin,
          'functional_area': functional_area,
          'matrix': 'multiple values',
          'journal_doi': 'https://doi.org/10.1016/j.chembiol.2017.11.009',
          'sample_type': 'in_vitro',
          'species': species,
          'target': target,
          'target_type': target_type,
          'id_col': 'compound_id',
          'response_col': 'PIC50',
          'prediction_type': 'classification',
          'num_classes': 2,
          'smiles_col': smiles_column,
          'class_names': ['inactive', 'active'],
          'units': 'unitless',
          'source_file_id': dtc_smiles_regr_all_fileID}
    client = dsf.config_client()
    if not force_update and dsf.dataset_key_exists(dataset_key, bucket, client):
        # Dataset already present and no overwrite requested: reuse its metadata.
        upload_meta = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket, client,
                                                         return_metadata=True)
        print("Raw dataset %s is already in datastore, skipping upload." % dataset_key)
    else:
        upload_meta = dsf.upload_df_to_DS(bucket=bucket, filename=filename, df=data_df,
                                          title=title, description=description, tags=tags,
                                          key_values=kv, client=None, dataset_key=dataset_key,
                                          override_check=False, return_metadata=True)
        print("Uploaded raw dataset with key %s" % dataset_key)
    return upload_meta['dataset_oid']
# ----------------------------------------------------------------------------------------------------------------------
# Excape-specific curation functions
# ----------------------------------------------------------------------------------------------------------------------
def upload_file_excape_raw_data(dset_name, title, description, tags,
                                functional_area,
                                target, target_type, activity, assay_category, file_path,
                                data_origin='journal', species='human',
                                force_update=False):
    """Upload a raw ExCAPE-DB data file to the datastore.

    The file is placed in the public bucket and cites
    https://dx.doi.org/10.1186%2Fs13321-017-0203-5 (ExCAPE-DB) as the DOI.
    The id column is assumed to be 'Original_Entry_ID'.

    Args:
        dset_name (str): Name of the dataset, without a file extension.
        title (str): Human-friendly title for the file.
        description (str): Long free-text description (background/use notes).
        tags (list): List of string tags.
        functional_area (str): The functional area.
        target (str): The target.
        target_type (str): The target type of the dataset.
        activity (str): The activity of the dataset.
        assay_category (str): The assay category of the dataset.
        file_path (str): The filepath of the dataset.
        data_origin (str): Origin of the dataset, e.g. 'journal'.
        species (str): Species of the dataset, e.g. human, rat, dog.
        force_update (bool): Overwrite an existing dataset in the datastore.

    Returns:
        str: datastore OID of the uploaded dataset.
    """
    bucket = 'public'
    filename = '%s_excape.csv' % dset_name
    dataset_key = 'dskey_' + filename
    # Key/value metadata describing the raw (uncurated) ExCAPE file.
    kv = {'file_category': 'experimental',
          'activity': activity,
          'assay_category': assay_category,
          'assay_endpoint': 'multiple values',
          'curation_level': 'raw',
          'data_origin': data_origin,
          'functional_area': functional_area,
          'matrix': 'multiple values',
          'journal_doi': 'https://dx.doi.org/10.1186%2Fs13321-017-0203-5',  # ExCAPE-DB
          'sample_type': 'in_vitro',
          'species': species,
          'target': target,
          'target_type': target_type,
          'id_col': 'Original_Entry_ID'}
    client = dsf.config_client()
    if not force_update and dsf.dataset_key_exists(dataset_key, bucket, client):
        # Dataset already present and no overwrite requested: reuse its metadata.
        upload_meta = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket, client,
                                                         return_metadata=True)
        print("Raw dataset %s is already in datastore, skipping upload." % dataset_key)
    else:
        upload_meta = dsf.upload_file_to_DS(bucket=bucket, filepath=file_path, filename=filename,
                                            title=title, description=description, tags=tags,
                                            key_values=kv, client=None, dataset_key=dataset_key,
                                            override_check=False, return_metadata=True)
        print("Uploaded raw dataset with key %s" % dataset_key)
    return upload_meta['dataset_oid']
def get_smiles_excape_data(nm_df,targ_lst):
    """Calculate base rdkit SMILES for ExCAPE data.

    Rows with NaN pXC50 values are dropped. Base rdkit SMILES are calculated
    from the SMILES column using
    atomsci.ddm.utils.struct_utils.base_smiles_from_smiles, and stored in a
    new column, 'rdkit_smiles', added to each output DataFrame.

    NOTE(review): only the first entry of targ_lst is mapped to a DataFrame,
    so the loop below raises KeyError when targ_lst has more than one
    element; this function effectively supports a single-target list.

    Args:
        nm_df (DataFrame): DataFrame for the ExCAPE database. Should contain
            the columns pXC50, SMILES, and Ambit_InchiKey.
        targ_lst (list): A list of targets to filter out of nm_df.

    Returns:
        list, list: A list of DataFrames, one for each target, and the series
        of all inchi keys used in the dataset.
    """
    # Drop rows with missing pXC50 response values.
    nm_df = nm_df.dropna(subset=['pXC50'])
    # SMILES strings are already present in the ExCAPE file, so no SMILES
    # retrieval or censored-value filtering is needed here.
    save_df = {}
    targ = targ_lst[0]
    save_df[targ] = nm_df
    print(targ, "distinct compounds = only", nm_df['Ambit_InchiKey'].nunique())
    shared_inchi_keys = nm_df['Ambit_InchiKey']
    smiles_lst = []
    for targ in targ_lst:
        smiles_df = save_df[targ]
        # Standardize structures: compute the base rdkit SMILES for each row.
        smiles_df['rdkit_smiles'] = smiles_df['SMILES'].apply(struct_utils.base_smiles_from_smiles)
        print(smiles_df.shape)
        print(smiles_df['Ambit_InchiKey'].nunique())
        smiles_lst.append(smiles_df)
    return smiles_lst, shared_inchi_keys
def upload_df_excape_smiles(dset_name, title, description, tags,
                            functional_area,
                            target, target_type, activity, assay_category, smiles_df, orig_fileID,
                            data_origin='journal', species='human',
                            force_update=False):
    """Upload ExCAPE SMILES data to the datastore from the given DataFrame.

    The file is placed in the public bucket and cites
    https://dx.doi.org/10.1186%2Fs13321-017-0203-5 (ExCAPE-DB) as the DOI.
    The id column is assumed to be 'Original_Entry_ID'.

    Args:
        dset_name (str): Name of the dataset, without a file extension.
        title (str): Human-friendly title for the file.
        description (str): Long free-text description (background/use notes).
        tags (list): List of string tags.
        functional_area (str): The functional area.
        target (str): The target.
        target_type (str): The target type of the dataset.
        activity (str): The activity of the dataset.
        assay_category (str): The assay category of the dataset.
        smiles_df (DataFrame): DataFrame containing SMILES to be uploaded.
        orig_fileID (str): Source file id used to generate smiles_df.
        data_origin (str): Origin of the dataset, e.g. 'journal'.
        species (str): Species of the dataset, e.g. human, rat, dog.
        force_update (bool): Overwrite an existing dataset in the datastore.

    Returns:
        str: datastore OID of the uploaded dataset.
    """
    bucket = 'public'
    # he6: this used to say _dtc_smiles.csv
    filename = '%s_excape_smiles.csv' % dset_name
    dataset_key = 'dskey_' + filename
    # Key/value metadata describing the raw SMILES dataset.
    kv = {'file_category': 'experimental',
          'activity': activity,
          'assay_category': assay_category,  # seems like this should be called 'kinase_activity'
          'assay_endpoint': 'pic50',
          'curation_level': 'raw',
          'data_origin': data_origin,
          'functional_area': functional_area,
          'matrix': 'multiple values',
          'journal_doi': 'https://dx.doi.org/10.1186%2Fs13321-017-0203-5',  # ExCAPE-DB
          'sample_type': 'in_vitro',
          'species': species,
          'target': target,
          'target_type': target_type,
          'id_col': 'Original_Entry_ID',
          'source_file_id': orig_fileID}
    client = dsf.config_client()
    if not force_update and dsf.dataset_key_exists(dataset_key, bucket, client):
        # Dataset already present and no overwrite requested: reuse its metadata.
        upload_meta = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket, client,
                                                         return_metadata=True)
        print("Raw dataset %s is already in datastore, skipping upload." % dataset_key)
    else:
        upload_meta = dsf.upload_df_to_DS(bucket=bucket, filename=filename, df=smiles_df,
                                          title=title, description=description, tags=tags,
                                          key_values=kv, client=None, dataset_key=dataset_key,
                                          override_check=False, return_metadata=True)
        print("Uploaded raw dataset with key %s" % dataset_key)
    return upload_meta['dataset_oid']
def atom_curation_excape(targ_lst, smiles_lst, shared_inchi_keys):
    """Apply the ATOM standard 'curation' step to ExCAPE data.

    Averages replicate assays, removes duplicates, and drops cases with
    large variance between replicates. Rows with NaN values in any of
    rdkit_smiles, VALUE_NUM_mean, or pXC50 are dropped, as are rows
    containing inf values.

    Args:
        targ_lst (list): A list of targets (parallel to smiles_lst).
        smiles_lst (list): A list of DataFrames. These DataFrames must contain
            the columns standard_inchi_key, pXC50, and rdkit_smiles.
        shared_inchi_keys (Series): The inchi keys used in this dataset; only
            its nunique() count is used, for reporting how many compounds
            were dropped.

    Returns:
        list: A list of curated DataFrames.
    """
    imp.reload(curate_data)
    tolerance = 10
    column = 'pXC50'
    list_bad_duplicates = 'No'
    max_std = 1
    curated_lst = []
    # Loop-invariant: the pre-curation compound count used for reporting.
    prev_cmpd_cnt = shared_inchi_keys.nunique()
    for targ, data in zip(targ_lst, smiles_lst):
        print("before", data.shape)
        curated_df = curate_data.average_and_remove_duplicates(
            column, tolerance, list_bad_duplicates, data, max_std,
            compound_id='standard_inchi_key', smiles_col='rdkit_smiles')
        # Remove rows containing inf values. axis=1 replaces the positional
        # form .any(1), which is deprecated and removed in pandas 2.0.
        curated_df = curated_df[~curated_df.isin([np.inf]).any(axis=1)]
        # Drop rows with NaN in any of the required columns (equivalent to
        # three sequential single-column dropna calls).
        curated_df = curated_df.dropna(subset=['rdkit_smiles', 'VALUE_NUM_mean', 'pXC50'])
        # (Kevin) Filter criteria: pXC50 not missing, rdkit_smiles not blank.
        # A pXC50 >= 3.0 threshold was considered but is intentionally not applied.
        curated_lst.append(curated_df)
        num_dropped = prev_cmpd_cnt - curated_df.shape[0]
        print("After", curated_df.shape, "# of dropped compounds", num_dropped)
    return curated_lst
def upload_df_excape_mleqonly(dset_name, title, description, tags,
                              functional_area,
                              target, target_type, activity, assay_category, data_df, smiles_fileID,
                              data_origin='journal', species='human',
                              force_update=False):
    """Upload ExCAPE mleqonly (ML-ready regression) data to the datastore.

    The file is placed in the public bucket and cites
    https://dx.doi.org/10.1186%2Fs13321-017-0203-5 (ExCAPE-DB) as the DOI.
    Assumes id_col is 'Original_Entry_ID', smiles_col is 'rdkit_smiles', and
    response_col is 'VALUE_NUM_mean'.

    Args:
        dset_name (str): Name of the dataset, without a file extension.
        title (str): Human-friendly title for the file.
        description (str): Long free-text description (background/use notes).
        tags (list): List of string tags.
        functional_area (str): The functional area.
        target (str): The target.
        target_type (str): The target type of the dataset.
        activity (str): The activity of the dataset.
        assay_category (str): The assay category of the dataset.
        data_df (DataFrame): DataFrame containing the data to be uploaded.
        smiles_fileID (str): Source file id used to generate data_df.
        data_origin (str): Origin of the dataset, e.g. 'journal'.
        species (str): Species of the dataset, e.g. human, rat, dog.
        force_update (bool): Overwrite an existing dataset in the datastore.

    Returns:
        str: datastore OID of the uploaded dataset.
    """
    bucket = 'public'
    # he6: this used to say _dtc_mleqonly.csv
    filename = '%s_excape_mleqonly.csv' % dset_name
    dataset_key = 'dskey_' + filename
    # Key/value metadata describing the ML-ready regression dataset.
    kv = {'file_category': 'experimental',
          'activity': activity,
          'assay_category': assay_category,  # seems like this should be called 'kinase_activity'
          'assay_endpoint': 'pic50',
          'curation_level': 'ml_ready',
          'data_origin': data_origin,
          'functional_area': functional_area,
          'matrix': 'multiple values',
          'journal_doi': 'https://dx.doi.org/10.1186%2Fs13321-017-0203-5',  # ExCAPE-DB
          'sample_type': 'in_vitro',
          'species': species,
          'target': target,
          'target_type': target_type,
          'id_col': 'Original_Entry_ID',
          'response_col': 'VALUE_NUM_mean',
          'prediction_type': 'regression',
          'smiles_col': 'rdkit_smiles',
          'units': 'unitless',
          'source_file_id': smiles_fileID}
    client = dsf.config_client()
    if not force_update and dsf.dataset_key_exists(dataset_key, bucket, client):
        # Dataset already present and no overwrite requested: reuse its metadata.
        upload_meta = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket, client,
                                                         return_metadata=True)
        print("Raw dataset %s is already in datastore, skipping upload." % dataset_key)
    else:
        upload_meta = dsf.upload_df_to_DS(bucket=bucket, filename=filename, df=data_df,
                                          title=title, description=description, tags=tags,
                                          key_values=kv, client=None, dataset_key=dataset_key,
                                          override_check=False, return_metadata=True)
        print("Uploaded raw dataset with key %s" % dataset_key)
    return upload_meta['dataset_oid']
def upload_df_excape_mleqonly_class(dset_name, title, description, tags,
                            functional_area,
                            target, target_type, activity, assay_category,data_df,mleqonly_fileID,
                            data_origin='journal', species='human',
                            force_update=False):
    """Uploads Excape mleqonly classification data to the datastore

    data_df contains a binary classification dataset with 'active' and 'inactive' classes.
    Upload mleqonly classification to the datastore from the given DataFrame.
    Returns the datastore OID of the uploaded dataset. The dataset is uploaded to the
    public bucket and lists https://dx.doi.org/10.1186%2Fs13321-017-0203-5 as the doi.
    This also assumes that the id_col is 'compound_id', smiles_col is 'rdkit_smiles'
    and response_col is 'binary_class'.

    Args:
        dset_name (str): Name of the dataset. Should not include a file extension.
        title (str): title of the file in (human friendly format)
        description (str): long text box to describe file (background/use notes)
        tags (list): Must be a list of strings.
        functional_area (str): The functional area.
        target (str): The target.
        target_type (str): The target type of the dataset.
        activity (str): The activity of the dataset.
        assay_category (str): The assay category of the dataset.
        data_df (DataFrame): DataFrame containing SMILES to be uploaded.
        mleqonly_fileID (str): Source file id used to generate data_df.
        data_origin (str): The origin of the dataset e.g. journal.
        species (str): The species of the dataset e.g. human, rat, dog.
        force_update (bool): Overwrite existing datasets in the datastore.

    Returns:
        str: datastore OID of the uploaded dataset.
    """
    bucket = 'public'
    #he6: this used to say _dtc_mleqonly.csv
    filename = '%s_excape_mleqonly_class.csv' % dset_name
    dataset_key = 'dskey_' + filename
    # Key/value metadata. Note: unlike the other ExCAPE upload functions,
    # the id column here is 'compound_id' (post-curation), not 'Original_Entry_ID'.
    kv = { 'file_category': 'experimental',
    'activity': activity,
    'assay_category': assay_category, ## seems like this should be called 'kinase_activity'
    'assay_endpoint' : 'pic50',
    'curation_level': 'ml_ready',
    'data_origin' : data_origin,
    'functional_area' : functional_area,
    'matrix' : 'multiple values',
    'journal_doi' : 'https://dx.doi.org/10.1186%2Fs13321-017-0203-5', # ExCAPE-DB
    'sample_type' : 'in_vitro',
    'species' : species,
    'target' : target,
    'target_type' : target_type,
    'id_col' : 'compound_id',
    'response_col' : 'binary_class',
    'prediction_type' : 'classification',
    'num_classes' : 2,
    'class_names' : ['inactive','active'],
    'smiles_col' : 'rdkit_smiles',
    'units' : 'unitless',
    'source_file_id' : mleqonly_fileID
    }
    ds_client = dsf.config_client()
    if force_update or not dsf.dataset_key_exists(dataset_key, bucket, ds_client):
        uploaded_file = dsf.upload_df_to_DS(bucket=bucket, filename=filename,df=data_df, title = title, description=description, tags=tags, key_values=kv, client=None, dataset_key=dataset_key, override_check=False, return_metadata=True)
        print("Uploaded raw dataset with key %s" % dataset_key)
    else:
        uploaded_file = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket, ds_client, return_metadata=True)
        print("Raw dataset %s is already in datastore, skipping upload." % dataset_key)
    raw_dset_oid = uploaded_file['dataset_oid']
    return raw_dset_oid
| 41.759529
| 256
| 0.663934
| 7,936
| 60,259
| 4.820439
| 0.056326
| 0.032414
| 0.021331
| 0.015919
| 0.875781
| 0.864148
| 0.848255
| 0.840753
| 0.835577
| 0.832336
| 0
| 0.013386
| 0.225178
| 60,259
| 1,442
| 257
| 41.788488
| 0.805954
| 0.473108
| 0
| 0.757877
| 0
| 0
| 0.221692
| 0.005301
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034826
| false
| 0
| 0.021559
| 0
| 0.089552
| 0.079602
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1e4cbb6b9152cf24398ca23c531f2ad29cd72e9e
| 10,340
|
py
|
Python
|
notebooks/mypysmps/graph/_cm.py
|
fvanden/SOC-app
|
488322c05b6ebe646f003274fa1a2125d993ecd0
|
[
"MIT"
] | null | null | null |
notebooks/mypysmps/graph/_cm.py
|
fvanden/SOC-app
|
488322c05b6ebe646f003274fa1a2125d993ecd0
|
[
"MIT"
] | null | null | null |
notebooks/mypysmps/graph/_cm.py
|
fvanden/SOC-app
|
488322c05b6ebe646f003274fa1a2125d993ecd0
|
[
"MIT"
] | null | null | null |
customcb = {'_smps_flo': ["#000000",
"#000002",
"#000004",
"#000007",
"#000009",
"#00000b",
"#00000d",
"#000010",
"#000012",
"#000014",
"#000016",
"#000019",
"#00001b",
"#00001d",
"#00001f",
"#000021",
"#000024",
"#000026",
"#000028",
"#00002a",
"#00002d",
"#00002f",
"#000031",
"#000033",
"#000036",
"#000038",
"#00003a",
"#00003c",
"#00003e",
"#000041",
"#000043",
"#000045",
"#000047",
"#00004a",
"#00004c",
"#00004e",
"#000050",
"#000053",
"#000055",
"#000057",
"#000059",
"#00005b",
"#00005e",
"#000060",
"#000062",
"#000064",
"#000067",
"#000069",
"#00006b",
"#00006d",
"#000070",
"#000072", # "#000074",
"#000076",
"#000078",
"#00007b", # "#00007d",
"#00007f",
"#000081", # "#000084",
"#000086",
"#000088", # "#00008a",
"#00008d",
"#00018e", # "#00038c",
"#00068b",
"#000989", # "#000b88",
"#000e87",
"#001185",
"#001384", # "#001682",
"#001881",
"#001b7f",
"#001e7e",
"#00207d",
"#00237b",
"#00267a",
"#002878",
"#002b77",
"#002e75",
"#003074",
"#003372",
"#003671",
"#003870",
"#003b6e",
"#003d6d",
"#00406b",
"#00436a",
"#004568",
"#004867",
"#004b65",
"#004d64",
"#005063",
"#005361",
"#005560",
"#00585e",
"#005b5d",
"#005d5b",
"#00605a",
"#006258",
"#006557",
"#006856",
"#006a54",
"#006d53", # dark azul
"#007051",
"#007151", # input
"#007150", # input
"#007250",
"#00754e", # azul
"#00764E",
"#00774E",
"#00784d",
"#007a4b",
"#007d4a", # azul green
"#007f49",
"#008247",
"#008546",
"#008744", # donker groen blauwig
"#008a43",
"#008B42",
"#008B41",
"#008C41",
"#008d41",
"#008d41",
"#008f40",
"#00923e",
"#00953d",
"#00963D",
"#00973c",
"#009a3a",
"#009d39",
"#009e38",
"#009f38",
"#009f37",
"#00a236", # 61 licht groen
"#009F35", # 62
"#00a434", # 64
"#00A534", # 64
"#00a634", # 64
"#00A633", # 65
"#00a733", # 65
"#00a434", # 64
"#00A534", # 64
"#00A634", # 64
"#00a733", # 65
"#00A635", # 65
"#02a732",
"#05a431",
"#08a230",
"#0c9f2f",
"#0f9d2f",
"#129a2e",
"#16972d",
"#19952c",
"#1c922c",
"#208f2b",
"#238d2a",
"#268a29",
"#2a8728",
"#2d8528",
"#308227", # donkergroen
"#337f26",
"#377d25",
"#3a7a24",
"#3d7824",
"#417523",
"#447222",
"#477021",
"#4b6d21",
"#4e6a20", # bruingroen
"#51681f",
"#55651e",
"#58621d",
"#5b601d",
"#5f5d1c",
"#625b1b",
"#65581a",
"#695519",
"#6c5319",
"#6f5018",
"#734d17",
"#764b16",
"#794815",
"#7d4515",
"#804314", # bruin
"#834013",
"#873d12",
"#8a3b12",
"#8d3811",
"#903610",
"#94330f",
"#97300e",
"#9a2e0e",
"#9e2b0d",
"#a1280c",
"#a4260b",
"#a8230a",
"#ab200a",
"#ae1e09",
"#b21b08",
"#b51807",
"#b81607",
"#bc1306",
"#bf1105",
"#c20e04",
"#c60b03",
"#c90903",
"#cc0602",
"#d00301",
"#d30100",# donker rood
"#d40200",
"#d40300",
"#d40400",
"#d40500",
"#d40600",
"#d40700",
"#d40800",
"#d40900", # fel rood
"#d40c00",
"#d41000",
"#D41100",
"#D41200",
"#d41300", #
"#D41400",
"#D41500",
"#d41600",
"#d41a00",
"#d41d00",
"#d42000", #
"#d42400",
"#d42700",
"#d42a00", # begin oranje
"#d42b00",
"#d42c00",
"#d42d00",
"#d42e00",
"#D43100",
"#D43200",
"#D43300",
"#d43400",
"#d43500",
"#D43600",
"#D43700",
"#d43800", # 16 donker oranje
"#d43b00", # 18
"#D43C00",
"#D43D00",
"#d43e00", # 18
"#D44200", # hh
"#d44200", # 20
"#d44300",
"#d44400",
"#d44500",
"#d44800",
"#d44c00",
"#d44f00",
"#d45200",
"#d45600",
"#d45900",
"#d45c00",
"#d45f00",
"#d46300",
"#d46600",
"#d46900",
"#d46d00",
"#d47000",
"#d47300",
"#d47700", # wat lichter oranje
"#d47a00",
"#D47B00",
"#D47C00",
"#d47d00",
"#d48100",
"#D48200",
"#D48300",
"#d48400",
"#d48700",
"#d48b00",
"#d48e00",
"#d49100",
"#d49500",
"#d49800",
"#d49b00",
"#d49f00",
"#d4a200",
"#d4a500",
"#d4a900",
"#d4ac00",
"#d4af00", # donker geel
"#d4b300",
"#d4b600",
"#d4b900",
"#d4bc00",
"#d4c000",
"#d4c300",
"#d4c600",
"#d4ca00",
"#d4cd00",
"#d4d000",
"#d4d400",
"#D7D700",
"#DADA00",
"#DCDC00",
"#DFDF00",
"#E1E100",
"#E4E400",
"#E6E600",
"#E9E900",
"#ECEC00",
"#F1F100",
"#F6F200",
"#F6F300",
"#F6F400",
"#F6F600",
"#F6F700",
"#F8F800",
"#FBFB00",
"#FDFD00",
"#FDFE00",
"#FFFD00",
"#FDFF00",
"#FFFF00",
],
'_smps_flo_w' : ["#FFFFFF",
"#000002",
"#000004",
"#000007",
"#000009",
"#00000b",
"#00000d",
"#000010",
"#000012",
"#000014",
"#000016",
"#000019",
"#00001b",
"#00001d",
"#00001f",
"#000021",
"#000024",
"#000026",
"#000028",
"#00002a",
"#00002d",
"#00002f",
"#000031",
"#000033",
"#000036",
"#000038",
"#00003a",
"#00003c",
"#00003e",
"#000041",
"#000043",
"#000045",
"#000047",
"#00004a",
"#00004c",
"#00004e",
"#000050",
"#000053",
"#000055",
"#000057",
"#000059",
"#00005b",
"#00005e",
"#000060",
"#000062",
"#000064",
"#000067",
"#000069",
"#00006b",
"#00006d",
"#000070",
"#000072", # "#000074",
"#000076",
"#000078",
"#00007b", # "#00007d",
"#00007f",
"#000081", # "#000084",
"#000086",
"#000088", # "#00008a",
"#00008d",
"#00018e", # "#00038c",
"#00068b",
"#000989", # "#000b88",
"#000e87",
"#001185",
"#001384", # "#001682",
"#001881",
"#001b7f",
"#001e7e",
"#00207d",
"#00237b",
"#00267a",
"#002878",
"#002b77",
"#002e75",
"#003074",
"#003372",
"#003671",
"#003870",
"#003b6e",
"#003d6d",
"#00406b",
"#00436a",
"#004568",
"#004867",
"#004b65",
"#004d64",
"#005063",
"#005361",
"#005560",
"#00585e",
"#005b5d",
"#005d5b",
"#00605a",
"#006258",
"#006557",
"#006856",
"#006a54",
"#006d53", # dark azul
"#007051",
"#007151", # input
"#007150", # input
"#007250",
"#00754e", # azul
"#00764E",
"#00774E",
"#00784d",
"#007a4b",
"#007d4a", # azul green
"#007f49",
"#008247",
"#008546",
"#008744", # donker groen blauwig
"#008a43",
"#008B42",
"#008B41",
"#008C41",
"#008d41",
"#008d41",
"#008f40",
"#00923e",
"#00953d",
"#00963D",
"#00973c",
"#009a3a",
"#009d39",
"#009e38",
"#009f38",
"#009f37",
"#00a236", # 61 licht groen
"#009F35", # 62
"#00a434", # 64
"#00A534", # 64
"#00a634", # 64
"#00A633", # 65
"#00a733", # 65
"#00a434", # 64
"#00A534", # 64
"#00A634", # 64
"#00a733", # 65
"#00A635", # 65
"#02a732",
"#05a431",
"#08a230",
"#0c9f2f",
"#0f9d2f",
"#129a2e",
"#16972d",
"#19952c",
"#1c922c",
"#208f2b",
"#238d2a",
"#268a29",
"#2a8728",
"#2d8528",
"#308227", # donkergroen
"#337f26",
"#377d25",
"#3a7a24",
"#3d7824",
"#417523",
"#447222",
"#477021",
"#4b6d21",
"#4e6a20", # bruingroen
"#51681f",
"#55651e",
"#58621d",
"#5b601d",
"#5f5d1c",
"#625b1b",
"#65581a",
"#695519",
"#6c5319",
"#6f5018",
"#734d17",
"#764b16",
"#794815",
"#7d4515",
"#804314", # bruin
"#834013",
"#873d12",
"#8a3b12",
"#8d3811",
"#903610",
"#94330f",
"#97300e",
"#9a2e0e",
"#9e2b0d",
"#a1280c",
"#a4260b",
"#a8230a",
"#ab200a",
"#ae1e09",
"#b21b08",
"#b51807",
"#b81607",
"#bc1306",
"#bf1105",
"#c20e04",
"#c60b03",
"#c90903",
"#cc0602",
"#d00301",
"#d30100",# donker rood
"#d40200",
"#d40300",
"#d40400",
"#d40500",
"#d40600",
"#d40700",
"#d40800",
"#d40900", # fel rood
"#d40c00",
"#d41000",
"#D41100",
"#D41200",
"#d41300", #
"#D41400",
"#D41500",
"#d41600",
"#d41a00",
"#d41d00",
"#d42000", #
"#d42400",
"#d42700",
"#d42a00", # begin oranje
"#d42b00",
"#d42c00",
"#d42d00",
"#d42e00",
"#D43100",
"#D43200",
"#D43300",
"#d43400",
"#d43500",
"#D43600",
"#D43700",
"#d43800", # 16 donker oranje
"#d43b00", # 18
"#D43C00",
"#D43D00",
"#d43e00", # 18
"#D44200", # hh
"#d44200", # 20
"#d44300",
"#d44400",
"#d44500",
"#d44800",
"#d44c00",
"#d44f00",
"#d45200",
"#d45600",
"#d45900",
"#d45c00",
"#d45f00",
"#d46300",
"#d46600",
"#d46900",
"#d46d00",
"#d47000",
"#d47300",
"#d47700", # wat lichter oranje
"#d47a00",
"#D47B00",
"#D47C00",
"#d47d00",
"#d48100",
"#D48200",
"#D48300",
"#d48400",
"#d48700",
"#d48b00",
"#d48e00",
"#d49100",
"#d49500",
"#d49800",
"#d49b00",
"#d49f00",
"#d4a200",
"#d4a500",
"#d4a900",
"#d4ac00",
"#d4af00", # donker geel
"#d4b300",
"#d4b600",
"#d4b900",
"#d4bc00",
"#d4c000",
"#d4c300",
"#d4c600",
"#d4ca00",
"#d4cd00",
"#d4d000",
"#d4d400",
"#D7D700",
"#DADA00",
"#DCDC00",
"#DFDF00",
"#E1E100",
"#E4E400",
"#E6E600",
"#E9E900",
"#ECEC00",
"#F1F100",
"#F6F200",
"#F6F300",
"#F6F400",
"#F6F600",
"#F6F700",
"#F8F800",
"#FBFB00",
"#FDFD00",
"#FDFE00",
"#FFFD00",
"#FDFF00",
"#FFFF00",
]}
| 16.15625
| 37
| 0.414507
| 744
| 10,340
| 5.754032
| 0.469086
| 0.007475
| 0.013081
| 0.01495
| 0.991824
| 0.991824
| 0.991824
| 0.991824
| 0.991824
| 0.991824
| 0
| 0.457258
| 0.325725
| 10,340
| 639
| 38
| 16.181534
| 0.15677
| 0.05793
| 0
| 0.993711
| 0
| 0
| 0.462304
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
1e7a9ad326f8697ef38d7456e4c52ad73d510302
| 16,571
|
py
|
Python
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/ccx/tests/test_utils.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 3
|
2021-12-15T04:58:18.000Z
|
2022-02-06T12:15:37.000Z
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/ccx/tests/test_utils.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | null | null | null |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/ccx/tests/test_utils.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 1
|
2019-01-02T14:38:50.000Z
|
2019-01-02T14:38:50.000Z
|
"""
test utils
"""
import uuid
from smtplib import SMTPException
from unittest import mock
from ccx_keys.locator import CCXLocator
from common.djangoapps.student.models import CourseEnrollment, CourseEnrollmentException
from common.djangoapps.student.roles import CourseCcxCoachRole, CourseInstructorRole, CourseStaffRole
from common.djangoapps.student.tests.factories import AdminFactory
from lms.djangoapps.ccx.tests.factories import CcxFactory
from lms.djangoapps.ccx.tests.utils import CcxTestCase
from lms.djangoapps.ccx.utils import add_master_course_staff_to_ccx, ccx_course, remove_master_course_staff_from_ccx
from lms.djangoapps.instructor.access import list_with_level
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import TEST_DATA_SPLIT_MODULESTORE, ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class TestGetCCXFromCCXLocator(ModuleStoreTestCase):
    """Verify that get_ccx_from_ccx_locator functions properly"""
    MODULESTORE = TEST_DATA_SPLIT_MODULESTORE

    def setUp(self):
        """Create a course and grant a coach the CCX coach role on it."""
        super().setUp()
        self.course = CourseFactory.create()
        self.coach = AdminFactory.create()
        CourseCcxCoachRole(self.course.id).add_users(self.coach)

    def call_fut(self, course_id):
        """Invoke the function under test with the given course id."""
        from lms.djangoapps.ccx.utils import get_ccx_from_ccx_locator
        return get_ccx_from_ccx_locator(course_id)

    def test_non_ccx_locator(self):
        """A plain (non-CCX) course locator should yield None."""
        assert self.call_fut(self.course.id) is None

    def test_ccx_locator(self):
        """A CCX locator should resolve to the matching ccx record."""
        ccx = CcxFactory(course_id=self.course.id, coach=self.coach)
        locator = CCXLocator.from_course_locator(self.course.id, ccx.id)
        assert self.call_fut(locator) == ccx
class TestStaffOnCCX(CcxTestCase):
    """
    Tests for staff on ccx courses.

    Each test creates a staff member and an instructor on the master course,
    then exercises ``add_master_course_staff_to_ccx`` /
    ``remove_master_course_staff_from_ccx`` and checks roles, enrollments and
    (where relevant) the outgoing email outbox.
    """
    MODULESTORE = TEST_DATA_SPLIT_MODULESTORE

    def setUp(self):
        """Log the coach in and create the CCX addressed by ``self.ccx_locator``."""
        super().setUp()
        # Create instructor account
        self.client.login(username=self.coach.username, password="test")
        # create an instance of modulestore
        self.mstore = modulestore()
        self.make_coach()
        self.ccx = self.make_ccx()
        self.ccx_locator = CCXLocator.from_course_locator(self.course.id, self.ccx.id)

    def test_add_master_course_staff_to_ccx(self):
        """
        Test add staff of master course to ccx course
        """
        # adding staff to master course.
        staff = self.make_staff()
        assert CourseStaffRole(self.course.id).has_user(staff)
        # adding instructor to master course.
        instructor = self.make_instructor()
        assert CourseInstructorRole(self.course.id).has_user(instructor)
        add_master_course_staff_to_ccx(self.course, self.ccx_locator, self.ccx.display_name)
        # assert that staff and instructors of master course has staff and instructor roles on ccx
        list_staff_master_course = list_with_level(self.course.id, 'staff')
        list_instructor_master_course = list_with_level(self.course.id, 'instructor')
        with ccx_course(self.ccx_locator) as course_ccx:
            list_staff_ccx_course = list_with_level(course_ccx.id, 'staff')
            assert len(list_staff_master_course) == len(list_staff_ccx_course)
            assert list_staff_master_course[0].email == list_staff_ccx_course[0].email
            list_instructor_ccx_course = list_with_level(course_ccx.id, 'instructor')
            assert len(list_instructor_ccx_course) == len(list_instructor_master_course)
            assert list_instructor_ccx_course[0].email == list_instructor_master_course[0].email

    def test_add_master_course_staff_to_ccx_with_exception(self):
        """
        When exception raise from ``enroll_email`` assert that enrollment skipped for that staff or
        instructor.
        """
        staff = self.make_staff()
        assert CourseStaffRole(self.course.id).has_user(staff)
        # adding instructor to master course.
        instructor = self.make_instructor()
        assert CourseInstructorRole(self.course.id).has_user(instructor)
        # a CourseEnrollmentException during enrollment must not enroll the user
        with mock.patch.object(CourseEnrollment, 'enroll_by_email', side_effect=CourseEnrollmentException()):
            add_master_course_staff_to_ccx(self.course, self.ccx_locator, self.ccx.display_name)
            assert not CourseEnrollment.objects.filter(course_id=self.ccx_locator, user=staff).exists()
            assert not CourseEnrollment.objects.filter(course_id=self.ccx_locator, user=instructor).exists()
        # an SMTP failure while sending the enrollment email must behave the same
        with mock.patch.object(CourseEnrollment, 'enroll_by_email', side_effect=SMTPException()):
            add_master_course_staff_to_ccx(self.course, self.ccx_locator, self.ccx.display_name)
            assert not CourseEnrollment.objects.filter(course_id=self.ccx_locator, user=staff).exists()
            assert not CourseEnrollment.objects.filter(course_id=self.ccx_locator, user=instructor).exists()

    def test_remove_master_course_staff_from_ccx(self):
        """
        Test remove staff of master course to ccx course
        """
        staff = self.make_staff()
        assert CourseStaffRole(self.course.id).has_user(staff)
        # adding instructor to master course.
        instructor = self.make_instructor()
        assert CourseInstructorRole(self.course.id).has_user(instructor)
        add_master_course_staff_to_ccx(self.course, self.ccx_locator, self.ccx.display_name, send_email=False)
        list_staff_master_course = list_with_level(self.course.id, 'staff')
        list_instructor_master_course = list_with_level(self.course.id, 'instructor')
        with ccx_course(self.ccx_locator) as course_ccx:
            # master course staff/instructors are present on the ccx after add
            list_staff_ccx_course = list_with_level(course_ccx.id, 'staff')
            assert len(list_staff_master_course) == len(list_staff_ccx_course)
            assert list_staff_master_course[0].email == list_staff_ccx_course[0].email
            list_instructor_ccx_course = list_with_level(course_ccx.id, 'instructor')
            assert len(list_instructor_ccx_course) == len(list_instructor_master_course)
            assert list_instructor_ccx_course[0].email == list_instructor_master_course[0].email
            # assert that role of staff and instructors of master course removed from ccx.
            remove_master_course_staff_from_ccx(
                self.course, self.ccx_locator, self.ccx.display_name, send_email=False
            )
            list_staff_ccx_course = list_with_level(course_ccx.id, 'staff')
            assert len(list_staff_master_course) != len(list_staff_ccx_course)
            list_instructor_ccx_course = list_with_level(course_ccx.id, 'instructor')
            assert len(list_instructor_ccx_course) != len(list_instructor_master_course)
            for user in list_staff_master_course:
                assert user not in list_staff_ccx_course
            for user in list_instructor_master_course:
                assert user not in list_instructor_ccx_course

    def test_remove_master_course_staff_from_ccx_idempotent(self):
        """
        Test remove staff of master course from ccx course

        Removing twice must not send duplicate emails nor change the final state.
        """
        staff = self.make_staff()
        assert CourseStaffRole(self.course.id).has_user(staff)
        # adding instructor to master course.
        instructor = self.make_instructor()
        assert CourseInstructorRole(self.course.id).has_user(instructor)
        outbox = self.get_outbox()
        assert len(outbox) == 0
        add_master_course_staff_to_ccx(self.course, self.ccx_locator, self.ccx.display_name, send_email=False)
        list_staff_master_course = list_with_level(self.course.id, 'staff')
        list_instructor_master_course = list_with_level(self.course.id, 'instructor')
        with ccx_course(self.ccx_locator) as course_ccx:
            list_staff_ccx_course = list_with_level(course_ccx.id, 'staff')
            assert len(list_staff_master_course) == len(list_staff_ccx_course)
            assert list_staff_master_course[0].email == list_staff_ccx_course[0].email
            list_instructor_ccx_course = list_with_level(course_ccx.id, 'instructor')
            assert len(list_instructor_ccx_course) == len(list_instructor_master_course)
            assert list_instructor_ccx_course[0].email == list_instructor_master_course[0].email
            # assert that role of staff and instructors of master course removed from ccx.
            remove_master_course_staff_from_ccx(
                self.course, self.ccx_locator, self.ccx.display_name, send_email=True
            )
            # one unenrollment email per removed staff/instructor
            assert len(outbox) == (len(list_staff_master_course) + len(list_instructor_master_course))
            list_staff_ccx_course = list_with_level(course_ccx.id, 'staff')
            assert len(list_staff_master_course) != len(list_staff_ccx_course)
            list_instructor_ccx_course = list_with_level(course_ccx.id, 'instructor')
            assert len(list_instructor_ccx_course) != len(list_instructor_master_course)
            for user in list_staff_master_course:
                assert user not in list_staff_ccx_course
            for user in list_instructor_master_course:
                assert user not in list_instructor_ccx_course
        # Run again
        remove_master_course_staff_from_ccx(self.course, self.ccx_locator, self.ccx.display_name)
        # no additional emails were sent by the second removal
        assert len(outbox) == (len(list_staff_master_course) + len(list_instructor_master_course))
        with ccx_course(self.ccx_locator) as course_ccx:
            list_staff_ccx_course = list_with_level(course_ccx.id, 'staff')
            assert len(list_staff_master_course) != len(list_staff_ccx_course)
            list_instructor_ccx_course = list_with_level(course_ccx.id, 'instructor')
            assert len(list_instructor_ccx_course) != len(list_instructor_master_course)
            for user in list_staff_master_course:
                assert user not in list_staff_ccx_course
            for user in list_instructor_master_course:
                assert user not in list_instructor_ccx_course

    def test_add_master_course_staff_to_ccx_display_name(self):
        """
        Test add staff of master course to ccx course.
        Specific test to check that a passed display name is in the
        subject of the email sent to the enrolled users.
        """
        staff = self.make_staff()
        assert CourseStaffRole(self.course.id).has_user(staff)
        # adding instructor to master course.
        instructor = self.make_instructor()
        assert CourseInstructorRole(self.course.id).has_user(instructor)
        outbox = self.get_outbox()
        # create a unique display name
        display_name = f'custom_display_{uuid.uuid4()}'
        list_staff_master_course = list_with_level(self.course.id, 'staff')
        list_instructor_master_course = list_with_level(self.course.id, 'instructor')
        assert len(outbox) == 0
        # give access to the course staff/instructor
        add_master_course_staff_to_ccx(self.course, self.ccx_locator, display_name)
        assert len(outbox) == (len(list_staff_master_course) + len(list_instructor_master_course))
        for email in outbox:
            assert display_name in email.subject

    def test_remove_master_course_staff_from_ccx_display_name(self):
        """
        Test remove role of staff of master course on ccx course.
        Specific test to check that a passed display name is in the
        subject of the email sent to the unenrolled users.
        """
        staff = self.make_staff()
        assert CourseStaffRole(self.course.id).has_user(staff)
        # adding instructor to master course.
        instructor = self.make_instructor()
        assert CourseInstructorRole(self.course.id).has_user(instructor)
        outbox = self.get_outbox()
        add_master_course_staff_to_ccx(self.course, self.ccx_locator, self.ccx.display_name, send_email=False)
        # create a unique display name
        display_name = f'custom_display_{uuid.uuid4()}'
        list_staff_master_course = list_with_level(self.course.id, 'staff')
        list_instructor_master_course = list_with_level(self.course.id, 'instructor')
        assert len(outbox) == 0
        # give access to the course staff/instructor
        remove_master_course_staff_from_ccx(self.course, self.ccx_locator, display_name)
        assert len(outbox) == (len(list_staff_master_course) + len(list_instructor_master_course))
        for email in outbox:
            assert display_name in email.subject

    def test_add_master_course_staff_to_ccx_idempotent(self):
        """
        Test add staff of master course to ccx course multiple time will
        not result in multiple enrollments.
        """
        staff = self.make_staff()
        assert CourseStaffRole(self.course.id).has_user(staff)
        # adding instructor to master course.
        instructor = self.make_instructor()
        assert CourseInstructorRole(self.course.id).has_user(instructor)
        outbox = self.get_outbox()
        list_staff_master_course = list_with_level(self.course.id, 'staff')
        list_instructor_master_course = list_with_level(self.course.id, 'instructor')
        assert len(outbox) == 0
        # run the assignment the first time
        add_master_course_staff_to_ccx(self.course, self.ccx_locator, self.ccx.display_name)
        assert len(outbox) == (len(list_staff_master_course) + len(list_instructor_master_course))
        with ccx_course(self.ccx_locator) as course_ccx:
            list_staff_ccx_course = list_with_level(course_ccx.id, 'staff')
            list_instructor_ccx_course = list_with_level(course_ccx.id, 'instructor')
            assert len(list_staff_master_course) == len(list_staff_ccx_course)
            for user in list_staff_master_course:
                assert user in list_staff_ccx_course
            assert len(list_instructor_master_course) == len(list_instructor_ccx_course)
            for user in list_instructor_master_course:
                assert user in list_instructor_ccx_course
        # run the assignment again
        add_master_course_staff_to_ccx(self.course, self.ccx_locator, self.ccx.display_name)
        # there are no new duplicated email
        assert len(outbox) == (len(list_staff_master_course) + len(list_instructor_master_course))
        # there are no duplicated staffs
        with ccx_course(self.ccx_locator) as course_ccx:
            list_staff_ccx_course = list_with_level(course_ccx.id, 'staff')
            list_instructor_ccx_course = list_with_level(course_ccx.id, 'instructor')
            assert len(list_staff_master_course) == len(list_staff_ccx_course)
            for user in list_staff_master_course:
                assert user in list_staff_ccx_course
            assert len(list_instructor_master_course) == len(list_instructor_ccx_course)
            for user in list_instructor_master_course:
                assert user in list_instructor_ccx_course

    def test_add_master_course_staff_to_ccx_no_email(self):
        """
        Test add staff of master course to ccx course without
        sending enrollment email.
        """
        staff = self.make_staff()
        assert CourseStaffRole(self.course.id).has_user(staff)
        # adding instructor to master course.
        instructor = self.make_instructor()
        assert CourseInstructorRole(self.course.id).has_user(instructor)
        outbox = self.get_outbox()
        assert len(outbox) == 0
        add_master_course_staff_to_ccx(self.course, self.ccx_locator, self.ccx.display_name, send_email=False)
        assert len(outbox) == 0

    def test_remove_master_course_staff_from_ccx_no_email(self):
        """
        Test remove role of staff of master course on ccx course without
        sending enrollment email.
        """
        staff = self.make_staff()
        assert CourseStaffRole(self.course.id).has_user(staff)
        # adding instructor to master course.
        instructor = self.make_instructor()
        assert CourseInstructorRole(self.course.id).has_user(instructor)
        outbox = self.get_outbox()
        assert len(outbox) == 0
        remove_master_course_staff_from_ccx(self.course, self.ccx_locator, self.ccx.display_name, send_email=False)
        assert len(outbox) == 0
| 47.345714
| 116
| 0.709553
| 2,172
| 16,571
| 5.090239
| 0.074125
| 0.111795
| 0.039074
| 0.053184
| 0.840358
| 0.819374
| 0.805264
| 0.792059
| 0.763386
| 0.763386
| 0
| 0.00169
| 0.214471
| 16,571
| 349
| 117
| 47.481375
| 0.847661
| 0.126848
| 0
| 0.742991
| 0
| 0
| 0.021378
| 0.004106
| 0
| 0
| 0
| 0
| 0.336449
| 1
| 0.065421
| false
| 0.004673
| 0.070093
| 0
| 0.158879
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bf53b0f16c36e8a74d4ddbedf4ebe52362a0b391
| 2,255
|
py
|
Python
|
elytrus.py
|
Elytrus/elytrus
|
3bacb2669bb17b3c10ada423a0edb81432add4ef
|
[
"MIT"
] | null | null | null |
elytrus.py
|
Elytrus/elytrus
|
3bacb2669bb17b3c10ada423a0edb81432add4ef
|
[
"MIT"
] | null | null | null |
elytrus.py
|
Elytrus/elytrus
|
3bacb2669bb17b3c10ada423a0edb81432add4ef
|
[
"MIT"
] | null | null | null |
import os      # OS detection for clearing the screen
import shutil  # terminal size query with a safe fallback
import time    # delay between printed lines

wait = 0.1  # Delay between each line (seconds)
# shutil.get_terminal_size falls back to the COLUMNS env var / 80x24 when
# stdout is not attached to a terminal (e.g. piped output), whereas
# os.get_terminal_size raises OSError in that case.
width = shutil.get_terminal_size().columns  # Width of the console
# Clear the screen
def clear():
    """Clear the terminal, picking the right command for the current OS."""
    command = "cls" if os.name == "nt" else "clear"
    os.system(command)
# Intro
def intro():
    """Print the animated ASCII-art splash screen, then clear the console.

    Each art line is centered to the terminal ``width`` and followed by a
    ``wait``-second delay, producing a line-by-line reveal effect.
    """
    # The art lines, in display order; a tuple + loop replaces the original
    # ten duplicated print/sleep pairs.
    art = (
        r""" _ _ _ _ _ _ _ _ """,
        r""" /\ \ _\ \ /\ \ /\_\ /\ \ /\ \ /\_\ / /\ """,
        r""" / \ \ /\__ \ \ \ \ / / / \_\ \ / \ \ / / / _ / / \ """,
        r""" / /\ \ \ / /_ \_\ \ \ \_/ / / /\__ \ / /\ \ \ \ \ \__ /\_\ / / /\ \__ """,
        r""" / / /\ \_\ / / /\/_/ \ \___/ / / /_ \ \ / / /\ \_\ \ \___\ / / / / / /\ \___\ """,
        r""" / /_/_ \/_/ / / / \ \ \_/ / / /\ \ \ / / /_/ / / \__ / / / / \ \ \ \/___/ """,
        r""" / /____/\ / / / \ \ \ / / / \/_/ / / /__\/ / / / / / / / \ \ \ """,
        r""" / /\____\/ / / / ____ \ \ \ / / / / / /_____/ / / / / / / _ \ \ \ """,
        r""" / / /______ / /_/_/ ___/\ \ \ \ / / / / / /\ \ \ / / /___/ / / /_/\__/ / / """,
        r"""/ / /_______\ /_______/\__\/ \ \_\ /_/ / / / / \ \ \ / / /____\/ / \ \/___/ / """,
        r"""\/__________/ \_______\/ \/_/ \_\/ \/_/ \_\/ \/_________/ \_____\/ """,
    )
    for line in art:
        print(line.center(width))
        time.sleep(wait)
    print("\n")  # A new line doesn't work with .center :(
    print("Studios".center(width))
    time.sleep(3)
    clear()  # Clears the console for the next text
| 60.945946
| 144
| 0.337472
| 148
| 2,255
| 4.141892
| 0.310811
| 0.215334
| 0.293638
| 0.391517
| 0.546493
| 0.546493
| 0.546493
| 0.546493
| 0.546493
| 0.546493
| 0
| 0.002358
| 0.43592
| 2,255
| 37
| 145
| 60.945946
| 0.47956
| 0.082483
| 0
| 0.34375
| 0
| 0.28125
| 0.606693
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.03125
| 0
| 0.09375
| 0.40625
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
bf5b1636c4e0abae3eb1806ed909afaf40f6a82d
| 685
|
py
|
Python
|
tests/test_utils_path.py
|
karkason/pywinsandbox
|
1316bd046b66abce8c7c565eb55cd26309585e8a
|
[
"MIT"
] | 82
|
2020-03-24T20:26:10.000Z
|
2022-03-16T13:12:39.000Z
|
tests/test_utils_path.py
|
karkason/pywinsandbox
|
1316bd046b66abce8c7c565eb55cd26309585e8a
|
[
"MIT"
] | 6
|
2020-03-24T20:46:18.000Z
|
2022-01-29T11:10:53.000Z
|
tests/test_utils_path.py
|
karkason/pywinsandbox
|
1316bd046b66abce8c7c565eb55cd26309585e8a
|
[
"MIT"
] | 12
|
2020-03-26T06:09:05.000Z
|
2021-12-13T04:02:51.000Z
|
from winsandbox.utils.path import shared_folder_path_in_sandbox, WINDOWS_SANDBOX_DEFAULT_DESKTOP
def test_shared_folder_path_in_sandbox():
    """Every host path should map to its basename under the sandbox desktop."""
    expected = WINDOWS_SANDBOX_DEFAULT_DESKTOP / "test.txt"
    host_paths = (
        r"C:\test.txt",
        r"D:\test.txt",
        r"test.txt",
        r"C:\some\test.txt",
        r"C:\some\random\path\test.txt",
    )
    for host_path in host_paths:
        assert shared_folder_path_in_sandbox(host_path) == expected
| 68.5
| 121
| 0.807299
| 104
| 685
| 4.865385
| 0.201923
| 0.13834
| 0.221344
| 0.249012
| 0.851779
| 0.752964
| 0.752964
| 0.752964
| 0.604743
| 0.604743
| 0
| 0
| 0.086131
| 685
| 9
| 122
| 76.111111
| 0.808307
| 0
| 0
| 0
| 0
| 0
| 0.166423
| 0.040876
| 0
| 0
| 0
| 0
| 0.714286
| 1
| 0.142857
| true
| 0
| 0.142857
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
449d9a085a43aebe60e9e6d048dc097f17847fd9
| 10,744
|
py
|
Python
|
baconian/test/tests/test_common/test_data_pre_processing.py
|
yitongx/baconian-public
|
a67e23c6bc6bfe7019ec9532a3d18f06aed6bbbb
|
[
"MIT"
] | 69
|
2020-01-31T17:44:43.000Z
|
2022-03-28T13:09:11.000Z
|
baconian/test/tests/test_common/test_data_pre_processing.py
|
yitongx/baconian-project
|
e84508da60877e387344133a11039edaac35c5bf
|
[
"MIT"
] | 5
|
2019-04-28T07:24:26.000Z
|
2020-01-29T01:49:51.000Z
|
baconian/test/tests/test_common/test_data_pre_processing.py
|
yitongx/baconian-project
|
e84508da60877e387344133a11039edaac35c5bf
|
[
"MIT"
] | 6
|
2019-05-04T02:18:11.000Z
|
2019-12-04T22:05:52.000Z
|
from baconian.envs.gym_env import make
from baconian.core.core import EnvSpec
from baconian.test.tests.set_up.setup import BaseTestCase
from baconian.common.data_pre_processing import *
import numpy as np
class TestDataPreProcessing(BaseTestCase):
    """Tests for the batch/running min-max and standard scalers.

    Samples 100 points from each gym env's observation and action spaces and
    checks that the scalers map data into the expected range / statistics and
    that inverse_process round-trips.  ShapeNotCompatibleError is tolerated
    for non-Box spaces only.
    """

    def test_min_max(self):
        """Exercise BatchMinMaxScaler and RunningMinMaxScaler end to end."""
        for env in (make('Pendulum-v0'), make('Acrobot-v1'), make('HalfCheetahBulletEnv-v0')):
            for sample_space in (env.observation_space, env.action_space):
                sample_fn = sample_space.sample
                dims = sample_space.flat_dim
                try:
                    print("test {} with sample {} dims {}".format(env, sample_fn, dims))
                    # test batch scaler
                    min_max = BatchMinMaxScaler(dims=dims)
                    data_list = []
                    for i in range(100):
                        data_list.append(sample_fn())
                    data = min_max.process(np.array(data_list))
                    # default range is [0, 1]
                    self.assertTrue(np.greater_equal(np.ones(dims),
                                                     data).all())
                    self.assertTrue(np.less_equal(np.zeros(dims),
                                                  data).all())
                    # test batch scaler with given range
                    min_max = BatchMinMaxScaler(dims=dims,
                                                desired_range=(np.ones(dims) * -1.0,
                                                               np.ones(dims) * 5.0))
                    data_list = []
                    for i in range(100):
                        data_list.append(sample_fn())
                    data = min_max.process(np.array(data_list))
                    self.assertTrue(np.greater_equal(np.ones(dims) * 5.0,
                                                     data).all())
                    self.assertTrue(np.less_equal(np.ones(dims) * -1.0,
                                                  data).all())
                    self.assertEqual(np.max(data), 5.0)
                    self.assertEqual(np.min(data), -1.0)
                    # inverse_process must round-trip back to the raw samples
                    data = min_max.inverse_process(data)
                    self.assertTrue(np.isclose(data, np.array(data_list)).all())
                    # test batch scaler with given range and given initial data
                    data_list = []
                    for i in range(100):
                        data_list.append(sample_fn())
                    min_max = RunningMinMaxScaler(dims=dims,
                                                  desired_range=(np.ones(dims) * -1.0,
                                                                 np.ones(dims) * 5.0),
                                                  init_data=np.array(data_list))
                    data = min_max.process(np.array(data_list))
                    self.assertTrue(np.greater_equal(np.ones(dims) * 5.0,
                                                     data).all())
                    self.assertTrue(np.less_equal(np.ones(dims) * -1.0,
                                                  data).all())
                    self.assertEqual(np.max(data), 5.0)
                    self.assertEqual(np.min(data), -1.0)
                    # test batch scaler with given range and given initial min and max
                    data_list = []
                    for i in range(100):
                        data_list.append(sample_fn())
                    min_max = RunningMinMaxScaler(dims=dims,
                                                  desired_range=(np.ones(dims) * -1.0,
                                                                 np.ones(dims) * 5.0),
                                                  init_min=np.min(np.array(data_list), axis=0),
                                                  init_max=np.max(np.array(data_list), axis=0))
                    data = min_max.process(np.array(data_list))
                    self.assertTrue(np.greater_equal(np.ones(dims) * 5.0,
                                                     data).all())
                    self.assertTrue(np.less_equal(np.ones(dims) * -1.0,
                                                  data).all())
                    self.assertEqual(np.max(data), 5.0)
                    self.assertEqual(np.min(data), -1.0)
                    # test update function by a larger range of data
                    pre_min = np.min(np.array(data_list), axis=0)
                    pre_max = np.max(np.array(data_list), axis=0)
                    data_list = np.array(data_list) * 2.0
                    min_max.update_scaler(data_list)
                    # doubling the data must double the tracked running min/max
                    self.assertTrue(np.equal(pre_min * 2.0, min_max._min).all())
                    self.assertTrue(np.equal(pre_max * 2.0, min_max._max).all())
                except ShapeNotCompatibleError as e:
                    from baconian.common.spaces import Box
                    # Box spaces must always be compatible; anything else may skip
                    if isinstance(sample_space, Box):
                        raise ValueError
                    else:
                        pass

    def test_standard_scaler(self):
        """Exercise BatchStandardScaler and RunningStandardScaler end to end."""
        for env in (make('Pendulum-v0'), make('Acrobot-v1'), make('HalfCheetahBulletEnv-v0')):
            for sample_space in (env.observation_space, env.action_space):
                sample_fn = sample_space.sample
                dims = sample_space.flat_dim
                try:
                    # test batch standard scaler
                    standard_scaler = BatchStandardScaler(dims=dims)
                    data_list = []
                    for i in range(100):
                        data_list.append(sample_fn())
                    data = standard_scaler.process(np.array(data_list))
                    # processed data is centered per dimension
                    self.assertTrue(np.isclose(np.mean(data, axis=0), 0.0).all())
                    # TODO a theoretical bound should be given
                    # self.assertTrue(np.isclose(np.var(data, axis=0), 1.0, atol=0.04).all())
                    data = standard_scaler.inverse_process(data)
                    self.assertTrue(np.isclose(data, np.array(data_list)).all())
                    # test running standard scaler
                    standard_scaler = RunningStandardScaler(dims=dims)
                    data_list = []
                    for i in range(100):
                        data_list.append(sample_fn())
                    standard_scaler.update_scaler(np.array(data_list))
                    self.assertEqual(standard_scaler._data_count, 100)
                    data = standard_scaler.process(np.array(data_list))
                    self.assertTrue(np.isclose(np.mean(data, axis=0), 0.0).all())
                    # TODO a theoretical bound should be given
                    # self.assertTrue(np.isclose(np.var(data, axis=0), 1.0, atol=0.04).all())
                    # test update function
                    new_data_list = []
                    for i in range(100):
                        new_data_list.append(sample_fn())
                    standard_scaler.update_scaler(np.array(new_data_list))
                    self.assertEqual(standard_scaler._data_count, 200)
                    data_list += new_data_list
                    data = standard_scaler.process(np.array(data_list))
                    self.assertTrue(np.isclose(np.mean(data, axis=0), 0.0).all())
                    # TODO a theoretical bound should be given
                    # self.assertTrue(np.isclose(np.var(data, axis=0), 1.0, atol=0.04).all())
                    # test running scaler with given data
                    data_list = []
                    for i in range(100):
                        data_list.append(sample_fn())
                    standard_scaler = RunningStandardScaler(dims=dims,
                                                            init_data=np.array(data_list))
                    self.assertEqual(standard_scaler._data_count, 100)
                    data = standard_scaler.process(np.array(data_list))
                    self.assertTrue(np.isclose(np.mean(data, axis=0), 0.0).all())
                    # TODO a theoretical bound should be given
                    # self.assertTrue(np.isclose(np.var(data, axis=0), 1.0, atol=0.04).all())
                    # test update of running scaler with given data
                    new_data_list = []
                    for i in range(100):
                        new_data_list.append(sample_fn())
                    standard_scaler.update_scaler(np.array(new_data_list))
                    self.assertEqual(standard_scaler._data_count, 200)
                    data_list += new_data_list
                    data = standard_scaler.process(np.array(data_list))
                    self.assertTrue(np.isclose(np.mean(data, axis=0), 0.0).all())
                    # TODO a theoretical bound should be given
                    # self.assertTrue(np.isclose(np.var(data, axis=0), 1.0, atol=0.04).all())
                    # test running scaler with given initial mean, var.
                    data_list = []
                    for i in range(100):
                        data_list.append(sample_fn())
                    standard_scaler = RunningStandardScaler(dims=dims,
                                                            init_mean=np.mean(data_list, axis=0),
                                                            init_var=np.var(data_list, axis=0),
                                                            init_mean_var_data_count=100)
                    self.assertEqual(standard_scaler._data_count, 100)
                    data = standard_scaler.process(np.array(data_list))
                    self.assertTrue(np.isclose(np.mean(data, axis=0), 0.0).all())
                    # TODO a theoretical bound should be given
                    # self.assertTrue(np.isclose(np.var(data, axis=0), 1.0, atol=0.04).all())
                    new_data_list = []
                    for i in range(100):
                        new_data_list.append(sample_fn())
                    standard_scaler.update_scaler(np.array(new_data_list))
                    self.assertEqual(standard_scaler._data_count, 200)
                    data_list += new_data_list
                    data = standard_scaler.process(np.array(data_list))
                    self.assertTrue(np.isclose(np.mean(data, axis=0), 0.0).all())
                    # TODO a theoretical bound should be given
                    # self.assertTrue(np.isclose(np.var(data, axis=0), 1.0, atol=0.04).all())
                except ShapeNotCompatibleError as e:
                    from baconian.common.spaces import Box
                    # Box spaces must always be compatible; anything else may skip
                    if isinstance(sample_space, Box):
                        raise ValueError
                    else:
                        pass
| 54.538071
| 97
| 0.475428
| 1,143
| 10,744
| 4.307962
| 0.097113
| 0.090983
| 0.084484
| 0.063972
| 0.878351
| 0.83753
| 0.832859
| 0.826361
| 0.819456
| 0.791836
| 0
| 0.027877
| 0.429077
| 10,744
| 196
| 98
| 54.816327
| 0.774861
| 0.113738
| 0
| 0.797297
| 0
| 0
| 0.012426
| 0.004844
| 0
| 0
| 0
| 0.005102
| 0.209459
| 1
| 0.013514
| false
| 0.013514
| 0.047297
| 0
| 0.067568
| 0.006757
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
782c50e5987ced0455fdfb81f1be67266c3a6673
| 4,198
|
py
|
Python
|
z2/part3/updated_part2_batch/jm/parser_errors_2/652951292.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 1
|
2020-04-16T12:13:47.000Z
|
2020-04-16T12:13:47.000Z
|
z2/part3/updated_part2_batch/jm/parser_errors_2/652951292.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 18
|
2020-03-06T17:50:15.000Z
|
2020-05-19T14:58:30.000Z
|
z2/part3/updated_part2_batch/jm/parser_errors_2/652951292.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 18
|
2020-03-06T17:45:13.000Z
|
2020-06-09T19:18:31.000Z
|
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 652951292
"""
"""
random actions, total chaos
"""
# Auto-generated random-action scenario (uuid 652951292): replays a fixed
# sequence of moves and checks each API call's exact return value.
# assumes gamma_new(width, height, players, areas) — TODO confirm against part1
board = gamma_new(5, 5, 3, 1)
assert board is not None
assert gamma_move(board, 1, 2, 1) == 1
assert gamma_move(board, 1, 1, 2) == 0
assert gamma_move(board, 2, 0, 1) == 1
assert gamma_move(board, 3, 1, 1) == 1
assert gamma_move(board, 1, 4, 1) == 0
assert gamma_move(board, 2, 4, 2) == 0
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 4, 1) == 0
assert gamma_move(board, 3, 4, 2) == 0
# snapshot of the rendered board after the opening moves
board303735015 = gamma_board(board)
assert board303735015 is not None
assert board303735015 == (".....\n"
                          ".....\n"
                          ".....\n"
                          "231..\n"
                          ".....\n")
del board303735015
board303735015 = None
assert gamma_move(board, 1, 0, 3) == 0
assert gamma_free_fields(board, 1) == 3
assert gamma_move(board, 2, 4, 1) == 0
assert gamma_move(board, 2, 3, 3) == 0
assert gamma_golden_move(board, 2, 1, 2) == 0
# second snapshot: board unchanged by the rejected moves above
board547078655 = gamma_board(board)
assert board547078655 is not None
assert board547078655 == (".....\n"
                          ".....\n"
                          ".....\n"
                          "231..\n"
                          ".....\n")
del board547078655
board547078655 = None
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_move(board, 1, 0, 4) == 0
assert gamma_busy_fields(board, 1) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 0, 3) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 2, 0) == 0
assert gamma_move(board, 3, 2, 2) == 0
assert gamma_move(board, 1, 0, 1) == 0
assert gamma_busy_fields(board, 1) == 1
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 1, 4, 2) == 0
assert gamma_move(board, 2, 1, 3) == 0
assert gamma_move(board, 3, 3, 4) == 0
assert gamma_move(board, 1, 1, 4) == 0
assert gamma_move(board, 1, 1, 0) == 0
assert gamma_move(board, 2, 0, 0) == 1
assert gamma_move(board, 2, 4, 3) == 0
assert gamma_busy_fields(board, 2) == 2
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 4, 0) == 0
assert gamma_move(board, 1, 4, 4) == 0
assert gamma_move(board, 2, 3, 4) == 0
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_move(board, 3, 2, 0) == 0
assert gamma_move(board, 1, 0, 4) == 0
assert gamma_move(board, 2, 3, 4) == 0
assert gamma_move(board, 2, 4, 4) == 0
assert gamma_move(board, 3, 0, 3) == 0
assert gamma_move(board, 3, 4, 3) == 0
assert gamma_move(board, 1, 3, 4) == 0
assert gamma_move(board, 2, 1, 3) == 0
assert gamma_move(board, 2, 4, 3) == 0
assert gamma_busy_fields(board, 2) == 2
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 2, 2) == 1
assert gamma_move(board, 2, 2, 0) == 0
# player 2 takes player 3's field at (1, 1) with a golden move
assert gamma_golden_move(board, 2, 1, 1) == 1
assert gamma_move(board, 3, 0, 2) == 1
assert gamma_move(board, 3, 0, 0) == 0
assert gamma_move(board, 1, 4, 1) == 0
assert gamma_move(board, 2, 1, 0) == 1
assert gamma_busy_fields(board, 2) == 4
assert gamma_move(board, 3, 0, 0) == 0
assert gamma_move(board, 3, 0, 3) == 1
assert gamma_move(board, 1, 1, 4) == 0
assert gamma_move(board, 1, 1, 4) == 0
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_move(board, 2, 2, 1) == 0
# player 2 spent its golden move, so no longer possible
assert gamma_golden_possible(board, 2) == 0
assert gamma_move(board, 3, 0, 3) == 0
assert gamma_move(board, 1, 4, 3) == 0
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_move(board, 2, 4, 1) == 0
assert gamma_move(board, 3, 2, 4) == 0
assert gamma_free_fields(board, 3) == 3
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_move(board, 1, 3, 4) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_move(board, 1, 4, 4) == 0
assert gamma_move(board, 2, 3, 4) == 0
assert gamma_busy_fields(board, 2) == 4
assert gamma_golden_possible(board, 2) == 0
assert gamma_move(board, 3, 0, 2) == 0
assert gamma_move(board, 1, 0, 4) == 0
assert gamma_move(board, 2, 0, 3) == 0
assert gamma_golden_move(board, 2, 1, 2) == 0
# release the board at the end of the scenario
gamma_delete(board)
| 31.56391
| 46
| 0.652692
| 758
| 4,198
| 3.455145
| 0.047493
| 0.357006
| 0.383734
| 0.511646
| 0.829324
| 0.82016
| 0.777396
| 0.69034
| 0.620848
| 0.593356
| 0
| 0.12252
| 0.183421
| 4,198
| 132
| 47
| 31.80303
| 0.641482
| 0
| 0
| 0.474138
| 0
| 0
| 0.017032
| 0
| 0
| 0
| 0
| 0
| 0.775862
| 1
| 0
| false
| 0
| 0.008621
| 0
| 0.008621
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7834428fd9aa6819e5b9eb1a6b22e142e2962b05
| 2,734
|
py
|
Python
|
Rprepper.py
|
TimNicholsonShaw/BG_GS_Nanopore_Analysis
|
6720ebc399cbb6635762998e241670cc078a9869
|
[
"MIT"
] | null | null | null |
Rprepper.py
|
TimNicholsonShaw/BG_GS_Nanopore_Analysis
|
6720ebc399cbb6635762998e241670cc078a9869
|
[
"MIT"
] | null | null | null |
Rprepper.py
|
TimNicholsonShaw/BG_GS_Nanopore_Analysis
|
6720ebc399cbb6635762998e241670cc078a9869
|
[
"MIT"
] | null | null | null |
from tools import samReader, samExtender, filterProperMapped, filterSenseStrand
import pandas as pd
def exportForRAnalysis(sam_file_locs, outFile, metadata=""):
    """Read, extend and filter SAM files, then export one combined CSV for R.

    Every file in ``sam_file_locs`` is read with ``samReader``, extended with
    ``samExtender`` and restricted to properly mapped reads with
    ``filterProperMapped``.  When ``metadata`` is given it must be a CSV with
    one row per SAM file (same order as ``sam_file_locs``); that row's columns
    are copied onto the file's records.  Otherwise a ``SAMPLE`` column holding
    the file's basename is added.

    :param sam_file_locs: list of SAM file paths
    :param outFile: path of the CSV written at the end
    :param metadata: optional path to a metadata CSV (one row per SAM file)
    :return: the combined pandas DataFrame that was written to ``outFile``
    :raises ValueError: if no files are given or metadata rows don't match
    """
    frames = []
    if metadata:
        # metadata order must match sam_file_locs order
        meta = pd.read_csv(metadata)
        cols = meta.columns
        if len(sam_file_locs) != len(meta):
            raise ValueError("metadata must have exactly one row per SAM file")
        for i, loc in enumerate(sam_file_locs):
            sam_df = filterProperMapped(samExtender(samReader(loc)))
            md = meta.loc[i]
            for col in cols:
                sam_df[col] = md[col]
            print(loc, "->", md[cols[0]])
            frames.append(sam_df)
    else:
        rows_so_far = 0
        for file in sam_file_locs:
            sam_df = filterProperMapped(samExtender(samReader(file)))
            # label each record with the file's basename
            sam_df["SAMPLE"] = file[file.rfind("/") + 1:]
            frames.append(sam_df)
            rows_so_far += len(sam_df)
            print(file, rows_so_far)
    if not frames:
        raise ValueError("no SAM files given")
    # single concat instead of DataFrame.append in a loop: append was removed
    # in pandas 2.0 and repeated appending is quadratic
    out_df = pd.concat(frames)
    out_df.to_csv(outFile, index=False)
    return out_df
def exportForRAnalysisSenseStrandOnly(sam_file_locs, outFile, metadata=""):
    """Combine several SAM files into one CSV, keeping sense-strand reads only.

    Same pipeline as exportForRAnalysis, but filters with filterSenseStrand
    instead of filterProperMapped.

    :param sam_file_locs: list of SAM file paths to process.
    :param outFile: path of the CSV file to write.
    :param metadata: optional path to a CSV of per-sample metadata; row i of
        the metadata is attached to sam_file_locs[i], so row order must match
        the file order.
    :return: the combined pandas DataFrame that was written to outFile.
    """
    frames = []
    if metadata:
        md_df = pd.read_csv(metadata)
        cols = md_df.columns
        # One metadata row per SAM file, in matching order.
        assert len(sam_file_locs) == len(md_df)
        for i, loc in enumerate(sam_file_locs):
            sam_df = filterSenseStrand(samExtender(samReader(loc)))
            md = md_df.loc[i]
            # Broadcast this sample's metadata onto every read row.
            for col in cols:
                sam_df[col] = md[col]
            print(loc, "->", md[cols[0]])
            frames.append(sam_df)
    else:
        for file in sam_file_locs:
            sam_df = filterSenseStrand(samExtender(samReader(file)))
            # No metadata: tag rows with the originating file's basename.
            sam_df["SAMPLE"] = file[file.rfind("/")+1:]
            frames.append(sam_df)
            # Progress: cumulative row count so far.
            print(file, sum(len(f) for f in frames))
    # DataFrame.append was removed in pandas 2.0; a single concat is the
    # supported (and faster, non-quadratic) replacement.
    out_df = pd.concat(frames)
    out_df.to_csv(outFile, index=False)
    return out_df
| 28.479167
| 79
| 0.554865
| 357
| 2,734
| 4.002801
| 0.165266
| 0.111966
| 0.107768
| 0.041987
| 0.89993
| 0.89993
| 0.89993
| 0.89993
| 0.89993
| 0.89993
| 0
| 0.002252
| 0.350402
| 2,734
| 95
| 80
| 28.778947
| 0.802365
| 0.072056
| 0
| 0.9375
| 0
| 0
| 0.007123
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 1
| 0.03125
| false
| 0
| 0.03125
| 0
| 0.09375
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
786c26ce39ac33ef2aa6b0d42e9a7b93fab86591
| 13,613
|
py
|
Python
|
tests/test_add.py
|
dsei210s19-applied-ml-and-dm/OPTMOD
|
85edea79284097cfaeeaa8eb0be3c9595b4a733f
|
[
"BSD-2-Clause"
] | 3
|
2019-02-14T23:45:06.000Z
|
2019-08-23T19:20:31.000Z
|
tests/test_add.py
|
dsei210s19-applied-ml-and-dm/OPTMOD
|
85edea79284097cfaeeaa8eb0be3c9595b4a733f
|
[
"BSD-2-Clause"
] | 21
|
2019-01-17T07:30:42.000Z
|
2020-04-11T18:26:43.000Z
|
tests/test_add.py
|
dsei210s19-applied-ml-and-dm/OPTMOD
|
85edea79284097cfaeeaa8eb0be3c9595b4a733f
|
[
"BSD-2-Clause"
] | 1
|
2022-01-21T23:56:42.000Z
|
2022-01-21T23:56:42.000Z
|
import optmod
import unittest
import numpy as np
class TestAdd(unittest.TestCase):
    """Unit tests for optmod's addition function ('add').

    Covers construction, constant folding, scalar/matrix operand
    combinations, the x + 0 identity, derivatives, and the affine-analysis
    and standard-components introspection hooks.
    """
    def test_contruction(self):
        """add() wraps raw numbers in Constant and requires >= 2 arguments."""
        # NOTE(review): method name has a typo ("contruction"); it still
        # starts with "test" so unittest discovery is unaffected.
        x = optmod.variable.VariableScalar(name='x')
        f = optmod.function.add([x, optmod.expression.make_Expression(1.)])
        self.assertEqual(f.name, 'add')
        self.assertEqual(len(f.arguments), 2)
        self.assertTrue(f.arguments[0] is x)
        self.assertTrue(isinstance(f.arguments[1], optmod.constant.Constant))
        self.assertEqual(f.arguments[1].get_value(), 1.)
        # Fewer than two operands is rejected with AssertionError.
        self.assertRaises(AssertionError, optmod.function.add, [x])
        self.assertRaises(AssertionError, optmod.function.add, [])
    def test_constant_constant(self):
        """Constant + Constant folds to a constant with the summed value."""
        a = optmod.constant.Constant(4.)
        b = optmod.constant.Constant(5.)
        f = a + b
        self.assertTrue(f.is_constant())
        self.assertEqual(f.get_value(), 9.)
    def test_scalar_scalar(self):
        """Scalar variable + number / variable combinations and their repr."""
        x = optmod.variable.VariableScalar(name='x', value=2.)
        y = optmod.variable.VariableScalar(name='y', value=3.)
        f = x + 1.
        self.assertTrue(isinstance(f, optmod.function.add))
        self.assertTrue(f.arguments[0] is x)
        self.assertTrue(isinstance(f.arguments[1], optmod.constant.Constant))
        self.assertEqual(f.arguments[1].get_value(), 1.)
        self.assertEqual(f.get_value(), 3.)
        self.assertEqual(str(f), 'x + %s' %optmod.utils.repr_number(1.))
        # Right-addition normalizes to the same (variable, constant) order.
        f = 1. + x
        self.assertTrue(isinstance(f, optmod.function.add))
        self.assertTrue(f.arguments[0] is x)
        self.assertTrue(isinstance(f.arguments[1], optmod.constant.Constant))
        self.assertEqual(f.arguments[1].get_value(), 1.)
        self.assertEqual(f.get_value(), 3.)
        self.assertEqual(str(f), 'x + %s' %optmod.utils.repr_number(1.))
        f = x + y
        self.assertTrue(isinstance(f, optmod.function.add))
        self.assertTrue(f.arguments[0] is x)
        self.assertTrue(f.arguments[1] is y)
        self.assertEqual(f.get_value(), 5.)
        self.assertEqual(str(f), 'x + y')
        # Chained addition flattens into a single add with three arguments.
        f = 4. + x + y
        self.assertTrue(isinstance(f, optmod.function.add))
        self.assertTrue(isinstance(f.arguments[1], optmod.constant.Constant))
        self.assertEqual(f.arguments[1].get_value(), 4.)
        self.assertTrue(f.arguments[0] is x)
        self.assertTrue(f.arguments[2] is y)
        self.assertEqual(f.get_value(), 9.)
        self.assertEqual(str(f), 'x + %s + y' %optmod.utils.repr_number(4.))
    def test_scalar_matrix(self):
        """Scalar-with-matrix addition broadcasts elementwise."""
        rn = optmod.utils.repr_number
        value = [[1., 2., 3.], [4., 5., 6.]]
        x = optmod.variable.VariableScalar(name='x', value=2.)
        y = optmod.variable.VariableMatrix(name='y', value=value)
        r = np.random.random((2,3))
        f = x + r
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        for i in range(2):
            for j in range(3):
                fij = f[i,j]
                self.assertTrue(isinstance(fij, optmod.function.add))
                self.assertTrue(fij.arguments[0] is x)
                self.assertEqual(fij.arguments[1].get_value(), r[i,j])
        self.assertTrue(isinstance(f.get_value(), np.matrix))
        self.assertTrue(np.all(f.get_value() == 2. + r))
        self.assertEqual(str(f),
                         ('[[ x + %s, x + %s, x + %s ],\n' %(rn(r[0,0]), rn(r[0,1]), rn(r[0,2])) +
                          ' [ x + %s, x + %s, x + %s ]]\n' %(rn(r[1,0]), rn(r[1,1]), rn(r[1,2]))))
        # Reversed operand order gives the same normalized structure.
        f = r + x
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        for i in range(2):
            for j in range(3):
                fij = f[i,j]
                self.assertTrue(isinstance(fij, optmod.function.add))
                self.assertTrue(fij.arguments[0] is x)
                self.assertEqual(fij.arguments[1].get_value(), r[i,j])
        self.assertTrue(isinstance(f.get_value(), np.matrix))
        self.assertTrue(np.all(f.get_value() == 2. + r))
        self.assertEqual(str(f),
                         ('[[ x + %s, x + %s, x + %s ],\n' %(rn(r[0,0]), rn(r[0,1]), rn(r[0,2])) +
                          ' [ x + %s, x + %s, x + %s ]]\n' %(rn(r[1,0]), rn(r[1,1]), rn(r[1,2]))))
        # np.matrix operands behave the same as plain ndarrays.
        f = x + np.matrix(r)
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        self.assertTrue(np.all(f.get_value() == 2. + r))
        self.assertEqual(str(f),
                         ('[[ x + %s, x + %s, x + %s ],\n' %(rn(r[0,0]), rn(r[0,1]), rn(r[0,2])) +
                          ' [ x + %s, x + %s, x + %s ]]\n' %(rn(r[1,0]), rn(r[1,1]), rn(r[1,2]))))
        f = np.matrix(r) + x
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        self.assertTrue(np.all(f.get_value() == 2. + r))
        self.assertEqual(str(f),
                         ('[[ x + %s, x + %s, x + %s ],\n' %(rn(r[0,0]), rn(r[0,1]), rn(r[0,2])) +
                          ' [ x + %s, x + %s, x + %s ]]\n' %(rn(r[1,0]), rn(r[1,1]), rn(r[1,2]))))
        # Variable matrix + scalar number.
        f = y + 1
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        for i in range(2):
            for j in range(3):
                fij = f[i,j]
                self.assertTrue(isinstance(fij, optmod.function.add))
                self.assertTrue(fij.arguments[0] is y[i,j])
                self.assertEqual(fij.arguments[1].get_value(), 1.)
        self.assertTrue(isinstance(f.get_value(), np.matrix))
        self.assertTrue(np.all(f.get_value() == np.array(value) + 1))
        self.assertEqual(str(f),
                         ('[[ y[0,0] + %s, y[0,1] + %s, y[0,2] + %s ],\n' %(rn(1), rn(1), rn(1)) +
                          ' [ y[1,0] + %s, y[1,1] + %s, y[1,2] + %s ]]\n' %(rn(1), rn(1), rn(1))))
        f = 1 + y
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        self.assertTrue(np.all(f.get_value() == np.array(value) + 1))
        self.assertEqual(str(f),
                         ('[[ y[0,0] + %s, y[0,1] + %s, y[0,2] + %s ],\n' %(rn(1), rn(1), rn(1)) +
                          ' [ y[1,0] + %s, y[1,1] + %s, y[1,2] + %s ]]\n' %(rn(1), rn(1), rn(1))))
        # Scalar variable + matrix variable.
        f = x + y
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        for i in range(2):
            for j in range(3):
                fij = f[i,j]
                self.assertTrue(isinstance(fij, optmod.function.add))
                self.assertTrue(fij.arguments[0] is y[i,j])
                self.assertTrue(fij.arguments[1] is x)
        self.assertTrue(isinstance(f.get_value(), np.matrix))
        self.assertTrue(np.all(f.get_value() == np.array(value) + 2.))
        self.assertEqual(str(f),
                         ('[[ y[0,0] + x, y[0,1] + x, y[0,2] + x ],\n' +
                          ' [ y[1,0] + x, y[1,1] + x, y[1,2] + x ]]\n'))
        f = y + x
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        for i in range(2):
            for j in range(3):
                fij = f[i,j]
                self.assertTrue(isinstance(fij, optmod.function.add))
                self.assertTrue(fij.arguments[0] is y[i,j])
                self.assertTrue(fij.arguments[1] is x)
        self.assertTrue(isinstance(f.get_value(), np.matrix))
        self.assertTrue(np.all(f.get_value() == np.array(value) + 2.))
        self.assertEqual(str(f),
                         ('[[ y[0,0] + x, y[0,1] + x, y[0,2] + x ],\n' +
                          ' [ y[1,0] + x, y[1,1] + x, y[1,2] + x ]]\n'))
        # Nested sums flatten: each element keeps all four operands in order.
        f = (y + 1) + (3 + x)
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        for i in range(2):
            for j in range(3):
                self.assertEqual(str(f[i,j]), 'y[%d,%d] + %s + x + %s' %(i, j, rn(1), rn(3)))
                self.assertTrue(f[i,j].arguments[0] is y[i,j])
                self.assertTrue(f[i,j].arguments[1].is_constant())
                self.assertTrue(f[i,j].arguments[2] is x)
                self.assertTrue(f[i,j].arguments[3].is_constant())
        self.assertTrue(np.all(f.get_value() == np.array(value) + 1. + 3. + 2.))
    def test_matrix_matrix(self):
        """Elementwise addition of matrix variables and numeric matrices."""
        rn = optmod.utils.repr_number
        value1 = [[1., 2., 3.], [4., 5., 6.]]
        value2 = np.random.random((2,3))
        x = optmod.variable.VariableMatrix(name='x', value=value1)
        y = optmod.variable.VariableMatrix(name='y', value=value2)
        f = x + value2
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        self.assertTupleEqual(f.shape, (2,3))
        for i in range(2):
            for j in range(3):
                fij = f[i,j]
                self.assertEqual(str(fij), 'x[%d,%d] + %s' %(i, j, rn(value2[i,j])))
        self.assertTrue(np.all(f.get_value() == np.matrix(value1) + value2))
        f = value2 + x
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        self.assertTupleEqual(f.shape, (2,3))
        for i in range(2):
            for j in range(3):
                fij = f[i,j]
                self.assertEqual(str(fij), 'x[%d,%d] + %s' %(i, j, rn(value2[i,j])))
        self.assertTrue(np.all(f.get_value() == np.matrix(value1) + value2))
        # Variable matrix + variable matrix keeps left-to-right operand order.
        f = x + y
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        self.assertTupleEqual(f.shape, (2,3))
        for i in range(2):
            for j in range(3):
                fij = f[i,j]
                self.assertEqual(str(fij), 'x[%d,%d] + y[%d,%d]' %(i, j, i, j))
        self.assertTrue(np.all(f.get_value() == np.matrix(value1) + value2))
        f = y + x
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        self.assertTupleEqual(f.shape, (2,3))
        for i in range(2):
            for j in range(3):
                fij = f[i,j]
                self.assertEqual(str(fij), 'y[%d,%d] + x[%d,%d]' %(i, j, i, j))
        self.assertTrue(np.all(f.get_value() == np.matrix(value1) + value2))
    def test_zero(self):
        """x + 0 and 0 + x are simplified to x itself (same object)."""
        x = optmod.variable.VariableScalar(name='x', value=3.)
        f = x + 0
        self.assertTrue(f is x)
        f = 0 + x
        self.assertTrue(f is x)
    def test_derivative(self):
        """Derivatives of sums are constants counting variable occurrences."""
        x = optmod.variable.VariableScalar(name='x', value=3.)
        y = optmod.variable.VariableScalar(name='y', value=4.)
        f = x + 1
        fx = f.get_derivative(x)
        fy = f.get_derivative(y)
        self.assertTrue(isinstance(fx, optmod.constant.Constant))
        self.assertEqual(fx.get_value(), 1.)
        self.assertTrue(isinstance(fy, optmod.constant.Constant))
        self.assertEqual(fy.get_value(), 0.)
        f = x + y
        fx = f.get_derivative(x)
        fy = f.get_derivative(y)
        self.assertTrue(isinstance(fx, optmod.constant.Constant))
        self.assertEqual(fx.get_value(), 1.)
        self.assertTrue(isinstance(fy, optmod.constant.Constant))
        self.assertEqual(fy.get_value(), 1.)
        # x appears three times below, so df/dx == 3.
        f = (x + 1) + (x + 3) + (y + (x + 5.))
        fx = f.get_derivative(x)
        fy = f.get_derivative(y)
        self.assertTrue(isinstance(fx, optmod.constant.Constant))
        self.assertEqual(fx.get_value(), 3.)
        self.assertTrue(isinstance(fy, optmod.constant.Constant))
        self.assertEqual(fy.get_value(), 1.)
        f = x + x
        fx = f.get_derivative(x)
        self.assertTrue(fx.is_constant(2.))
        self.assertEqual(str(fx), optmod.utils.repr_number(2))
        # A shared subexpression used twice doubles both value and gradient.
        f1 = x + 1 + y
        f2 = f1 + f1
        f2x = f2.get_derivative(x)
        f2y = f2.get_derivative(y)
        self.assertEqual(f2.get_value(), 2.*(3.+1.+4.))
        self.assertEqual(f2x.get_value(), 2.)
        self.assertEqual(f2y.get_value(), 2.)
    def test_analyze(self):
        """__analyze__ reports affine form: offset 'b' and coefficients 'a'."""
        x = optmod.variable.VariableScalar('x')
        y = optmod.variable.VariableScalar('y')
        f = x + 1
        prop = f.__analyze__()
        self.assertTrue(prop['affine'])
        self.assertEqual(prop['b'], 1.)
        self.assertEqual(len(prop['a']), 1)
        self.assertEqual(prop['a'][x], 1.)
        f = 2 + x
        prop = f.__analyze__()
        self.assertTrue(prop['affine'])
        self.assertEqual(prop['b'], 2.)
        self.assertEqual(len(prop['a']), 1)
        self.assertEqual(prop['a'][x], 1.)
        # Repeated variables accumulate into a single coefficient.
        f = x + y + x
        prop = f.__analyze__()
        self.assertTrue(prop['affine'])
        self.assertEqual(prop['b'], 0.)
        self.assertEqual(len(prop['a']), 2)
        self.assertEqual(prop['a'][x], 2.)
        self.assertEqual(prop['a'][y], 1.)
        f = x + y + 10. + x
        prop = f.__analyze__()
        self.assertTrue(prop['affine'])
        self.assertEqual(prop['b'], 10.)
        self.assertEqual(len(prop['a']), 2)
        self.assertEqual(prop['a'][x], 2.)
        self.assertEqual(prop['a'][y], 1.)
    def test_std_components(self):
        """__get_std_components__ exposes objective, gradient, and Hessian."""
        x = optmod.variable.VariableScalar('x')
        y = optmod.variable.VariableScalar('y')
        f = x + y + x
        comp = f.__get_std_components__()
        phi = comp['phi']
        gphi_list = comp['gphi_list']
        Hphi_list = comp['Hphi_list']
        self.assertTrue(phi is f)
        self.assertEqual(len(gphi_list), 2)
        v, exp = gphi_list[0]
        self.assertTrue(v is x)
        self.assertTrue(exp.is_constant())
        self.assertEqual(exp.get_value(), 2.)
        v, exp = gphi_list[1]
        self.assertTrue(v is y)
        self.assertTrue(exp.is_constant())
        self.assertEqual(exp.get_value(), 1.)
        # Addition is linear, so the Hessian contribution is empty.
        self.assertEqual(len(Hphi_list), 0)
| 40.275148
| 98
| 0.52751
| 1,885
| 13,613
| 3.752785
| 0.048276
| 0.160305
| 0.12553
| 0.091886
| 0.859061
| 0.835171
| 0.802092
| 0.757422
| 0.752474
| 0.739468
| 0
| 0.030873
| 0.293323
| 13,613
| 337
| 99
| 40.394659
| 0.70447
| 0
| 0
| 0.66426
| 0
| 0.028881
| 0.057078
| 0
| 0
| 0
| 0
| 0
| 0.527076
| 1
| 0.032491
| false
| 0
| 0.01083
| 0
| 0.046931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7882f75ee87c271d06ff6a6bfbbaf202f0fec4fe
| 27,010
|
py
|
Python
|
layint_api/apis/analytics_api.py
|
LayeredInsight/layint_api_python
|
a5c9a5b24098bd823c5102b7ab9e4745432f19b4
|
[
"Apache-2.0"
] | null | null | null |
layint_api/apis/analytics_api.py
|
LayeredInsight/layint_api_python
|
a5c9a5b24098bd823c5102b7ab9e4745432f19b4
|
[
"Apache-2.0"
] | null | null | null |
layint_api/apis/analytics_api.py
|
LayeredInsight/layint_api_python
|
a5c9a5b24098bd823c5102b7ab9e4745432f19b4
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Layered Insight Assessment, Compliance, Witness & Control
LI Assessment & Compliance performs static vulnerability analysis, license and package compliance. LI Witness provides deep insight and analytics into containerized applications. Control provides dynamic runtime security and analytics for containerized applications. You can find out more about the Layered Insight Suite at [http://layeredinsight.com](http://layeredinsight.com).
OpenAPI spec version: 0.10
Contact: help@layeredinsight.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class AnalyticsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
    def __init__(self, api_client=None):
        """Create the analytics API wrapper.

        :param api_client: optional preconfigured ApiClient instance. When
            omitted, the shared client held by the global Configuration is
            reused (and lazily created on first use).
        """
        config = Configuration()
        if api_client:
            self.api_client = api_client
        else:
            # Lazily create and cache a shared client on the configuration
            # so every default-constructed API object reuses it.
            if not config.api_client:
                config.api_client = ApiClient()
            self.api_client = config.api_client
def cve_search(self, cve_search_field, **kwargs):
"""
Searches image scan results for specified CVE ID
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.cve_search(cve_search_field, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param CveSearchField cve_search_field: (required)
:return: CveSearch
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.cve_search_with_http_info(cve_search_field, **kwargs)
else:
(data) = self.cve_search_with_http_info(cve_search_field, **kwargs)
return data
    def cve_search_with_http_info(self, cve_search_field, **kwargs):
        """
        Searches image scan results for specified CVE ID

        Low-level variant: builds the request and delegates to
        ApiClient.call_api. Synchronous by default; pass a `callback`
        keyword to get the request thread instead.

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param CveSearchField cve_search_field: (required)
        :return: CveSearch
            If the method is called asynchronously,
            returns the request thread.
        :raises TypeError: on unknown keyword arguments.
        :raises ValueError: when cve_search_field is missing or None.
        """
        # Keywords this method accepts, beyond the named parameter.
        all_params = ['cve_search_field']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshots self, the named params and kwargs; unknown
        # kwargs are rejected, known ones are merged into the snapshot.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method cve_search" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'cve_search_field' is set
        if ('cve_search_field' not in params) or (params['cve_search_field'] is None):
            raise ValueError("Missing the required parameter `cve_search_field` when calling `cve_search`")
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # The search field travels as the JSON request body.
        body_params = None
        if 'cve_search_field' in params:
            body_params = params['cve_search_field']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['ApiKey']
        return self.api_client.call_api('/Scan/CveSearch', 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='CveSearch',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_stats(self, **kwargs):
"""
Gets vulnerability statistics for user's images
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_stats(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param TagNames tags: Tag name(s) to filter results
:return: VulnerabilityStats
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_stats_with_http_info(**kwargs)
else:
(data) = self.get_stats_with_http_info(**kwargs)
return data
    def get_stats_with_http_info(self, **kwargs):
        """
        Gets vulnerability statistics for user's images

        Low-level variant: builds the request and delegates to
        ApiClient.call_api. Synchronous by default; pass a `callback`
        keyword to get the request thread instead.

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param TagNames tags: Tag name(s) to filter results
        :return: VulnerabilityStats
            If the method is called asynchronously,
            returns the request thread.
        :raises TypeError: on unknown keyword arguments.
        """
        # Keywords this method accepts.
        all_params = ['tags']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshots self and kwargs; unknown kwargs are rejected,
        # known ones are merged into the snapshot.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_stats" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # The optional tag filter travels as the JSON request body.
        body_params = None
        if 'tags' in params:
            body_params = params['tags']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['ApiKey']
        return self.api_client.call_api('/Scan/Stats', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='VulnerabilityStats',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def package_search(self, **kwargs):
"""
Searches for images with a specified software package
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.package_search(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param PackageSearchData package_search_data:
:return: PackageSearchResults
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.package_search_with_http_info(**kwargs)
else:
(data) = self.package_search_with_http_info(**kwargs)
return data
    def package_search_with_http_info(self, **kwargs):
        """
        Searches for images with a specified software package

        Low-level variant: builds the request and delegates to
        ApiClient.call_api. Synchronous by default; pass a `callback`
        keyword to get the request thread instead.

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param PackageSearchData package_search_data:
        :return: PackageSearchResults
            If the method is called asynchronously,
            returns the request thread.
        :raises TypeError: on unknown keyword arguments.
        """
        # Keywords this method accepts.
        all_params = ['package_search_data']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshots self and kwargs; unknown kwargs are rejected,
        # known ones are merged into the snapshot.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method package_search" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # The search criteria travel as the JSON request body.
        body_params = None
        if 'package_search_data' in params:
            body_params = params['package_search_data']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['ApiKey']
        return self.api_client.call_api('/Scan/PackageSearch', 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='PackageSearchResults',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def stats_history_get(self, starting_date, ending_date, **kwargs):
"""
Get vulnerability history over a time period
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.stats_history_get(starting_date, ending_date, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str starting_date: Start date for period of interest (required)
:param str ending_date: End date for period of interest (required)
:return: StatsHistory
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.stats_history_get_with_http_info(starting_date, ending_date, **kwargs)
else:
(data) = self.stats_history_get_with_http_info(starting_date, ending_date, **kwargs)
return data
    def stats_history_get_with_http_info(self, starting_date, ending_date, **kwargs):
        """
        Get vulnerability history over a time period

        Low-level variant: builds the request and delegates to
        ApiClient.call_api. Synchronous by default; pass a `callback`
        keyword to get the request thread instead.

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str starting_date: Start date for period of interest (required)
        :param str ending_date: End date for period of interest (required)
        :return: StatsHistory
            If the method is called asynchronously,
            returns the request thread.
        :raises TypeError: on unknown keyword arguments.
        :raises ValueError: when starting_date or ending_date is missing/None.
        """
        # Keywords this method accepts, beyond the named parameters.
        all_params = ['starting_date', 'ending_date']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshots self, the named params and kwargs; unknown
        # kwargs are rejected, known ones are merged into the snapshot.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method stats_history_get" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'starting_date' is set
        if ('starting_date' not in params) or (params['starting_date'] is None):
            raise ValueError("Missing the required parameter `starting_date` when calling `stats_history_get`")
        # verify the required parameter 'ending_date' is set
        if ('ending_date' not in params) or (params['ending_date'] is None):
            raise ValueError("Missing the required parameter `ending_date` when calling `stats_history_get`")
        collection_formats = {}
        # Both dates are substituted into the URL path, not the body.
        path_params = {}
        if 'starting_date' in params:
            path_params['StartingDate'] = params['starting_date']
        if 'ending_date' in params:
            path_params['EndingDate'] = params['ending_date']
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['ApiKey']
        return self.api_client.call_api('/Scan/StatsHistory', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='StatsHistory',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def top_cves(self, **kwargs):
"""
Returns a list of most common/severe vulnerabilities present in a users images
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.top_cves(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param CveSearchData cve_search_data: Number of vulnerable images to return
:return: TopCves
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.top_cves_with_http_info(**kwargs)
else:
(data) = self.top_cves_with_http_info(**kwargs)
return data
    def top_cves_with_http_info(self, **kwargs):
        """
        Returns a list of most common/severe vulnerabilities present in a users images

        Low-level variant: builds the request and delegates to
        ApiClient.call_api. Synchronous by default; pass a `callback`
        keyword to get the request thread instead.

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param CveSearchData cve_search_data: Number of vulnerable images to return
        :return: TopCves
            If the method is called asynchronously,
            returns the request thread.
        :raises TypeError: on unknown keyword arguments.
        """
        # Keywords this method accepts.
        all_params = ['cve_search_data']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshots self and kwargs; unknown kwargs are rejected,
        # known ones are merged into the snapshot.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method top_cves" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # The search criteria travel as the JSON request body.
        body_params = None
        if 'cve_search_data' in params:
            body_params = params['cve_search_data']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['ApiKey']
        return self.api_client.call_api('/Scan/TopCves', 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='TopCves',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def top_vuln_images(self, **kwargs):
"""
Returns a list of most vulnerable images
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.top_vuln_images(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param VulnerableImageData vulnerable_image_data:
:return: TopVulnerableImages
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.top_vuln_images_with_http_info(**kwargs)
else:
(data) = self.top_vuln_images_with_http_info(**kwargs)
return data
def top_vuln_images_with_http_info(self, **kwargs):
    """
    Returns a list of most vulnerable images

    By default this performs a synchronous HTTP request. Supplying a
    `callback` function switches the call to asynchronous mode, in which
    case the request thread is returned and the callback is invoked with
    the response when it arrives.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.top_vuln_images_with_http_info(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param VulnerableImageData vulnerable_image_data:
    :return: TopVulnerableImages
        If the method is called asynchronously,
        returns the request thread.
    """
    # Full set of keyword arguments this endpoint accepts.
    all_params = [
        'vulnerable_image_data',
        'callback',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Validate and collect the caller-supplied keyword arguments.
    params = {}
    for key, val in iteritems(kwargs):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method top_vuln_images" % key
            )
        params[key] = val

    # This endpoint takes no path or query parameters; the optional
    # request body is the VulnerableImageData payload, if provided.
    body_params = params.get('vulnerable_image_data')

    # Negotiate HTTP headers via the shared API client.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }

    return self.api_client.call_api(
        '/Scan/TopVulnImages', 'POST',
        {},  # path_params
        [],  # query_params
        header_params,
        body=body_params,
        post_params=[],
        files={},
        response_type='TopVulnerableImages',
        auth_settings=['ApiKey'],  # Authentication setting
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
| 40.86233
| 383
| 0.570455
| 2,689
| 27,010
| 5.47936
| 0.086649
| 0.065155
| 0.022804
| 0.02932
| 0.90186
| 0.884146
| 0.870639
| 0.847156
| 0.831818
| 0.824759
| 0
| 0.000343
| 0.35324
| 27,010
| 660
| 384
| 40.924242
| 0.843093
| 0.332729
| 0
| 0.725309
| 0
| 0
| 0.154601
| 0.027971
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040123
| false
| 0
| 0.021605
| 0
| 0.12037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
789509635ce2f8be8443254e3eeec147a95b9d73
| 84,146
|
py
|
Python
|
ME-decompiled.py
|
angelsave/reverse_enginering
|
3d6b46e898535ed162cdcb1b03a90761dcb3bce4
|
[
"MIT"
] | 7
|
2018-05-04T08:28:58.000Z
|
2019-11-06T08:49:02.000Z
|
ME-decompiled.py
|
angelsave/reverse_enginering
|
3d6b46e898535ed162cdcb1b03a90761dcb3bce4
|
[
"MIT"
] | 1
|
2019-09-21T07:29:50.000Z
|
2019-09-22T16:53:04.000Z
|
ME-decompiled.py
|
angelsave/reverse_enginering
|
3d6b46e898535ed162cdcb1b03a90761dcb3bce4
|
[
"MIT"
] | 8
|
2018-05-04T08:28:59.000Z
|
2020-09-27T17:21:28.000Z
|
# uncompyle6 version 3.1.3
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.4.3 (default, Aug 9 2016, 15:36:17)
# [GCC 5.3.1 20160406 (Red Hat 5.3.1-6)]
# Embedded file name: ME.pye
# Compiled at: 2018-02-25 14:25:47
import marshal
exec marshal.loads('c\x00\x00\x00\x00\x00\x00\x00\x001\x00\x00@\x00\x00\x00s\x0e\x00\x00d\x00\x00d\x01\x00l\x00\x00Z\x00\x00d\x02\x00d\x03\x00d\x04\x00d\x05\x00d\x06\x00d\x07\x00d\x08\x00d\t\x00d\n\x00d\x0b\x00d\x0c\x00d\r\x00d\x0e\x00d\x0f\x00d\x10\x00d\x11\x00d\x12\x00d\x11\x00d\n\x00d\x13\x00d\x14\x00d\x15\x00d\x16\x00d\x17\x00d\x18\x00d\x19\x00d\x1a\x00d\x1b\x00d\x1c\x00d\x1d\x00d\x13\x00d\x1e\x00d\x1f\x00d \x00d!\x00d"\x00d#\x00d$\x00d\x15\x00d%\x00d&\x00d\'\x00d(\x00d)\x00d\x1f\x00d*\x00d\x0b\x00d+\x00d,\x00d-\x00d.\x00d/\x00d0\x00d&\x00d1\x00d2\x00d3\x00d4\x00d5\x00d6\x00d7\x00d8\x00d\x07\x00d9\x00d:\x00d;\x00d<\x00d=\x00d>\x00d?\x00d\x0e\x00d@\x00dA\x00dB\x00dC\x00d+\x00dD\x00d\x06\x00d$\x00dE\x00d\x05\x00dF\x00dG\x00dH\x00dI\x00dJ\x00dK\x00dL\x00dM\x00dN\x00d\x1b\x00dC\x00d\x0b\x00dO\x00dP\x00dQ\x00dR\x00dS\x00dT\x00d#\x00d\x11\x00d\x11\x00dU\x00dV\x00dW\x00dX\x00d+\x00dY\x00dF\x00d\'\x00dZ\x00d[\x00d\\\x00d]\x00d^\x00d_\x00d`\x00da\x00d1\x00dB\x00db\x00d_\x00d<\x00dc\x00dH\x00d\x1e\x00dd\x00dd\x00de\x00d]\x00df\x00dD\x00d%\x00dI\x00dg\x00dh\x00di\x00d,\x00dj\x00dk\x00dl\x00d\x17\x00dm\x00dn\x00do\x00dp\x00dq\x00dr\x00ds\x00dt\x00du\x00dA\x00dv\x00dC\x00d2\x00dw\x00dN\x00dx\x00dy\x00dz\x00d{\x00dz\x00d|\x00d 
\x00dB\x00dU\x00dE\x00d}\x00d~\x00dO\x00d\x7f\x00dG\x00dC\x00d6\x00d\x00dF\x00d\x7f\x00d\x00d\x00dW\x00d\x00d\x00dJ\x00d\x1b\x00d]\x00d\x00dS\x00d:\x00dr\x00d\x00d\x00d%\x00d\x05\x00dZ\x00d\x1c\x00d5\x00dy\x00d\'\x00d{\x00d\x00d[\x00d^\x00d\x00d\x17\x00d\x00d\x00d4\x00dY\x00de\x00d\x00dE\x00d\x00dL\x00d\x00d\x00dS\x00d\x00d\x00d\x00d6\x00d.\x00d\x00d`\x00d.\x00d\x0c\x00d\x00d\x00d\x11\x00dN\x00da\x00d\x00d-\x00dr\x00d\x00d1\x00d\x00d\x00d{\x00d+\x00d\x00d\x00d\x19\x00d\x00d\x00d\x00d\x00d\x00d\x00dI\x00d\x00d\x00d\x11\x00dn\x00dw\x00d\x00d\x00d\x00dt\x00d\x05\x00d3\x00d>\x00d\x08\x00di\x00d\x00d[\x00d\x00d\\\x00d\x0e\x00d\\\x00d0\x00d\x11\x00d\x00d\x00d\x00d\x00d\x10\x00d\x15\x00d\x16\x00d\x00d\x18\x00d\x19\x00d\x00d\x1b\x00d\x00d\x00d\x13\x00d\x00di\x00d \x00d!\x00d\x00d#\x00d$\x00d\x00d%\x00d&\x00d\x00d(\x00d)\x00di\x00d*\x00d\x0b\x00d\x00d,\x00d-\x00d\x00d/\x00d0\x00d&\x00d1\x00d2\x00d1\x00d4\x00d5\x00d\x00d7\x00d8\x00d\x00d9\x00d:\x00d\x00d<\x00d=\x00d\x00d?\x00d\x0e\x00d\x00dA\x00dB\x00d\x00d+\x00dD\x00d\x00d$\x00dE\x00d\x00dF\x00dG\x00d1\x00dI\x00dJ\x00d\x00dL\x00dM\x00d\x16\x00d\x1b\x00dC\x00d\x00dO\x00dP\x00d\x00dR\x00dS\x00d\x00d#\x00d\x11\x00d\x11\x00dU\x00dV\x00d\x00dX\x00d+\x00d\x00dF\x00d\'\x00d\x00d[\x00d\\\x00dU\x00d^\x00d_\x00d\x00da\x00d1\x00d\x00db\x00d_\x00d\x00dc\x00dH\x00d\x00dd\x00dd\x00dA\x00d]\x00df\x00d\x00d%\x00dI\x00d\x00dh\x00di\x00d~\x00dj\x00dk\x00du\x00d\x17\x00dm\x00d\x00do\x00dp\x00d\x00dr\x00ds\x00dr\x00du\x00dA\x00d\x00dC\x00d2\x00d\x00dN\x00dx\x00d\x00dz\x00d{\x00dV\x00d|\x00d 
\x00d\x00dU\x00dE\x00d\x00d~\x00dO\x00d\x1e\x00dG\x00dC\x00d\x00d\x00dF\x00db\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x1b\x00d]\x00d\x00dS\x00d:\x00dB\x00d\x00d\x00d\x00d\x05\x00dZ\x00dX\x00d5\x00dy\x00dQ\x00d{\x00d\x00d\x00d^\x00d\x00d\x00d\x00d\x00d\x00dY\x00de\x00d\x00dE\x00d\x00dj\x00d\x00d\x00d\x00d\x00d\x00d\x00d6\x00d.\x00d\x00d`\x00d.\x00d\x00d\x00d\x00d\x00dN\x00da\x00d\t\x00d-\x00dr\x00dW\x00d1\x00d\x00d\x00d{\x00d+\x00dI\x00d\x00d\x19\x00dd\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00dn\x00dw\x00d\x00d\x00d\x00d\x00d\x05\x00d3\x00d\\\x00d\x08\x00di\x00d\x00d[\x00d\x00d\x00d\x0e\x00d\\\x00d\x00d\x11\x00d\x00d\x00d\x00d\x00dj\x00d\x15\x00d\x16\x00dS\x00d\x18\x00d\x19\x00d>\x00d\x1b\x00d\x00dn\x00d\x13\x00d\x00d(\x00d \x00d!\x00d0\x00d#\x00d$\x00dK\x00d%\x00d&\x00d\x00d(\x00d)\x00d\x19\x00d*\x00d\x0b\x00d\x00d,\x00d-\x00dy\x00d/\x00d0\x00d\x00d1\x00d2\x00d*\x00d4\x00d5\x00d\x00d7\x00d8\x00d\x0b\x00d9\x00d:\x00d\x00d<\x00d=\x00d\x00d?\x00d\x0e\x00d\r\x00dA\x00dB\x00d\x00d+\x00dD\x00d;\x00d$\x00dE\x00d\x00dF\x00dG\x00dL\x00dI\x00dJ\x00dP\x00dL\x00dM\x00d\x00d\x1b\x00dC\x00d\x1e\x00dO\x00dP\x00d$\x00dR\x00dS\x00d4\x00d#\x00d\x11\x00d\x1b\x00dU\x00dV\x00d\x00dX\x00d+\x00d\n\x00dF\x00d\'\x00d\x00d[\x00d\\\x00d\x12\x00d^\x00d_\x00d\x00da\x00d1\x00d\x00db\x00d_\x00d\x0e\x00dc\x00dH\x00dp\x00dd\x00dd\x00d\x00d]\x00df\x00dP\x00d%\x00dI\x00d\x00dh\x00di\x00d\x00dj\x00dk\x00d2\x00d\x17\x00dm\x00d\x00do\x00dp\x00d\x00dr\x00ds\x00d\x00du\x00dA\x00d\x00dC\x00d2\x00dH\x00dN\x00dx\x00d\x00dz\x00d{\x00d\x13\x00d|\x00d \x00d\x00dU\x00dE\x00d\x00d~\x00dO\x00d\x1a\x00dG\x00dC\x00dL\x00d\x00dF\x00de\x00d\x00d\x00d\x00d\x00d\x00d\x11\x00d\x1b\x00d]\x00d\x00dS\x00d:\x00d^\x00d\x00d\x00d\x00d\x05\x00dZ\x00d\x00d5\x00dy\x00d8\x00d{\x00d\x00d&\x00d^\x00d\x00d\x03\x00d\x00d\x00dz\x00dY\x00de\x00dO\x00dE\x00d\x00d\x00d\x00d\x00dZ\x00d\x00d\x00d\'\x00d6\x00d.\x00d\x00d`\x00d.\x00d 
\x00d\x00d\x00d+\x00dN\x00da\x00d}\x00d-\x00dr\x00d\x00d1\x00d\x00d\x00d{\x00d+\x00d\n\x00d\x00d\x19\x00d\x0f\x00d\x00d\x00d\\\x00d\x00d\x00d\x19\x00d\x00d\x00dW\x00dn\x00dw\x00d\x00d\x00d\x00d\x00d\x05\x00d3\x00d)\x00d\x08\x00di\x00d\x00d[\x00d\x00d}\x00d\x0e\x00d\\\x00d}\x00d\x11\x00d\x00d\x18\x00d\x00d\x00d\x00d\x15\x00d\x16\x00d\x00d\x18\x00d\x19\x00dX\x00d\x1b\x00d\x00d\x00d\x13\x00d\x00d\x00d \x00d!\x00d\x00d#\x00d$\x00d\x00d%\x00d&\x00d]\x00d(\x00d)\x00d\x1f\x00d*\x00d\x0b\x00d\x00d,\x00d-\x00d\t\x00d/\x00d0\x00d&\x00d1\x00d2\x00d\x00d4\x00d5\x00d\x00d7\x00d8\x00d\x00d9\x00d:\x00d\x00d<\x00d=\x00dB\x00d?\x00d\x0e\x00d@\x00dA\x00dB\x00d\x00d+\x00dD\x00d\x16\x00d$\x00dE\x00dv\x00dF\x00dG\x00dH\x00dI\x00dJ\x00d\x00dL\x00dM\x00dA\x00d\x1b\x00dC\x00d!\x00dO\x00dP\x00d\x00dR\x00dS\x00d\x0f\x00d#\x00d\x11\x00d\x12\x00dU\x00dV\x00d\x00dX\x00d+\x00dY\x00dF\x00d\'\x00d5\x00d[\x00d\\\x00d\x00d^\x00d_\x00d\x00da\x00d1\x00dn\x00db\x00d_\x00d7\x00dc\x00dH\x00d?\x00dd\x00dd\x00d\x00d]\x00df\x00dD\x00d%\x00dI\x00d\x00dh\x00di\x00d\x00dj\x00dk\x00d\x05\x00d\x17\x00dm\x00d\x00do\x00dp\x00d\x00dr\x00ds\x00d\x00du\x00dA\x00d\x00dC\x00d2\x00d\x00dN\x00dx\x00d7\x00dz\x00d{\x00d\x00d|\x00d \x00dp\x00dU\x00dE\x00d~\x00d~\x00dO\x00d\x7f\x00dG\x00dC\x00d6\x00d\x00dF\x00d&\x00d\x00d\x00dW\x00d\x00d\x00d\x00d\x1b\x00d]\x00d\x00dS\x00d:\x00dr\x00d\x00d\x00d:\x00d\x05\x00dZ\x00dI\x00d5\x00dy\x00d\'\x00d{\x00d\x00d\x00d^\x00d\x00d\x00d\x00d\x00d\x00dY\x00de\x00d\x00dE\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x1d\x00d6\x00d.\x00d&\x00d`\x00d.\x00d\x0c\x00d\x00d\x00d\x00dN\x00da\x00d\x00d-\x00dr\x00d\x00d1\x00d\x00d\x00d{\x00d+\x00dJ\x00d\x00d\x19\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00dn\x00dw\x00d\x00d\x00d\x00d;\x00d\x05\x00d3\x00d\x00d\x08\x00di\x00dj\x00d[\x00d\x00d\x00d\x0e\x00d\\\x00d \x00d\x11\x00d\x00d\x00d\x00d\x00d\x00d\x15\x00d\x16\x00d&\x00d\x18\x00d\x19\x00d\x00d\x1b\x00d\x00dS\x00d\x13\x00d\x00d!\x00d 
\x00d!\x00dM\x00d#\x00d$\x00d3\x00d%\x00d&\x00dV\x00d(\x00d)\x00d\x04\x00d*\x00d\x0b\x00d\x00d,\x00d-\x00d\x00d/\x00d0\x00dm\x00d1\x00d2\x00d1\x00d4\x00d5\x00d\x7f\x00d7\x00d8\x00d\x00d9\x00d:\x00d\x00d<\x00d=\x00d\x00d?\x00d\x0e\x00d\x00dA\x00dB\x00d7\x00d+\x00dD\x00d\x00d$\x00dE\x00d\x00dF\x00dG\x00d\x00dI\x00dJ\x00d\x00dL\x00dM\x00d}\x00d\x1b\x00dC\x00d\x18\x00dO\x00dP\x00d\x00dR\x00dS\x00d\x19\x00d#\x00d\x11\x00dO\x00dU\x00dV\x00d\x00dX\x00d+\x00d\x00dF\x00d\'\x00d\x07\x00d[\x00d\\\x00dU\x00d^\x00d_\x00d\x00da\x00d1\x00d|\x00db\x00d_\x00d\x00dc\x00dH\x00d\x00dd\x00dd\x00d\x00d]\x00df\x00d\x00d%\x00dI\x00d\x00dh\x00di\x00dN\x00dj\x00dk\x00d\x00d\x17\x00dm\x00d\x00do\x00dp\x00d\x00dr\x00ds\x00d\x00du\x00dA\x00d\x00dC\x00d2\x00d\x00dN\x00dx\x00d\x00dz\x00d{\x00d\x10\x00d|\x00d \x00d\x00dU\x00dE\x00d.\x00d~\x00dO\x00d\x18\x00dG\x00dC\x00d\x00d\x00dF\x00d\x00d\x00d\x00d^\x00d\x00d\x00d\x00d\x1b\x00d]\x00d\x04\x00dS\x00d:\x00d2\x00d\x00d\x00d\x00d\x05\x00dZ\x00dJ\x00d5\x00dy\x00d\x00d{\x00d\x00d\x00d^\x00d\x00ds\x00d\x00d\x00d\x17\x00dY\x00de\x00d\x00dE\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d6\x00d.\x00d\x00d`\x00d.\x00d\x00d\x00d\x00d\x00dN\x00da\x00d\x00d-\x00dr\x00dW\x00d1\x00d\x00dH\x00d{\x00d+\x00dI\x00d\x00d\x19\x00d\x00d\x00d\x00dK\x00d\x00d\x00d\x00d\x00d\x00d\x19\x00dn\x00dw\x00d\x04\x00d\x00d\x00d\x00d\x05\x00d3\x00dg\x00d\x08\x00di\x00d\x11\x00d[\x00d\x00d(\x00d\x0e\x00d\\\x00dQ\x00d\x11\x00d\x00d\x00d\x00d\x00dT\x00d\x15\x00d\x16\x00d\x00d\x18\x00d\x19\x00d>\x00d\x1b\x00d\x00d\x1a\x00d\x13\x00d\x00d:\x00d 
\x00d!\x00d\x00d#\x00d$\x00d\x00d%\x00d&\x00d\x00d(\x00d)\x00d%\x00d*\x00d\x0b\x00dU\x00d,\x00d-\x00d\x00d/\x00d0\x00d\t\x00d1\x00d2\x00dr\x00d4\x00d5\x00d\n\x00d7\x00d8\x00d\r\x00d9\x00d:\x00d4\x00d<\x00d=\x00d\x00d?\x00d\x0e\x00dr\x00dA\x00dB\x00d\x00d+\x00dD\x00d;\x00d$\x00dE\x00d\x00dF\x00dG\x00dT\x00dI\x00dJ\x00dP\x00dL\x00dM\x00d\x00d\x1b\x00dC\x00d\x1e\x00dO\x00dP\x00d$\x00dR\x00dS\x00d-\x00d#\x00d\x11\x00d\x00dU\x00dV\x00d#\x00dX\x00d+\x00d)\x00dF\x00d\'\x00d\x00d[\x00d\\\x00d\x12\x00d^\x00d_\x00d\x00da\x00d1\x00d\x0f\x00db\x00d_\x00d\x00dc\x00dH\x00d\x00dd\x00dd\x00d\x02\x00d]\x00df\x00d\x0c\x00d%\x00dI\x00dd\x00dh\x00di\x00d\x00dj\x00dk\x00d\x00d\x17\x00dm\x00d(\x00do\x00dp\x00d\x00dr\x00ds\x00dM\x00du\x00dA\x00d\x00dC\x00d2\x00d\x00dN\x00dx\x00d\\\x00dz\x00d{\x00d\x00d|\x00d \x00d\x00dU\x00dE\x00d>\x00d~\x00dO\x00dx\x00dG\x00dC\x00d\x1b\x00d\x00dF\x00d\x00d\x00d\x00dW\x00d\x00d\x00di\x00d\x1b\x00d]\x00d9\x00dS\x00d:\x00d\x00d\x00d\x00d\x00d\x05\x00dZ\x00d\x00d5\x00dy\x00d8\x00d{\x00d\x00d,\x00d^\x00d\x00d\x00d\x00d\x00dB\x00dY\x00de\x00d\x00dE\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d6\x00d.\x00d\x00d`\x00d.\x00d \x00d\x00d\x00d\x00dN\x00da\x00dl\x00d-\x00dr\x00d\x00d1\x00d\x00d\x00d{\x00d+\x00d\n\x00d\x00d\x19\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00dn\x00dw\x00d\x00d\x00d\x00d\x00d\x05\x00d3\x00dA\x00d\x08\x00di\x00d?\x00d[\x00d\x00d\x00d\x0e\x00d\\\x00d\x00d\x11\x00d\x00d?\x00d\x00d\x00d\x00d\x15\x00d\x16\x00d\x00d\x18\x00d\x19\x00d\x00d\x1b\x00d\x00d$\x00d\x13\x00d\x00dp\x00d 
\x00d!\x00d\x19\x00d#\x00d$\x00d\x15\x00d%\x00d&\x00d\x00d(\x00d)\x00d{\x00d*\x00d\x0b\x00d+\x00d,\x00d-\x00d\x00d/\x00d0\x00d&\x00d1\x00d2\x00d3\x00d4\x00d5\x00d\x00d7\x00d8\x00d\x00d9\x00d:\x00d;\x00d<\x00d=\x00d\x00d?\x00d\x0e\x00d\x00dA\x00dB\x00d\x1c\x00d+\x00dD\x00d\x00d$\x00dE\x00d\x00dF\x00dG\x00dH\x00dI\x00dJ\x00d\x00dL\x00dM\x00d\x00d\x1b\x00dC\x00d!\x00dO\x00dP\x00d\x00dR\x00dS\x00d3\x00d#\x00d\x11\x00d\x12\x00dU\x00dV\x00d\x00dX\x00d+\x00d\x00dF\x00d\'\x00d5\x00d[\x00d\\\x00dD\x00d^\x00d_\x00d\x00da\x00d1\x00d.\x00db\x00d_\x00d<\x00dc\x00dH\x00d\x00dd\x00dd\x00d6\x00d]\x00df\x00dD\x00d%\x00dI\x00d\x0f\x00dh\x00di\x00dF\x00dj\x00dk\x00d\x05\x00d\x17\x00dm\x00dn\x00do\x00dp\x00d\x00dr\x00ds\x00d\t\x00du\x00dA\x00d+\x00dC\x00d2\x00d\x00dN\x00dx\x00d\r\x00dz\x00d{\x00d\x00d|\x00d \x00d\x00dU\x00dE\x00d;\x00d~\x00dO\x00d\x15\x00dG\x00dC\x00d@\x00d\x00dF\x00d\x00d\x00d\x00d}\x00d\x00d\x00d\x0f\x00d\x1b\x00d]\x00d\x1e\x00dS\x00d:\x00d<\x00d\x00d\x00d\x00d\x05\x00dZ\x00d\x00d5\x00dy\x00d\x00d{\x00d\x00d\x00d^\x00d\x00d\x1b\x00d\x00d\x00d\x00dY\x00de\x00d=\x00dE\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d6\x00d.\x00dM\x00d`\x00d.\x00df\x00d\x00d\x00d\x11\x00dN\x00da\x00d[\x00d-\x00dr\x00dv\x00d1\x00d\x00d\x00d{\x00d+\x00dd\x00d\x00d\x19\x00d/\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00dn\x00dw\x00d\x00d\x00d\x00d;\x00d\x05\x00d3\x00d\x00d\x08\x00di\x00d\x00d[\x00d\x00d\x00d\x0e\x00d\\\x00d \x00d\x11\x00d\x00d\x00d\x00d\x00d\x00d\x15\x00d\x16\x00d&\x00d\x18\x00d\x19\x00d\x00d\x1b\x00d\x00dX\x00d\x13\x00d\x00d\x00d 
\x00d!\x00d9\x00d#\x00d$\x00dk\x00d%\x00d&\x00d^\x00d(\x00d)\x00d\x00d*\x00d\x0b\x00d\x0e\x00d,\x00d-\x00dk\x00d/\x00d0\x00d\x00d1\x00d2\x00d\x19\x00d4\x00d5\x00d\x00d7\x00d8\x00dX\x00d9\x00d:\x00d;\x00d<\x00d=\x00dv\x00d?\x00d\x0e\x00d\x00dA\x00dB\x00d\x00d+\x00dD\x00db\x00d$\x00dE\x00d\x00dF\x00dG\x00d\x00dI\x00dJ\x00d\x00dL\x00dM\x00d}\x00d\x1b\x00dC\x00d&\x00dO\x00dP\x00dD\x00dR\x00dS\x00d\x19\x00d#\x00d\x11\x00d\x14\x00dU\x00dV\x00d\x00dX\x00d+\x00d,\x00dF\x00d\'\x00d\x1b\x00d[\x00d\\\x00db\x00d^\x00d_\x00d\x00da\x00d1\x00d\x00db\x00d_\x00d%\x00dc\x00dH\x00d_\x00dd\x00dd\x00d\x00d]\x00df\x00d\x00d%\x00dI\x00dC\x00dh\x00di\x00dN\x00dj\x00dk\x00dn\x00d\x17\x00dm\x00d\x00do\x00dp\x00dL\x00dr\x00ds\x00dp\x00du\x00dA\x00d\x00dC\x00d2\x00dM\x00dN\x00dx\x00d>\x00dz\x00d{\x00dV\x00d|\x00d \x00dh\x00dU\x00dE\x00d\x00d~\x00dO\x00d\x00dG\x00dC\x00d\x00d\x00dF\x00d\x1c\x00d\x00d\x00d \x00d\x00d\x00d\x00d\x1b\x00d]\x00d\x00dS\x00d:\x00d>\x00d\x00d\x00d\x00d\x05\x00dZ\x00dJ\x00d5\x00dy\x00dJ\x00d{\x00d\x00d\x00d^\x00d\x00d\x00d\x00d\x00d\x02\x00dY\x00de\x00dP\x00dE\x00d\x00dj\x00d\x00d\x00d\x00d\x00d\x00d\x00d6\x00d.\x00d\x00d`\x00d.\x00d\x00d\x00d\x00d\x18\x00dN\x00da\x00d\t\x00d-\x00dr\x00d\x00d1\x00d\x00d%\x00d{\x00d+\x00d\x00d\x00d\x19\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x17\x00d\x00d\x00d\x00dn\x00dw\x00d\x04\x00d\x00d\x00d-\x00d\x05\x00d3\x00dg\x00d\x08\x00di\x00dj\x00d[\x00d\x00d.\x00d\x0e\x00d\\\x00dQ\x00d\x11\x00d\x00d\x0e\x00d\x00d\x00dx\x00d\x15\x00d\x16\x00d\x00d\x18\x00d\x19\x00di\x00d\x1b\x00d\x00dd\x00d\x13\x00d\x00d\x00d 
\x00d!\x00d\x00d#\x00d$\x00dP\x00d%\x00d&\x00d\x00d(\x00d)\x00d\x00d*\x00d\x0b\x00d\x00d,\x00d-\x00d\x00d/\x00d0\x00dc\x00d1\x00d2\x00d\x00d4\x00d5\x00d\x7f\x00d7\x00d8\x00dM\x00d9\x00d:\x00d\x00d<\x00d=\x00dH\x00d?\x00d\x0e\x00d\x00dA\x00dB\x00d\x00d+\x00dD\x00d\x00d$\x00dE\x00d\x00dF\x00dG\x00d\x00dI\x00dJ\x00dP\x00dL\x00dM\x00d\x00d\x1b\x00dC\x00dg\x00dO\x00dP\x00d$\x00dR\x00dS\x00d7\x00d#\x00d\x11\x00d\x00dU\x00dV\x00d#\x00dX\x00d+\x00d\x00dF\x00d\'\x00d\x1c\x00d[\x00d\\\x00d\x00d^\x00d_\x00d\x00da\x00d1\x00d\x00db\x00d_\x00d\x00dc\x00dH\x00dp\x00dd\x00dd\x00d`\x00d]\x00df\x00d*\x00d%\x00dI\x00d\x00dh\x00di\x00d\x02\x00dj\x00dk\x00d\x00d\x17\x00dm\x00d\x00do\x00dp\x00dJ\x00dr\x00ds\x00d\x00du\x00dA\x00d\x00dC\x00d2\x00dY\x00dN\x00dx\x00d\x00dz\x00d{\x00d_\x00d|\x00d \x00d\x00dU\x00dE\x00d\x00d~\x00dO\x00d\x00dG\x00dC\x00d\x1a\x00d\x00dF\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x1b\x00d]\x00d*\x00dS\x00d:\x00dV\x00d\x00d\x00d\t\x00d\x05\x00dZ\x00d\x00d5\x00dy\x00dA\x00d{\x00d\x00d&\x00d^\x00d\x00d\x1c\x00d\x00d\x00d^\x00dY\x00de\x00d\x00dE\x00d\x00dz\x00d\x00d\x00d\x00d\x00d\x00d\x00d6\x00d.\x00d\x00d`\x00d.\x00d \x00d\x00d\x00d6\x00dN\x00da\x00d\x04\x00d-\x00dr\x00dE\x00d1\x00d\x00d\x00d{\x00d+\x00d\n\x00d\x00d\x19\x00d\x12\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00dS\x00dn\x00dw\x00dP\x00d\x00d\x00d\x00d\x05\x00d3\x00dh\x00d\x08\x00di\x00d\x00d[\x00d\x00dk\x00d\x0e\x00d\\\x00d\x0f\x00d\x11\x00d\x00d\x00d\x00d\x00d\x00d\x15\x00d\x16\x00de\x00d\x18\x00d\x19\x00d\x00d\x1b\x00d\x00d$\x00d\x13\x00d\x00d\x1a\x00d 
\x00d!\x00dl\x00d#\x00d$\x00dG\x00d%\x00d&\x00dD\x00d(\x00d)\x00d\x1f\x00d*\x00d\x0b\x00d\x00d,\x00d-\x00d\x06\x00d/\x00d0\x00dW\x00d1\x00d2\x00d3\x00d4\x00d5\x00dy\x00d7\x00d8\x00d\x00d9\x00d:\x00d\x00d<\x00d=\x00d>\x00d?\x00d\x0e\x00df\x00dA\x00dB\x00d\x00d+\x00dD\x00d\x11\x00d$\x00dE\x00d\x05\x00dF\x00dG\x00dH\x00dI\x00dJ\x00d\x00dL\x00dM\x00d\x00d\x1b\x00dC\x00d\x00dO\x00dP\x00dS\x00dR\x00dS\x00d\x00d#\x00d\x11\x00dd\x00dU\x00dV\x00d\n\x00dX\x00d+\x00d\x00dF\x00d\'\x00d\x00d[\x00d\\\x00d\x00d^\x00d_\x00d`\x00da\x00d1\x00dn\x00db\x00d_\x00d\x1d\x00dc\x00dH\x00d\x1c\x00dd\x00dd\x00d\x00d]\x00df\x00dD\x00d%\x00dI\x00dg\x00dh\x00di\x00d\x00dj\x00dk\x00d\x19\x00d\x17\x00dm\x00dE\x00do\x00dp\x00d\x00dr\x00ds\x00d.\x00du\x00dA\x00dd\x00dC\x00d2\x00d\x00dN\x00dx\x00d\x00dz\x00d{\x00d\x00d|\x00d \x00d\x00dU\x00dE\x00d\x00d~\x00dO\x00d\x7f\x00dG\x00dC\x00d\x00d\x00dF\x00d\x00d\x00d\x00dW\x00d\x00d\x00d\x0f\x00d\x1b\x00d]\x00d;\x00dS\x00d:\x00dw\x00d\x00d\x00d\x04\x00d\x05\x00dZ\x00dD\x00d5\x00dy\x00dm\x00d{\x00d\x00d)\x00d^\x00d\x00d\x07\x00d\x00d\x00d4\x00dY\x00de\x00d=\x00dE\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d8\x00d6\x00d.\x00d}\x00d`\x00d.\x00d\x0c\x00d\x00d\x00d\x11\x00dN\x00da\x00d\x00d-\x00dr\x00dj\x00d1\x00d\x00d\x00d{\x00d+\x00dR\x00d\x00d\x19\x00d\x00d\x00d\x00d\x00d\x00d\x00d,\x00d\x00d\x00dS\x00dn\x00dw\x00d\x00d\x00d\x00d\x1a\x00d\x05\x00d3\x00d>\x00d\x08\x00di\x00d\x00d[\x00d\x00d\x00d\x0e\x00d\\\x00dK\x00d\x11\x00d\x00d\x00d\x00d\x00d\x15\x00d\x15\x00d\x16\x00d\x00d\x18\x00d\x19\x00d\x00d\x1b\x00d\x00d\x00d\x13\x00d\x00d\x00d 
\x00d!\x00d\x00d#\x00d$\x00d\x00d%\x00d&\x00dV\x00d(\x00d)\x00d\x00d*\x00d\x0b\x00d\x00d,\x00d-\x00d\x00d/\x00d0\x00dg\x00d1\x00d2\x00d(\x00d4\x00d5\x00d\x7f\x00d7\x00d8\x00dX\x00d9\x00d:\x00d\x00d<\x00d=\x00d\x00d?\x00d\x0e\x00d\x00dA\x00dB\x00d\x00d+\x00dD\x00d?\x00d$\x00dE\x00d7\x00dF\x00dG\x00d\x00dI\x00dJ\x00d0\x00dL\x00dM\x00d\x00d\x1b\x00dC\x00d\x00dO\x00dP\x00d\x00dR\x00dS\x00d\x19\x00d#\x00d\x11\x00d\x00dU\x00dV\x00d\x00dX\x00d+\x00d,\x00dF\x00d\'\x00d{\x00d[\x00d\\\x00dU\x00d^\x00d_\x00d\x1f\x00da\x00d1\x00d|\x00db\x00d_\x00d\x00dc\x00dH\x00d\x14\x00dd\x00dd\x00d\x00d]\x00df\x00d\x00d%\x00dI\x00d\x00dh\x00di\x00dN\x00dj\x00dk\x00dn\x00d\x17\x00dm\x00d\x00do\x00dp\x00d\x00dr\x00ds\x00dC\x00du\x00dA\x00d\x00dC\x00d2\x00d\x00dN\x00dx\x00d>\x00dz\x00d{\x00d{\x00d|\x00d \x00d\x00dU\x00dE\x00d\x00d~\x00dO\x00d"\x00dG\x00dC\x00d\x00d\x00dF\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x1b\x00d]\x00d\x7f\x00dS\x00d:\x00d\x06\x00d\x00d\x00d\x00d\x05\x00dZ\x00d2\x00d5\x00dy\x00dO\x00d{\x00d\x00d\x04\x00d^\x00d\x00d\x00d\x00d\x00d\x00dY\x00de\x00d\x00dE\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d6\x00d.\x00d\x00d`\x00d.\x00d\x00d\x00d\x00d\x0c\x00dN\x00da\x00d\x00d-\x00dr\x00d=\x00d1\x00d\x00d\x00d{\x00d+\x00d\x00d\x00d\x19\x00d\x0f\x00d\x00d\x00dK\x00d\x00d\x00d\x00d\x00d\x00d\x00dn\x00dw\x00d\x00d\x00d\x00d*\x00d\x05\x00d3\x00dg\x00d\x08\x00di\x00d\x00d[\x00d\x00d\x00d\x0e\x00d\\\x00dQ\x00d\x11\x00d\x00d\x00d\x00d\x00dX\x00d\x15\x00d\x16\x00d\x03\x00d\x18\x00d\x19\x00d(\x00d\x1b\x00d\x00de\x00d\x13\x00d\x00d\x05\x00d 
\x00d!\x00d\x00d#\x00d$\x00d\x00d%\x00d&\x00d\x17\x00d(\x00d)\x00d\x00d*\x00d\x0b\x00d\x00d,\x00d-\x00d\x1c\x00d/\x00d0\x00d\x00d1\x00d2\x00dr\x00d4\x00d5\x00d`\x00d7\x00d8\x00d\x10\x00d9\x00d:\x00d4\x00d<\x00d=\x00d+\x00d?\x00d\x0e\x00d\x00dA\x00dB\x00d\x00d+\x00dD\x00d\x00d$\x00dE\x00d\x00dF\x00dG\x00d$\x00dI\x00dJ\x00d\x00dL\x00dM\x00d\x00d\x1b\x00dC\x00d_\x00dO\x00dP\x00d\x00dR\x00dS\x00d\x00d#\x00d\x11\x00d\x00dU\x00dV\x00dr\x00dX\x00d+\x00d\n\x00dF\x00d\'\x00d\x00d[\x00d\\\x00d\x12\x00d^\x00d_\x00d\x00da\x00d1\x00d\x00db\x00d_\x00d\x0f\x00dc\x00dH\x00dp\x00dd\x00dd\x00d\x7f\x00d]\x00df\x00d\x00d%\x00dI\x00d\x00dh\x00di\x00d\x00dj\x00dk\x00d\x00d\x17\x00dm\x00d\x1e\x00do\x00dp\x00d\x00dr\x00ds\x00d\x7f\x00du\x00dA\x00dJ\x00dC\x00d2\x00dH\x00dN\x00dx\x00d\\\x00dz\x00d{\x00d\x00d|\x00d \x00d\x00dU\x00dE\x00d>\x00d~\x00dO\x00d\x00dG\x00dC\x00dL\x00d\x00dF\x00d\x00d\x00d\x00dV\x00d\x00d\x00d\x00d\x1b\x00d]\x00d9\x00dS\x00d:\x00d.\x00d\x00d\x00d\x00d\x05\x00dZ\x00d\x00d5\x00dy\x00d\x00d{\x00d\x00d\x00d^\x00d\x00d\x00d\x00d\x00d=\x00dY\x00de\x00dO\x00dE\x00d\x00dz\x00d\x00d\x00da\x00d\x00d\x00d\'\x00d6\x00d.\x00d\x00d`\x00d.\x00d\x00d\x00d\x00d=\x00dN\x00da\x00dT\x00d-\x00dr\x00d\x00d1\x00d\x00d(\x00d{\x00d+\x00d;\x00d\x00d\x19\x00dm\x00d\x00d\x00d\x00d\x00d\x00d\x19\x00d\x00d\x00d\x00dn\x00dw\x00d\x00d\x00d\x00d\x00d\x05\x00d3\x00d\x00d\x08\x00di\x00d\x00d[\x00d\x00dk\x00d\x0e\x00d\\\x00d}\x00d\x11\x00d\x00d\x00d\x00d\x00d\x00d\x15\x00d\x16\x00d\x07\x00d\x18\x00d\x19\x00d\x00d\x1b\x00d\x00d7\x00d\x13\x00d\x00d\x00d 
\x00d!\x00d6\x00d#\x00d$\x00d\x00d%\x00d&\x00d\x00d(\x00d)\x00d\x00d*\x00d\x0b\x00d+\x00d,\x00d-\x00d.\x00d/\x00d0\x00d&\x00d1\x00d2\x00d\x10\x00d4\x00d5\x00d|\x00d7\x00d8\x00d\x00d9\x00d:\x00d\x00d<\x00d=\x00d\x00d?\x00d\x0e\x00d@\x00dA\x00dB\x00d\x1c\x00d+\x00dD\x00d\x06\x00d$\x00dE\x00d\x00dF\x00dG\x00d|\x00dI\x00dJ\x00d~\x00dL\x00dM\x00d\x06\x00d\x1b\x00dC\x00dw\x00dO\x00dP\x00d1\x00dR\x00dS\x00d/\x00d#\x00d\x11\x00d/\x00dU\x00dV\x00d(\x00dX\x00d+\x00dY\x00dF\x00d\'\x00d5\x00d[\x00d\\\x00d\x00d^\x00d_\x00d2\x00da\x00d1\x00d\x00db\x00d_\x00d<\x00dc\x00dH\x00d\x1e\x00dd\x00dd\x00d\x00d]\x00df\x00de\x00d%\x00dI\x00d\x00dh\x00di\x00d*\x00dj\x00dk\x00d+\x00d\x17\x00dm\x00d\x00do\x00dp\x00d[\x00dr\x00ds\x00d\x00du\x00dA\x00dv\x00dC\x00d2\x00d\x00dN\x00dx\x00d\x00dz\x00d{\x00d\x00d|\x00d \x00d\x00dU\x00dE\x00d\x00d~\x00dO\x00d\x7f\x00dG\x00dC\x00d@\x00d\x00dF\x00d\x7f\x00d\x00d\x00d1\x00d\x00d\x00dn\x00d\x1b\x00d]\x00d;\x00dS\x00d:\x00dr\x00d\x00d\x00d\x00d\x05\x00dZ\x00d\x00d5\x00dy\x00dq\x00d{\x00d\x00d<\x00d^\x00d\x00d\x0c\x00d\x00d\x00d\x04\x00dY\x00de\x00d\x00dE\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d6\x00d.\x00d}\x00d`\x00d.\x00d\x0c\x00d\x00d\x00d\x11\x00dN\x00da\x00d"\x00d-\x00dr\x00d\x17\x00d1\x00d\x00d0\x00d{\x00d+\x00d\x00d\x00d\x19\x00d\x00d\x00d\x00d\x1e\x00d\x00d\x00d\x00d\x00d\x00dS\x00dn\x00dw\x00d\x00d\x00d\x00d;\x00d\x05\x00d3\x00d\x00d\x08\x00di\x00d\x08\x00d[\x00d\x00d\x00d\x0e\x00d\\\x00dK\x00d\x11\x00d\x00d\x00d\x00d\x00d\x00d\x15\x00d\x16\x00dg\x00d\x18\x00d\x19\x00d(\x00d\x1b\x00d\x00d\x00d\x13\x00d\x00d!\x00d 
\x00d!\x00d9\x00d#\x00d$\x00d\x00d%\x00d&\x00d\x03\x00d(\x00d)\x00d?\x00d*\x00d\x0b\x00d\x00d,\x00d-\x00d\x00d/\x00d0\x00d\x00d1\x00d2\x00d\x00d4\x00d5\x00d*\x00d7\x00d8\x00dX\x00d9\x00d:\x00d\x00d<\x00d=\x00d\x00d?\x00d\x0e\x00d\x00dA\x00dB\x00dZ\x00d+\x00dD\x00d\x00d$\x00dE\x00d\x00dF\x00dG\x00d\x13\x00dI\x00dJ\x00d\x00dL\x00dM\x00d\x00d\x1b\x00dC\x00d^\x00dO\x00dP\x00d\x00dR\x00dS\x00d\x19\x00d#\x00d\x11\x00d\x00dU\x00dV\x00d\x00dX\x00d+\x00d\x1d\x00dF\x00d\'\x00dq\x00d[\x00d\\\x00do\x00d^\x00d_\x00d\x00da\x00d1\x00dk\x00db\x00d_\x00di\x00dc\x00dH\x00d;\x00dd\x00dd\x00d\x00d]\x00df\x00d\x00d%\x00dI\x00d\x00dh\x00di\x00d^\x00dj\x00dk\x00dn\x00d\x17\x00dm\x00d\n\x00do\x00dp\x00d\x00dr\x00ds\x00d?\x00du\x00dA\x00d\x00dC\x00d2\x00d\x00dN\x00dx\x00d(\x00dz\x00d{\x00d\x00d|\x00d \x00d\x00dU\x00dE\x00d\x00d~\x00dO\x00d"\x00dG\x00dC\x00d\x11\x00d\x00dF\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x1b\x00d]\x00d\x00dS\x00d:\x00d\x00d\x00d\x00d\x0f\x00d\x05\x00dZ\x00d\x00d5\x00dy\x00dJ\x00d{\x00d\x00d\x00d^\x00d\x00d\x00d\x00d\x00d\x00dY\x00de\x00d\x1d\x00dE\x00d\x00d\x00d\x00d\x00d\x1d\x00d\x00d\x00d$\x00d6\x00d.\x00d\x00d`\x00d.\x00dM\x00d\x00d\x00d\x00dN\x00da\x00d\t\x00d-\x00dr\x00d=\x00d1\x00d\x00d\x00d{\x00d+\x00d9\x00d\x00d\x19\x00d\x00d\x00d\x00dK\x00d\x00d\x00d\x00d\x00d\x00d\x00dn\x00dw\x00d\x00d\x00d\x00d4\x00d\x05\x00d3\x00d\x00d\x08\x00di\x00d\x00d[\x00d\x00d\x00d\x0e\x00d\\\x00dQ\x00d\x11\x00d\x00d\x03\x00d\x00d\x00d{\x00d\x15\x00d\x16\x00d8\x00d\x18\x00d\x19\x00d(\x00d\x1b\x00d\x00dn\x00d\x13\x00d\x00d\x00d 
\x00d!\x00d\x00d#\x00d$\x00d@\x00d%\x00d&\x00d\x00d(\x00d)\x00d^\x00d*\x00d\x0b\x00d=\x00d,\x00d-\x00d\n\x00d/\x00d0\x00d\x00d1\x00d2\x00db\x00d4\x00d5\x00d\x00d7\x00d8\x00dr\x00d9\x00d:\x00d\x00d<\x00d=\x00d\x00d?\x00d\x0e\x00d\r\x00dA\x00dB\x00d}\x00d+\x00dD\x00d\x1a\x00d$\x00dE\x00d\x00dF\x00dG\x00d\n\x00dI\x00dJ\x00d\x16\x00dL\x00dM\x00dx\x00d\x1b\x00dC\x00d2\x00dO\x00dP\x00d\x00dR\x00dS\x00d\x00d#\x00d\x11\x00d\x00dU\x00dV\x00dS\x00dX\x00d+\x00d\x00dF\x00d\'\x00d@\x00d[\x00d\\\x00d+\x00d^\x00d_\x00d\x1a\x00da\x00d1\x00d\x00db\x00d_\x00d\x00dc\x00dH\x00d\x00dd\x00dd\x00d\x00d]\x00df\x00d\x00d%\x00dI\x00d\x00dh\x00di\x00d\x00dj\x00dk\x00d|\x00d\x17\x00dm\x00d)\x00do\x00dp\x00d\x00dr\x00ds\x00d^\x00du\x00dA\x00dN\x00dC\x00d2\x00d\x17\x00dN\x00dx\x00d\x00dz\x00d{\x00d\x00d|\x00d \x00d\x00dU\x00dE\x00d\x00d~\x00dO\x00d\x00dG\x00dC\x00d)\x00d\x00dF\x00d\x00d\x00d\x00d\x06\x00d\x00d\x00d\x00d\x1b\x00d]\x00d\x00dS\x00d:\x00d\x07\x00d\x00d\x00d\x11\x00d\x05\x00dZ\x00d$\x00d5\x00dy\x00d\x00d{\x00d\x00d\x00d^\x00d\x00d}\x00d\x00d\x00d\x00dY\x00de\x00d\x1b\x00dE\x00d\x00d9\x00d\x00d\x00d\x00d\x00d\x00d\x00d6\x00d.\x00d\x00d`\x00d.\x00do\x00d\x00d\x00d%\x00dN\x00da\x00dC\x00d-\x00dr\x00d%\x00d1\x00d\x00d(\x00d{\x00d+\x00d\n\x00d\x00d\x19\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x19\x00d\x00d\x00d\x00dn\x00dw\x00d\x00d\x00d\x00d\x00d\x05\x00d3\x00d\x00d\x08\x00di\x00d\x04\x00d[\x00d\x00dk\x00d\x0e\x00d\\\x00d}\x00d\x11\x00d\x00d\x00d\x00d\x00d\x00d\x15\x00d\x16\x00d\x00d\x18\x00d\x19\x00d\x00d\x1b\x00d\x00d\x00d\x13\x00d\x00d\x00d 
\x00d!\x00d\x00d#\x00d$\x00d\x15\x00d%\x00d&\x00d\'\x00d(\x00d)\x00d\x00d*\x00d\x0b\x00d+\x00d,\x00d-\x00d.\x00d/\x00d0\x00d\x00d1\x00d2\x00d*\x00d4\x00d5\x00d6\x00d7\x00d8\x00d\x00d9\x00d:\x00d\x00d<\x00d=\x00d>\x00d?\x00d\x0e\x00d\x1a\x00dA\x00dB\x00d\x00d+\x00dD\x00d\x06\x00d$\x00dE\x00d\x05\x00dF\x00dG\x00d\x00dI\x00dJ\x00d\x00dL\x00dM\x00dA\x00d\x1b\x00dC\x00d!\x00dO\x00dP\x00dQ\x00dR\x00dS\x00dT\x00d#\x00d\x11\x00d\x00dU\x00dV\x00d\x00dX\x00d+\x00d\x18\x00dF\x00d\'\x00d5\x00d[\x00d\\\x00d\x00d^\x00d_\x00d`\x00da\x00d1\x00dR\x00db\x00d_\x00d\x00dc\x00dH\x00d\t\x00dd\x00dd\x00d1\x00d]\x00df\x00d\x00d%\x00dI\x00dH\x00dh\x00di\x00d]\x00dj\x00dk\x00d?\x00d\x17\x00dm\x00d0\x00do\x00dp\x00d\x0f\x00dr\x00ds\x00dr\x00du\x00dA\x00d\x0b\x00dC\x00d2\x00d#\x00dN\x00dx\x00d\x00dz\x00d{\x00d\x00d|\x00d \x00d\x00dU\x00dE\x00d&\x00d~\x00dO\x00d\x00dG\x00dC\x00d\x08\x00d\x00dF\x00dz\x00d\x00d\x00d\n\x00d\x00d\x00d\t\x00d\x1b\x00d]\x00d\x06\x00dS\x00d:\x00d.\x00d\x00d\x00dd\x00d\x05\x00dZ\x00d^\x00d5\x00dy\x00dD\x00d{\x00d\x00d\x00d^\x00d\x00d\x00d\x00d\x00d\x00dY\x00de\x00d\x07\x00dE\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00dx\x00d6\x00d.\x00d\x07\x00d`\x00d.\x00d\x00d\x00d\x00d\x00dN\x00da\x00d/\x00d-\x00dr\x00dD\x00d1\x00d\x00dc\x00d{\x00d+\x00dY\x00d\x00d\x19\x00d\x00d\x00d\x00d\x00d\x00d\x00dl\x00d\x00d\x00d\x00dn\x00dw\x00dc\x00d\x00d\x00d\x13\x00d\x05\x00d3\x00d\x00d\x08\x00di\x00d{\x00d[\x00d\x00dD\x00d\x0e\x00d\\\x00du\x00d\x11\x00d\x00d\x1b\x00d\x00d\x00d\x00d\x15\x00d\x16\x00dS\x00d\x18\x00d\x19\x00dn\x00d\x1b\x00d\x00d\x00d\x13\x00d\x00d\x00d 
\x00d!\x00d\x00d#\x00d$\x00d\x00d%\x00d&\x00d\x00d(\x00d)\x00dT\x00d*\x00d\x0b\x00d\x00d,\x00d-\x00d\x00d/\x00d0\x00d7\x00d1\x00d2\x00d\x00d4\x00d5\x00d\x00d7\x00d8\x00d\x00d9\x00d:\x00dK\x00d<\x00d=\x00dz\x00d?\x00d\x0e\x00d\x00dA\x00dB\x00dO\x00d+\x00dD\x00d\x00d$\x00dE\x00d\x00dF\x00dG\x00d\x00dI\x00dJ\x00d\x00dL\x00dM\x00d\x00d\x1b\x00dC\x00d\x00dO\x00dP\x00d\x00dR\x00dS\x00d\x00d#\x00d\x11\x00dw\x00dU\x00dV\x00d|\x00dX\x00d+\x00dW\x00dF\x00d\'\x00dY\x00d[\x00d\\\x00d\x00d^\x00d_\x00dl\x00da\x00d1\x00dB\x00db\x00d_\x00d\x00dc\x00dH\x00d\x0c\x00dd\x00dd\x00d\x00d]\x00df\x00d\x00d%\x00dI\x00d\x00dh\x00di\x00d\x00dj\x00dk\x00d\x00d\x17\x00dm\x00dp\x00do\x00dp\x00d\x00dr\x00ds\x00dE\x00du\x00dA\x00dw\x00dC\x00d2\x00dO\x00dN\x00dx\x00d\x00dz\x00d{\x00d\x00d|\x00d \x00d\x00dU\x00dE\x00d\'\x00d~\x00dO\x00d\x13\x00dG\x00dC\x00d_\x00d\x00dF\x00d(\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x1b\x00d]\x00d=\x00dS\x00d:\x00dW\x00d\x00d\x00d\x00d\x05\x00dZ\x00d\x00d5\x00dy\x00d\x00d{\x00d\x00du\x00d^\x00d\x00d\x00d\x00d\x00d\x00dY\x00de\x00d\x1b\x00dE\x00d\x00d\x1d\x00d\x00d\x00d\x00d\x00d\x00d\x00d6\x00d.\x00d\x00d`\x00d.\x00db\x00d\x00d\x00d\x00dN\x00da\x00d\x12\x00d-\x00dr\x00d#\x00d1\x00d\x00d>\x00d{\x00d+\x00dX\x00d\x00d\x19\x00d\x0f\x00d\x00d\x00d\x17\x00d\x00d\x00d\x00d\x00d\x00d]\x00dn\x00dw\x00d\x00d\x00d\x00d\x15\x00d\x05\x00d3\x00d+\x00d\x08\x00di\x00d\x1b\x00d[\x00d\x00d\x00d\x0e\x00d\\\x00d\x03\x00d\x11\x00d\x00d\x00d\x00d\x00dB\x00d\x15\x00d\x16\x00d\x1f\x00d\x18\x00d\x19\x00d\x00d\x1b\x00d\x00d\x00\x01d\x13\x00d\x00d!\x00d 
\x00d!\x00d\x00d#\x00d$\x00d\x00d%\x00d&\x00d\x00d(\x00d)\x00d\x00d*\x00d\x0b\x00do\x00d,\x00d-\x00dB\x00d/\x00d0\x00d\x00d1\x00d2\x00d\x00d4\x00d5\x00d7\x00d7\x00d8\x00d\x00d9\x00d:\x00d\x00d<\x00d=\x00d\x00d?\x00d\x0e\x00d\x00dA\x00dB\x00d\x00d+\x00dD\x00d;\x00d$\x00dE\x00d_\x00dF\x00dG\x00ds\x00dI\x00dJ\x00d\x00dL\x00dM\x00d\x00d\x1b\x00dC\x00d_\x00dO\x00dP\x00d$\x00dR\x00dS\x00d\x00d#\x00d\x11\x00d\x00dU\x00dV\x00d#\x00dX\x00d+\x00d\n\x00dF\x00d\'\x00d\x03\x00d[\x00d\\\x00d%\x00d^\x00d_\x00d\x00da\x00d1\x00d\x0f\x00db\x00d_\x00d\x00dc\x00dH\x00dp\x00dd\x00dd\x00d\x00d]\x00df\x00d\x0c\x00d%\x00dI\x00d\x00dh\x00di\x00d\x00dj\x00dk\x00d\x00d\x17\x00dm\x00dT\x00do\x00dp\x00dr\x00dr\x00ds\x00dM\x00du\x00dA\x00d\x00dC\x00d2\x00dH\x00dN\x00dx\x00dU\x00dz\x00d{\x00dG\x00d|\x00d \x00d\x00dU\x00dE\x00d\x00d~\x00dO\x00d\x00dG\x00dC\x00dh\x00d\x00dF\x00dn\x00d\x00d\x00di\x00d\x00d\x00d\x00d\x1b\x00d]\x00d\x1e\x00dS\x00d:\x00dV\x00d\x00d\x00d\x00d\x05\x00dZ\x00d\x00d5\x00dy\x00d\x00d{\x00d\x00d\x00d^\x00d\x00dr\x00d\x00d\x00d\x00dY\x00de\x00d\x00dE\x00d\x00d\x00d\x00d\x00dZ\x00d\x00d\x00d\'\x00d6\x00d.\x00d\x00d`\x00d.\x00d \x00d\x00d\x00d\x00dN\x00da\x00d\x00d-\x00dr\x00d\x00d1\x00d\x00d(\x00d{\x00d+\x00d\n\x00d\x00d\x19\x00d\x00d\x00d\x00d\x00d\x00d\x00d{\x00d\x00d\x00d\x00dn\x00dw\x00d\x00d\x00d\x00d-\x00d\x05\x00d3\x00d\x00d\x08\x00di\x00d\x04\x00d[\x00d\x00dk\x00d\x0e\x00d\\\x00dg\x00d\x11\x00d\x00d\x00d\x00d\x00d\x00d\x15\x00d\x16\x00d\x00d\x18\x00d\x19\x00d\x00d\x1b\x00d\x00d\x00d\x13\x00d\x00d\x03\x00d 
\x00d!\x00d\x00d#\x00d$\x00d\x00d%\x00d&\x00d\x00d(\x00d)\x00d\x00d*\x00d\x0b\x00de\x00d,\x00d-\x00d\x17\x00d/\x00d0\x00d\x00d1\x00d2\x00d\x00d4\x00d5\x00d[\x00d7\x00d8\x00d{\x00d9\x00d:\x00d\x00d<\x00d=\x00d[\x00d?\x00d\x0e\x00dW\x00dA\x00dB\x00d\x1b\x00d+\x00dD\x00d0\x00d$\x00dE\x00d\x00dF\x00dG\x00d#\x00dI\x00dJ\x00dT\x00dL\x00dM\x00dc\x00d\x1b\x00dC\x00d*\x00dO\x00dP\x00d*\x00dR\x00dS\x00di\x00d#\x00d\x11\x00d\x00dU\x00dV\x00d*\x00dX\x00d+\x00d\x00dF\x00d\'\x00d\x00d[\x00d\\\x00d\x00d^\x00d_\x00dL\x00da\x00d1\x00dF\x00db\x00d_\x00d5\x00dc\x00dH\x00d`\x00dd\x00dd\x00d\x00d]\x00df\x00d\x00d%\x00dI\x00dh\x00dh\x00di\x00d\x00dj\x00dk\x00d]\x00d\x17\x00dm\x00d\x00do\x00dp\x00d\x07\x00dr\x00ds\x00d\x00d\x00dA\x00d\x00dC\x00d2\x00dC\x00dN\x00dx\x00d\x00dz\x00d{\x00d\x00d|\x00d \x00dO\x00dU\x00dE\x00dM\x00d~\x00dO\x00d\x00dG\x00dC\x00d\x00d\x00dF\x00d\x00d\x00d\x00d^\x00d\x00d\x00dq\x00d\x1b\x00d]\x00dc\x00dS\x00d:\x00d7\x00d\x00d\x00d\x05\x00d\x05\x00dZ\x00d\x00d5\x00dy\x00d\x15\x00d{\x00d\x00d\x00d^\x00d\x00d\x00d\x00d\x00d\x00dY\x00de\x00d\x05\x00dE\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d6\x00d.\x00d\x00d`\x00d.\x00d\x00d\x00d\x00d\x19\x00dN\x00da\x00d\x00d-\x00dr\x00d\x00d1\x00d\x00d_\x00d{\x00d+\x00d]\x00d\x00d\x19\x00d\x00d\x00d\x00d\x00d\x00d\x00di\x00d\x00d\x00d\x00dn\x00dw\x00d\x00d\x00d\x00d\x00d\x05\x00d3\x00d\x00d\x08\x00di\x00d\x00d[\x00d\x00d0\x00d\x0e\x00d\\\x00d\x00d\x11\x00d\x00de\x00d\x00d\x00dk\x00d\x15\x00d\x16\x00d\x00d\x18\x00d\x19\x00d\x00d\x1b\x00d\x00d\x00d\x13\x00d\x00dp\x00d \x00d!\x00d\x00d#\x00d$\x00dK\x00d%\x00d&\x00dA\x00d(\x00d)\x00d\x00\x01d*\x00d\x0b\x00d9\x00d,\x00d-\x00d\x00d/\x00d0\x00d\x00d1\x00d2\x00d\x19\x00d4\x00d5\x00d\x00d7\x00d8\x00d\x00d9\x00d:\x00d 
\x00d<\x00d=\x00d\x00d?\x00d\x0e\x00d\x00dA\x00dB\x00d\x02\x00d+\x00dD\x00d\x00d$\x00dE\x00dN\x00dF\x00dG\x00d\x00dI\x00dJ\x00d\x14\x00dL\x00dM\x00d\x1f\x00d\x1b\x00dC\x00d4\x00dO\x00dP\x00d\x00dR\x00dS\x00d\x00d#\x00d\x11\x00d\x00dU\x00dV\x00d\x00dX\x00d+\x00d=\x00dF\x00d\'\x00d\x00d[\x00d\\\x00d\x00d^\x00d_\x00d\x00da\x00d1\x00d$\x00db\x00d_\x00di\x00dc\x00dH\x00d\r\x00dd\x00dd\x00d\x0c\x00d]\x00df\x00dL\x00d%\x00dI\x00d)\x00dh\x00di\x00d\x00dj\x00dk\x00d\x00d\x17\x00dm\x00d\n\x00do\x00dp\x00dN\x00dr\x00ds\x00d\x00du\x00dA\x00d\x0c\x00dC\x00d2\x00d\x00dN\x00dx\x00d\x00dz\x00d{\x00d\x13\x00d|\x00d \x00d\x00dU\x00dE\x00d\x00d~\x00dO\x00d\x10\x00dG\x00dC\x00dr\x00d\x00dF\x00d\x00d\x00d\x00d\x00d\x00d\x00d|\x00d\x1b\x00d]\x00d\x00dS\x00d:\x00dE\x00d\x00d\x00d\\\x00d\x05\x00dZ\x00d\x00d5\x00dy\x00d\x00d{\x00d\x00dW\x00d^\x00d\x00d\x00d\x00d\x00d\x05\x00dY\x00de\x00d\x00dE\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x06\x00d6\x00d.\x00d\x00d`\x00d.\x00d\x00d\x00d\x00d\x00d\x00da\x00d\x00d-\x00dr\x00d%\x00d1\x00d\x00dO\x00d{\x00d+\x00d]\x00d\x00d\x19\x00dX\x00d\x00d\x00dm\x00d\x00d\x00d\x00d\x00d\x00d\x00dn\x00dw\x00d\x00d\x00d\x00d\x00d\x05\x00d3\x00d\x00d\x08\x00di\x00d\x00d[\x00d\x00d\x00d\x0e\x00d\\\x00dQ\x00d\x11\x00d\x00d^\x00d\x00d\x00dT\x00d\x15\x00d\x16\x00d\x00d\x18\x00d\x19\x00d(\x00d\x1b\x00d\x00dn\x00d\x13\x00d\x00d\x00d 
\x00d!\x00d\\\x00d#\x00d$\x00d\x00d%\x00d&\x00d\x00d(\x00d)\x00d%\x00d*\x00d\x0b\x00d\x00d,\x00d-\x00d\x11\x00d/\x00d0\x00di\x00d1\x00d2\x00d\n\x00d4\x00d5\x00d~\x00d7\x00d8\x00d3\x00d9\x00d:\x00d\x00d<\x00d=\x00d\x00d?\x00d\x0e\x00d\x07\x00dA\x00dB\x00d\x12\x00d+\x00dD\x00d\x00d$\x00dE\x00d\x00dF\x00dG\x00d\x00dI\x00dJ\x00d\x00dL\x00dM\x00d\x00d\x1b\x00dC\x00d\x06\x00dO\x00dP\x00d\x00dR\x00dS\x00d.\x00d#\x00d\x11\x00d\x00dU\x00dV\x00d\x00dX\x00d+\x00d\x00dF\x00d\'\x00d\x14\x00d[\x00d\\\x00d#\x00d^\x00d_\x00d|\x00da\x00d1\x00dq\x00db\x00d_\x00d\x00dc\x00dH\x00dp\x00dd\x00dd\x00d\x00d]\x00df\x00d=\x00d%\x00dI\x00d\x17\x00dh\x00di\x00d\x00dj\x00dk\x00d\x00d\x17\x00dm\x00d\x00do\x00dp\x00d\x07\x00dr\x00ds\x00d\x00du\x00dA\x00d\x15\x00dC\x00d2\x00dH\x00dN\x00dx\x00d\\\x00dz\x00d{\x00d_\x00d|\x00d \x00d\x00dU\x00dE\x00d\x00d~\x00dO\x00dA\x00dG\x00dC\x00dL\x00d\x00dF\x00d\x00d\x00d\x00d\x00d\x00d\x00dd\x00d\x1b\x00d]\x00d\x00dS\x00d:\x00dS\x00d\x00d\x00dg\x00d\x05\x00dZ\x00d\x05\x00d5\x00dy\x00d\x00d{\x00d\x00dg\x00d^\x00d\x00d*\x00d\x00d\x00d^\x00dY\x00de\x00d\x00dE\x00d\x00d3\x00d\x00d\x00dH\x00d\x00d\x00d\x00d6\x00d.\x00d\x00d`\x00d.\x00d\x00d\x00d\x00dx\x00dN\x00da\x00do\x00d-\x00dr\x00d\x00d1\x00d\x00d6\x00d{\x00d+\x00dN\x00d\x00d\x19\x00d\x0f\x00d\x00d\x00da\x00d\x00d\x00d\x00d\x00d\x00d=\x00dn\x00dw\x00d\x00d\x00d\x00d\x00d\x05\x00d3\x00dO\x00d\x08\x00di\x00d\x03\x00d[\x00d\x00d}\x00d\x0e\x00d\\\x00d}\x00d\x11\x00d\x00d\x00d\x00d\x00d\x00d\x15\x00d\x16\x00d{\x00d\x18\x00d\x19\x00d\x00d\x1b\x00d\x00d2\x00d\x13\x00d\x00d?\x00d 
\x00d!\x00dz\x00d#\x00d$\x00d\x00d%\x00d&\x00dj\x00d(\x00d)\x00d4\x00d*\x00d\x0b\x00d\x1a\x00d,\x00d-\x00do\x00d/\x00d0\x00d&\x00d1\x00d2\x00d3\x00d4\x00d5\x00d6\x00d7\x00d8\x00d\x00d9\x00d:\x00d\x00d<\x00d=\x00d\x00d?\x00d\x0e\x00d\x00dA\x00dB\x00dr\x00d+\x00dD\x00d2\x00d$\x00dE\x00d\x05\x00dF\x00dG\x00dH\x00dI\x00dJ\x00d\x00dL\x00dM\x00d\x00d\x1b\x00dC\x00dJ\x00dO\x00dP\x00d\x00dR\x00dS\x00d\x1f\x00d#\x00d\x11\x00d\x00dU\x00dV\x00dx\x00dX\x00d+\x00d+\x00dF\x00d\'\x00d.\x00d[\x00d\\\x00d\x00d^\x00d_\x00d\x00da\x00d1\x00d|\x00db\x00d_\x00d\x00dc\x00dH\x00dk\x00dd\x00dd\x00d_\x00d]\x00df\x00d\x00d%\x00dI\x00d-\x00dh\x00di\x00dm\x00dj\x00dk\x00d\x19\x00d\x17\x00dm\x00d\x00do\x00dp\x00d\x00dr\x00ds\x00d\x00du\x00dA\x00dv\x00dC\x00d2\x00d\x00dN\x00dx\x00d7\x00dz\x00d{\x00dz\x00d|\x00d \x00d\x00dU\x00dE\x00d\x00d~\x00dO\x00d\x7f\x00dG\x00dC\x00d@\x00d\x00dF\x00d\x00d\x00d\x00dW\x00d\x00d\x00d\x0f\x00d\x1b\x00d]\x00d;\x00dS\x00d:\x00dU\x00d\x00d\x00d\x00d\x05\x00dZ\x00d\x00d5\x00dy\x00d\'\x00d{\x00d\x00d\x00d^\x00d\x00d\x00d\x00d\x00d4\x00dY\x00de\x00d=\x00dE\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d6\x00d.\x00d}\x00d`\x00d.\x00d\x00d\x00d\x00d\x00dN\x00da\x00d\x00d-\x00dr\x00d\x00d1\x00d\x00d\x00d{\x00d+\x00d\x00d\x00d\x19\x00d\x00d\x00d\x00d\x00d\x00d\x00d,\x00d\x00d\x00dZ\x00dn\x00dw\x00d\x00d\x00d\x00do\x00d\x05\x00d3\x00d\x00d\x08\x00di\x00d\x00d[\x00d\x00d\x00d\x0e\x00d\\\x00d\x00d\x11\x00d\x00d\x00d\x00d\x00d\x10\x00d\x15\x00d\x16\x00d&\x00d\x18\x00d\x19\x00d>\x00d\x1b\x00d\x00d\x00d\x13\x00d\x00d\x00d 
\x00d!\x00do\x00d#\x00d$\x00d\x00d%\x00d&\x00d:\x00d(\x00d)\x00d\x00d*\x00d\x0b\x00d\x00d,\x00d-\x00d\x00d/\x00d0\x00d\x00d1\x00d2\x00d1\x00d4\x00d5\x00d\x7f\x00d7\x00d8\x00d\x1f\x00d9\x00d:\x00do\x00d<\x00d=\x00d\x00d?\x00d\x0e\x00d\x00dA\x00dB\x00dz\x00d+\x00dD\x00d\x00d$\x00dE\x00d\x00d\x00dG\x00d\x13\x00dI\x00dJ\x00d\x00dL\x00dM\x00d}\x00d\x1b\x00dC\x00d\x00dO\x00dP\x00dy\x00dR\x00dS\x00d\x19\x00d#\x00d\x11\x00d\x00dU\x00dV\x00d\x00dX\x00d+\x00d,\x00dF\x00d\'\x00db\x00d[\x00d\\\x00dU\x00d^\x00d_\x00d\x00da\x00d1\x00d\x00db\x00d_\x00d\x00dc\x00dH\x00d\x00dd\x00dd\x00dZ\x00d]\x00df\x00d\x00d%\x00dI\x00d\x00dh\x00di\x00d^\x00dj\x00dk\x00d`\x00d\x17\x00dm\x00d\x00do\x00dp\x00d\x00dr\x00ds\x00d\x00du\x00dA\x00d`\x00dC\x00d2\x00d?\x00dN\x00dx\x00d\x00dz\x00d{\x00d\x00d|\x00d \x00dX\x00dU\x00dE\x00d\x00d~\x00dO\x00d"\x00dG\x00dC\x00d\x00d\x00dF\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x1b\x00d]\x00d6\x00dS\x00d:\x00dW\x00d\x00d\x00d/\x00d\x05\x00dZ\x00d\x07\x00d5\x00dy\x00d&\x00d{\x00d\x00d\r\x00d^\x00d\x00dt\x00d\x00d\x00d\x00dY\x00de\x00d\x00dE\x00d\x00dj\x00d\x00d\x00d\x00d\x00d\x00d\x00d6\x00d.\x00d\x00d`\x00d.\x00d\x00d\x00d\x00d\x10\x00dN\x00da\x00d\x00d-\x00dr\x00d\x00d1\x00d\x00d\x00d{\x00d+\x00dI\x00d\x00d\x19\x00d\x00d\x00d\x00d\x12\x00d\x00d\x00d\x00d\x00d\x00d\x00dn\x00dw\x00d\x00d\x00d\x00d\x00d\x05\x00d3\x00d\x00d\x08\x00di\x00d\x00d[\x00d\x00d\x00d\x0e\x00d\\\x00dQ\x00d\x11\x00d\x00d\'\x00d\x00d\x00dj\x00d\x15\x00d\x16\x00d\x00d\x18\x00d\x19\x00d(\x00d\x1b\x00d\x00d@\x00d\x13\x00d\x00d\x00d 
\x00d!\x00d\x00d#\x00d$\x00dK\x00d%\x00d&\x00d}\x00d(\x00d)\x00d\x00d*\x00d\x0b\x00d\x00d,\x00d-\x00dy\x00d/\x00d0\x00dc\x00d1\x00d2\x00d\x00d4\x00d5\x00d\x00d7\x00d8\x00dM\x00d9\x00d:\x00dd\x00d<\x00d=\x00d\x12\x00d?\x00d\x0e\x00dx\x00dA\x00dB\x00d\x00d+\x00dD\x00dR\x00d$\x00dE\x00d_\x00dF\x00dG\x00d\x00dI\x00dJ\x00d6\x00dL\x00dM\x00d\x00d\x1b\x00dC\x00d\x00dO\x00dP\x00d\x00dR\x00dS\x00d\x00d#\x00d\x11\x00d\x00dU\x00dV\x00d\x19\x00dX\x00d+\x00d\n\x00dF\x00d\'\x00dd\x00d[\x00d\\\x00d\'\x00d^\x00d_\x00d\x00da\x00d1\x00d\x00db\x00d_\x00d%\x00dc\x00dH\x00dp\x00dd\x00dd\x00dD\x00d]\x00df\x00d\x0c\x00d%\x00dI\x00d\x00dh\x00di\x00d\x00dj\x00dk\x00d\x00d\x17\x00dm\x00d\x00do\x00dp\x00d\x00dr\x00ds\x00dJ\x00du\x00dA\x00d\x00dC\x00d2\x00d\x00dN\x00dx\x00d\x00dz\x00d{\x00d_\x00d|\x00d \x00dx\x00dU\x00dE\x00d\x00d~\x00dO\x00d\x00dG\x00dC\x00d!\x00d\x00dF\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x11\x00d\x1b\x00d]\x00d[\x00dS\x00d:\x00d\x00d\x00d\x00d\x00d\x05\x00dZ\x00d\x1b\x00d5\x00dy\x00d8\x00d{\x00d\x00d&\x00d^\x00d\x00d\x00d\x00d\x00d\x00dY\x00de\x00dO\x00dE\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\'\x00d6\x00d.\x00dz\x00d`\x00d.\x00d\x02\x00d\x00d\x00d+\x00dN\x00da\x00d\x00d-\x00dr\x00dN\x00d1\x00d\x00d(\x00d{\x00d+\x00d\x00d\x00d\x19\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00dn\x00dw\x00d\x00d\x00d\x00d#\x00d\x05\x00d3\x00d\x00d\x08\x00di\x00d\x04\x00d[\x00d\x00d\x12\x00d\x0e\x00d\\\x00dZ\x00d\x11\x00d\x00d\x00d\x00d\x00d\x00d\x15\x00d\x16\x00d\x00d\x18\x00d\x19\x00d\x00d\x1b\x00d\x00dY\x00d\x13\x00d\x00d\x00d 
\x00d!\x00d\x00d#\x00d$\x00d\x00d%\x00d&\x00d\'\x00d(\x00d)\x00d\x1f\x00d*\x00d\x0b\x00d7\x00d,\x00d-\x00d\x00d/\x00d0\x00d&\x00d1\x00d2\x00d\x05\x00d4\x00d5\x00d\x00d7\x00d8\x00d1\x00d9\x00d:\x00d;\x00d<\x00d=\x00di\x00d?\x00d\x0e\x00d\x1a\x00dA\x00dB\x00d\x1c\x00d+\x00dD\x00d\x00d$\x00dE\x00d)\x00dF\x00dG\x00dH\x00dI\x00dJ\x00d\x00dL\x00dM\x00dY\x00d\x1b\x00dC\x00d\x00dO\x00dP\x00d\x00dR\x00dS\x00dT\x00d#\x00d\x11\x00d\x1c\x00dU\x00dV\x00dk\x00dX\x00d+\x00d\\\x00dF\x00d\'\x00d5\x00d[\x00d\\\x00du\x00d^\x00d_\x00dv\x00da\x00d1\x00dn\x00db\x00d_\x00d\x00dc\x00dH\x00d=\x00dd\x00dd\x00d\x00d]\x00df\x00d\x00d%\x00dI\x00dp\x00d\x00di\x00d\x00dj\x00dk\x00dl\x00d\x17\x00dm\x00d\x0c\x00do\x00dp\x00d\x00dr\x00ds\x00d\x00du\x00dA\x00d\x00dC\x00d2\x00d\x19\x00dN\x00dx\x00d7\x00dz\x00d{\x00dc\x00d\x00d \x00d\x07\x00dU\x00dE\x00d\x00d~\x00dO\x00d\x7f\x00dG\x00dC\x00d\x00d\x00dF\x00d!\x00d\x00d\x00dW\x00d\x00d\x00d]\x00d\x1b\x00d]\x00d\x00dS\x00d:\x00d\x00d\x00d\x00d\x00d\x05\x00dZ\x00d\x00d5\x00dy\x00d\x0e\x00d{\x00d\x00d\x00d^\x00d\x00d{\x00d\x00d\x00d4\x00dY\x00de\x00d=\x00dE\x00d\x00d7\x00d\x00d\x00d\x00d\x00d\x00d\x00d6\x00d.\x00d}\x00d`\x00d.\x00d\x0c\x00d\x00d\x00d\x11\x00dN\x00da\x00d"\x00d-\x00dr\x00d\x00d1\x00d\x00d\x00d{\x00d+\x00d\x00d\x00d\x19\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x18\x00d\x00d\x00dS\x00dn\x00dw\x00d\x00d\x00d\x00d;\x00d\x05\x00d3\x00d}\x00d\x08\x00di\x00d\x01\x01d[\x00d\x00d\x00d\x0e\x00d\\\x00dK\x00d\x11\x00d\x00d\x00d\x00d\x00d\x00d\x15\x00d\x16\x00d&\x00d\x18\x00d\x19\x00d>\x00d\x1b\x00d\x00d\x00d\x13\x00d\x00d!\x00d 
\x00d!\x00d\x0c\x00d#\x00d$\x00d\x00d%\x00d&\x00d:\x00d(\x00d)\x00d\x00d*\x00d\x0b\x00d\x00d,\x00d-\x00dK\x00d/\x00d0\x00d\x00d1\x00d2\x00d1\x00d4\x00d5\x00d\x7f\x00d7\x00d8\x00dX\x00d9\x00d:\x00d\x0e\x00d<\x00d=\x00d\x00d?\x00d\x0e\x00d\x00dA\x00dB\x00dz\x00d+\x00dD\x00d\x00d$\x00dE\x00d\x00dF\x00dG\x00d\x00dI\x00dJ\x00d\x00dL\x00dM\x00d\x11\x00d\x1b\x00dC\x00d\x00dO\x00dP\x00d\x00dR\x00dS\x00d\x19\x00d#\x00d\x11\x00d\x00dU\x00dV\x00d\n\x00dX\x00d+\x00d|\x00dF\x00d\'\x00d\x04\x00d[\x00d\\\x00d\x00d^\x00d_\x00d\x12\x00da\x00d1\x00d\x00db\x00d_\x00d\x00dc\x00dH\x00d\x00dd\x00dd\x00d\x00d]\x00df\x00d"\x00d%\x00dI\x00d\x00dh\x00di\x00d\x00dj\x00dk\x00de\x00d\x17\x00dm\x00d\x00do\x00dp\x00d\x00dr\x00ds\x00d"\x00du\x00dA\x00dw\x00dC\x00d2\x00d\x00dN\x00dx\x00d>\x00dz\x00d{\x00dV\x00d|\x00d \x00d\x00dU\x00dE\x00d\x00d~\x00dO\x00d\x10\x00dG\x00dC\x00d\x1f\x00d\x00dF\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x1b\x00d]\x00d\x00dS\x00d:\x00d\x00d\x00d\x00d\x0f\x00d\x05\x00dZ\x00d2\x00d5\x00dy\x00dO\x00d{\x00d\x00d\x00d^\x00d\x00d\x00d\x00d\x00d\x00dY\x00de\x00d\x00dE\x00d\x00dj\x00d\x00d\x00d\x00d\x00d\x00d\x00d6\x00d.\x00d\x03\x00d`\x00d.\x00d\x00d\x00d\x00d\x10\x00dN\x00da\x00d\t\x00d-\x00dr\x00d=\x00d1\x00d\x00dI\x00d{\x00d+\x00dW\x00d\x00d\x19\x00d\x00d\x00d\x00dK\x00d\x00d\x00d\x00d\x00d\x00d\x00dn\x00dw\x00d\x00d\x00d\x00d\x00d\x05\x00d3\x00dg\x00d\x08\x00di\x00d\x00d[\x00d\x00d\x00d\x0e\x00d\\\x00d\x00d\x11\x00d\x00d\x00d\x00d\x00dj\x00d\x15\x00d\x16\x00d\x00d\x18\x00d\x19\x00d(\x00d\x1b\x00d\x00d7\x00d\x13\x00d\x00d\x14\x00d 
\x00d!\x00d\x00d#\x00d$\x00df\x00d%\x00d&\x00d\x00d(\x00d)\x00d\x00d*\x00d\x0b\x00d\x00d,\x00d-\x00dk\x00d/\x00d0\x00d\x00d1\x00d2\x00d*\x00d4\x00d5\x00d\x00d7\x00d8\x00dM\x00d9\x00d:\x00d4\x00d<\x00d=\x00d\x00d?\x00d\x0e\x00d\x00dA\x00dB\x00d\x00d+\x00dD\x00dD\x00d$\x00dE\x00d_\x00dF\x00dG\x00d$\x00dI\x00dJ\x00dP\x00dL\x00dM\x00d\x1b\x00d\x1b\x00dC\x00d\x00dO\x00dP\x00d$\x00dR\x00dS\x00d\x00d#\x00d\x11\x00d\x00dU\x00dV\x00d#\x00dX\x00d+\x00d\x00dF\x00d\'\x00d\x00d[\x00d\\\x00d\x00d^\x00d_\x00d}\x00da\x00d1\x00d\x00db\x00d_\x00d%\x00dc\x00dH\x00dT\x00dd\x00dd\x00d\x12\x00d]\x00df\x00d\x0c\x00d%\x00dI\x00d\x00dh\x00di\x00db\x00dj\x00dk\x00d\x00d\x17\x00dm\x00d\x00do\x00dp\x00dA\x00dr\x00ds\x00dJ\x00du\x00dA\x00d\x00dC\x00d2\x00d\x00dN\x00dx\x00d2\x00dz\x00d{\x00d<\x00d|\x00d \x00d\x00dU\x00dE\x00d/\x00d~\x00dO\x00d\x00dG\x00dC\x00d\x00d\x00dF\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x11\x00d\x1b\x00d]\x00d\x00dS\x00d:\x00dV\x00d\x00d\x00d\x00d\x05\x00dZ\x00d\x00d5\x00dy\x00d\x00d{\x00d\x00d&\x00d^\x00d\x00d\x00d\x00d\x00dB\x00dY\x00de\x00d]\x00dE\x00d\x00dz\x00d\x00d\x00d\x00d\x00d\x00d\'\x00d6\x00d.\x00d\x00d`\x00d.\x00d\x00d\x00d\x00d+\x00dN\x00da\x00d\'\x00d-\x00dr\x00d\x00d1\x00d\x00d\x00d{\x00d+\x00d\n\x00d\x00d\x19\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x1d\x00d\x00d\x00d\x00dn\x00dw\x00d&\x00d\x00d\x00d\x00d\x05\x00d3\x00d\x00d\x08\x00di\x00di\x00d[\x00d\x00ds\x00d\x0e\x00d\\\x00d}\x00d\x11\x00d\x00d\x00d\x00d\x00d)\x00d\x15\x00d\x16\x00d\x00d\x18\x00d\x19\x00dx\x00d\x1b\x00d\x00d$\x00d\x13\x00d\x00d\x00d 
\x00d!\x00d<\x00d#\x00d$\x00d\x00d%\x00d&\x00d\x00d(\x00d)\x00d\x1f\x00d*\x00d\x0b\x00d6\x00d,\x00d-\x00do\x00d/\x00d0\x00d&\x00d1\x00d2\x00dv\x00d4\x00d5\x00d\x00d7\x00d8\x00d\x00d9\x00d:\x00d\x00d<\x00d=\x00d>\x00d?\x00d\x0e\x00d@\x00dA\x00dB\x00d=\x00d+\x00dD\x00d\x00d$\x00dE\x00d\x05\x00dF\x00dG\x00d\x7f\x00dI\x00dJ\x00d\x00dL\x00dM\x00d\x00d\x1b\x00dC\x00d\x04\x00dO\x00dP\x00d+\x00dR\x00dS\x00dT\x00d#\x00d\x11\x00d\x0c\x00dU\x00dV\x00d\x12\x00dX\x00d+\x00dY\x00dF\x00d\'\x00d\x00d[\x00d\\\x00d]\x00d^\x00d_\x00d`\x00da\x00d1\x00d\x00db\x00d_\x00d"\x00dc\x00dH\x00d\x1e\x00dd\x00dd\x00d=\x00d]\x00df\x00d\x00d%\x00dI\x00d\x00dh\x00di\x00d&\x00dj\x00dk\x00d\x12\x00d\x17\x00dm\x00d5\x00do\x00dp\x00d\x00dr\x00ds\x00d\x00du\x00dA\x00d\x00dC\x00d2\x00d\x00dN\x00dx\x00d7\x00dz\x00d{\x00d\x00d|\x00d \x00d\x18\x00dU\x00dE\x00d\x00d~\x00dO\x00d\x00dG\x00dC\x00d\x1b\x00d\x00dF\x00d\x7f\x00d\x00d\x00d\x00d\x00d\x00d;\x00d\x1b\x00d]\x00d;\x00dS\x00d:\x00d\x00d\x00d\x00d:\x00d\x05\x00dZ\x00d`\x00d5\x00dy\x00d\'\x00d{\x00d\x00d^\x00d^\x00d\x00d?\x00d\x00d\x00d\x00dY\x00de\x00d=\x00dE\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d6\x00d.\x00dg\x00d`\x00d.\x00d_\x00d\x00d\x00d\x11\x00dN\x00da\x00d\x00d-\x00dr\x00d\x14\x00d1\x00d\x00d\x00d{\x00d+\x00d\x00d\x00d\x19\x00d\x00d\x00d\x00d*\x00d\x00d\x00d,\x00d\x00d\x00d\x00dn\x00dw\x00d*\x00d\x00d\x00d;\x00d\x05\x00d3\x00dh\x00d\x08\x00di\x00d\x00d[\x00d\x00d\x00d\x0e\x00d\\\x00d\x00d\x11\x00d\x00d\x02\x00d\x00d\x00d\x10\x00d\x15\x00d\x16\x00d?\x00d\x18\x00d\x19\x00d\x00d\x1b\x00d\x00d\x00d\x13\x00d\x00d\x00d 
\x00d!\x00d*\x00d#\x00d$\x00d\x00d%\x00d&\x00d\x00d(\x00d)\x00d?\x00d*\x00d\x0b\x00d\x00d,\x00d-\x00d\x00d/\x00d0\x00d\x1b\x00d1\x00d2\x00dZ\x00d4\x00d5\x00d\x7f\x00d7\x00d8\x00d\x00d9\x00d:\x00d\x14\x00d<\x00d=\x00d\x03\x00d?\x00d\x0e\x00d\x00dA\x00dB\x00d\x00d+\x00dD\x00dU\x00d$\x00dE\x00d\x00dF\x00dG\x00d;\x00dI\x00dJ\x00d\x00dL\x00dM\x00d\x00d\x1b\x00dC\x00d\x00dO\x00dP\x00d\x00dR\x00dS\x00d\x1d\x00d#\x00d\x11\x00d\x1a\x00dU\x00dV\x00d\x00dX\x00d+\x00dR\x00dF\x00d\'\x00d\x17\x00d[\x00d\\\x00dU\x00d^\x00d_\x00d\x00da\x00d1\x00d/\x00db\x00d_\x00d\x00dc\x00dH\x00d\x00dd\x00dd\x00d\x00d]\x00df\x00d\x00d%\x00dI\x00dK\x00dh\x00di\x00d\x00dj\x00dk\x00dt\x00d\x17\x00dm\x00d\x00do\x00dp\x00d\x00dr\x00ds\x00d\t\x00du\x00dA\x00d\x00dC\x00d2\x00d\x00dN\x00dx\x00d\x00dz\x00d{\x00dm\x00d|\x00d \x00d\x00dU\x00dE\x00d\x00d~\x00dO\x00d\x00dG\x00dC\x00d\x00d\x00dF\x00d\x00d\x00d\x00d\x10\x00d\x00d\x00d\x00d\x1b\x00d]\x00d\x04\x00dS\x00d:\x00d\x00d\x00d\x00d\x00d\x05\x00dZ\x00dJ\x00d5\x00dy\x00d\x00d{\x00d\x00d\x00d^\x00d\x00d|\x00d\x00d\x00d\x1d\x00dY\x00de\x00d\x00dE\x00d\x00d\x19\x00d\x00d\x00d\x00d\x00d\x00d\x00d6\x00d.\x00dY\x00d`\x00d.\x00dU\x00d\x00d\x00d\x10\x00dN\x00da\x00d\x00d-\x00dr\x00d\x00\x01d1\x00d\x00d\x00d{\x00d+\x00dM\x00d\x00d\x19\x00d\x00d\x00d\x00dK\x00d\x00d\x00d\x00d\x00d\x00d\x00dn\x00dw\x00d\x00d\x00d\x00d*\x00d\x05\x00d3\x00dg\x00d\x08\x00di\x00d\x00d[\x00d\x00d\x00d\x0e\x00d\\\x00d\x00d\x11\x00d\x00d\x00d\x00d\x00dj\x00d\x15\x00d\x16\x00d\x00d\x18\x00d\x19\x00d(\x00d\x1b\x00d\x00d\x00d\x13\x00d\x00d\x1b\x00d 
\x00d!\x00d\x00d#\x00d$\x00dK\x00d%\x00d&\x00d\x00d(\x00d)\x00d%\x00d*\x00d\x0b\x00d\x00d,\x00d-\x00d\x00d/\x00d0\x00d\t\x00d1\x00d2\x00dr\x00d4\x00d5\x00d\x00d7\x00d8\x00dM\x00d9\x00d:\x00d\x00d<\x00d=\x00d\x00d?\x00d\x0e\x00d\x00dA\x00dB\x00d\x00d+\x00dD\x00d;\x00d$\x00dE\x00d_\x00dF\x00dG\x00d9\x00dI\x00dJ\x00d+\x00dL\x00dM\x00dU\x00d\x1b\x00dC\x00d_\x00dO\x00dP\x00d$\x00dR\x00dS\x00d\x00d#\x00d\x11\x00dt\x00dU\x00dV\x00d-\x00dX\x00d+\x00dU\x00dF\x00d\'\x00d\x00d[\x00d\\\x00d\x00d^\x00d_\x00d\x00da\x00d1\x00d\x00db\x00d_\x00d\x00dc\x00dH\x00d\x00dd\x00dd\x00d\x00d]\x00df\x00d\x1f\x00d%\x00dI\x00d\x00dh\x00di\x00d\x00dj\x00dk\x00d8\x00d\x17\x00dm\x00dA\x00do\x00dp\x00d\x00dr\x00ds\x00d\x00du\x00dA\x00dj\x00dC\x00d2\x00d\x00dN\x00dx\x00d\x00dz\x00d{\x00d\x00d|\x00d \x00d\x00dU\x00dE\x00d>\x00d~\x00dO\x00d\x00dG\x00dC\x00d\x00d\x00dF\x00db\x00d\x00d\x00d]\x00d\x00d\x00d{\x00d\x1b\x00d]\x00d6\x00dS\x00d:\x00d\x00d\x00d\x00d\x00d\x05\x00dZ\x00dU\x00d5\x00dy\x00d\x00d{\x00d\x00d\x00d^\x00d\x00d\x00d\x00d\x00d\x00dY\x00de\x00dQ\x00dE\x00d\x00d3\x00d\x00d\x00d\x00d\x00d\x00d\x00d6\x00d.\x00d!\x00d`\x00d.\x00d\x00d\x00d\x00dO\x00dN\x00da\x00d\x00d-\x00dr\x00d\x00d1\x00d\x00dO\x00d{\x00d+\x00dD\x00d\x00d\x19\x00d\x00d\x00d\x00d\x17\x00d\x00d\x00d\x19\x00d\x00d\x00d\x00dn\x00dw\x00d\x00d\x00d\x00dz\x00d\x05\x00d3\x00d\x17\x00d\x08\x00di\x00dX\x00d[\x00d\x00dj\x00d\x0e\x00d\\\x00d[\x00d\x11\x00d\x00do\x00d\x00d\x00dv\x00d\x15\x00d\x16\x00d\x00d\x18\x00d\x19\x00d\x00d\x1b\x00d\x00d!\x00d\x13\x00d\x00dI\x00d 
\x00d!\x00dP\x00d#\x00d$\x00d\x00d%\x00d&\x00d\x00d(\x00d)\x00d\x00d*\x00d\x0b\x00d\x00d,\x00d-\x00dW\x00d/\x00d0\x00d\x00d1\x00d2\x00dI\x00d4\x00d5\x00d\x1a\x00d7\x00d8\x00dX\x00d9\x00d:\x00d,\x00d<\x00d=\x00d[\x00d?\x00d\x0e\x00d\x00d\x00dB\x00d\x00d+\x00dD\x00d\x00d$\x00dE\x00dT\x00dF\x00dG\x00d\x00dI\x00dJ\x00d\x00dL\x00dM\x00d\x1b\x00d\x1b\x00dC\x00dn\x00dO\x00dP\x00d\x00dR\x00dS\x00d\x00d#\x00d\x11\x00d_\x00dU\x00dV\x00d9\x00dX\x00d+\x00dR\x00dF\x00d\'\x00d\x00d[\x00d\\\x00dc\x00d^\x00d_\x00d\x00da\x00d1\x00d\x00db\x00d_\x00d*\x00dc\x00dH\x00dk\x00dd\x00dd\x00dt\x00d]\x00df\x00d!\x00d%\x00dI\x00d\x00dh\x00di\x00d\x00dj\x00dk\x00d\x00d\x17\x00dm\x00d\x00do\x00dp\x00d\x00dr\x00ds\x00d\x07\x00du\x00dA\x00d\x00dC\x00d2\x00d\x00dN\x00dx\x00d\x00dz\x00d{\x00d\x00d|\x00d \x00dj\x00dU\x00dE\x00d\x00d~\x00dO\x00d\x06\x00dG\x00dC\x00d\x00d\x00dF\x00d\x00d\x00d\x00d\x1e\x00d\x00d\x00d\x00d\x1b\x00d]\x00d9\x00dS\x00d:\x00d\x00d\x00d\x00d\x04\x00d\x05\x00dZ\x00d\x00d5\x00dy\x00d\x00d{\x00d\x00d\x00d^\x00d\x00d/\x00d\x00d\x00d\x00dY\x00de\x00d}\x00dE\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d6\x00d.\x00d{\x00d`\x00d.\x00d\x00d\x00d\x00d\x00dN\x00da\x00db\x00d\x00dr\x00d\x0f\x00d1\x00d\x00dc\x00d{\x00d+\x00dq\x00d\x00d\x19\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00dn\x00dw\x00d\x00d\x00d\x00d\x00d\x05\x00d3\x00d\x00d\x08\x00di\x00d\x00d[\x00d\x00df\x00d\x0e\x00d\\\x00d\x00d\x11\x00d\x00dt\x00d\x00d\x00d\x00d\x15\x00d\x16\x00d\x00d\x18\x00d\x19\x00d\x00d\x1b\x00d\x00d\x00d\x13\x00d\x00dp\x00d 
\x00d!\x00d\x00d#\x00d$\x00d\x00d%\x00d&\x00d4\x00d(\x00d)\x00d\x00d*\x00d\x0b\x00d\x00d,\x00d-\x00d\x0e\x00d/\x00d0\x00d\x0b\x00d1\x00d2\x00d$\x00d4\x00d5\x00dz\x00d7\x00d8\x00d\x00d9\x00d:\x00d\x00dg\x00d=\x00d\x00d?\x00d\x0e\x00d\x00dA\x00dB\x00d\x00d+\x00dD\x00d"\x00d$\x00dE\x00d`\x00dF\x00dG\x00dP\x00dI\x00dJ\x00d\x00dL\x00dM\x00d\x00d\x1b\x00dC\x00d^\x00dO\x00dP\x00d\x00dR\x00dS\x00d\x19\x00d#\x00d\x11\x00d\x00dU\x00dV\x00d\x00dX\x00d+\x00d\x1d\x00dF\x00d\'\x00dq\x00d[\x00d\\\x00do\x00d^\x00d_\x00d\x00da\x00d1\x00dk\x00db\x00d_\x00di\x00dc\x00dH\x00d;\x00dd\x00dd\x00d\x00d]\x00df\x00d\x00d%\x00dI\x00d\x00dh\x00di\x00d^\x00dj\x00dk\x00dn\x00d\x17\x00dm\x00d\x00do\x00dp\x00d\x1e\x00dr\x00ds\x00d"\x00du\x00dA\x00d\x00dC\x00d2\x00d\x00dN\x00dx\x00d\x00dz\x00d{\x00d\x00d|\x00d \x00d\x00dU\x00dE\x00d\x00d~\x00dO\x00d"\x00dG\x00dC\x00d>\x00d\x00dF\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x1b\x00d]\x00d\x00dS\x00d:\x00d\x00d\x00d\x00di\x00d\x05\x00dZ\x00d\x00d5\x00dy\x00dO\x00d{\x00d\x00d\x00d^\x00d\x00d\x00d\x00d\x00d9\x00dY\x00de\x00d"\x00dE\x00d\x00d\x00d\x00d\x00dl\x00d\x00d\x00d\x00d6\x00d.\x00d\x00d`\x00d.\x00d\x00d\x00d\x00d\x10\x00dN\x00da\x00dN\x00d-\x00dr\x00d\x00d1\x00d\x00d\x00d{\x00d+\x00dI\x00d\x00d\x19\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x1f\x00d\x00d\x00d\x00dn\x00dw\x00d.\x00d\x00d\x00d\x00d\x05\x00d3\x00dg\x00d\x08\x00di\x00d\x00d[\x00d\x00d\x00d\x0e\x00d\\\x00dQ\x00d\x11\x00d\x00d^\x00d\x00d\x00di\x00d\x15\x00d\x16\x00d\x00d\x18\x00d\x19\x00d(\x00d\x1b\x00d\x00dn\x00d\x13\x00d\x00d[\x00d 
\x00d!\x00d\x00d#\x00d$\x00df\x00d%\x00d&\x00d}\x00d(\x00d)\x00d\n\x00d*\x00d\x0b\x00d6\x00d,\x00d-\x00d2\x00d\x1d\x00d0\x00d\x00d1\x00d2\x00d\x00d4\x00d5\x00d\x00d7\x00d8\x00dM\x00d9\x00d:\x00d4\x00d<\x00d=\x00d^\x00d?\x00d\x0e\x00d\x00dA\x00dB\x00d\x00d\x00dD\x00d\x00d$\x00dE\x00d\x10\x00dF\x00dG\x00dq\x00dI\x00dJ\x00d\x1b\x00dL\x00dM\x00d\x00d\x1b\x00dC\x00d_\x00dO\x00dP\x00d$\x00dR\x00dS\x00dD\x00d#\x00d\x11\x00d\x05\x00dU\x00dV\x00d#\x00dX\x00d+\x00d\n\x00dF\x00d\'\x00d\x03\x00d[\x00d\\\x00dJ\x00d^\x00d_\x00d\x00da\x00d1\x00d\x0f\x00db\x00d_\x00d\x00dc\x00dH\x00dp\x00dd\x00dd\x00dO\x00d]\x00df\x00d\x00d%\x00dI\x00d\x00dh\x00di\x00d\x00dj\x00dk\x00d\x00d\x17\x00dm\x00d\x00do\x00dp\x00d\x00dr\x00ds\x00dM\x00du\x00dA\x00d\x00dC\x00d2\x00dH\x00dN\x00dx\x00d\x00dz\x00d{\x00dY\x00d|\x00d \x00d\x15\x00dU\x00dE\x00d>\x00d~\x00dO\x00d\x00dG\x00dC\x00dL\x00d\x00dF\x00dn\x00d\x00d\x00d\x00d\x00d\x00d\x11\x00d\x1b\x00d]\x00d9\x00dS\x00d:\x00dV\x00d\x00d\x00d\x00d\x05\x00dZ\x00d\x1b\x00d5\x00dy\x00dA\x00d{\x00d\x00d&\x00d^\x00d\x00d\x00d\x00d\x00dB\x00dY\x00de\x00de\x00dE\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\'\x00d6\x00d.\x00d\x00d`\x00d.\x00d \x00d\x00d\x00di\x00dN\x00da\x00d\'\x00d-\x00dr\x00d\x00d1\x00d\x00d(\x00d{\x00d+\x00d\n\x00d\x00d\x19\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x19\x00d\x00d\x00d\x00dn\x00dw\x00d\x00d\x00d\x00d\x00d\x05\x00d3\x00d8\x00d\x08\x00di\x00d\x01\x01d[\x00d\x00d\x00d\x0e\x00d\\\x00d[\x00d\x11\x00d\x00d\x1c\x00d\x00d\x00d\x00d\x15\x00d\x16\x00d\x16\x00d\x18\x00d\x19\x00d\x00d\x1b\x00d\x00d`\x00d\x13\x00d\x00d\x00d 
\x00d!\x00d\x00d#\x00d$\x00d\x15\x00d%\x00d&\x00d\x00d(\x00d)\x00d\x00d*\x00d\x0b\x00d&\x00d,\x00d-\x00d\x00d/\x00d0\x00d\x00d1\x00d2\x00d3\x00d4\x00d5\x00d6\x00d7\x00d8\x00d\x00d9\x00d:\x00d\x00d<\x00d=\x00d\x00d?\x00d\x0e\x00d@\x00dA\x00dB\x00d\x1c\x00d+\x00dD\x00d\x06\x00d$\x00dE\x00d\x05\x00dF\x00dG\x00d\x00dI\x00dJ\x00d\x00dL\x00dM\x00d\x00d\x1b\x00dC\x00d\x00dO\x00dP\x00d\x00dR\x00dS\x00d\x00d#\x00d\x11\x00d\x12\x00dU\x00dV\x00d\x1d\x00dU\x00d+\x00dS\x00dF\x00d\'\x00d\x00d[\x00d\\\x00d]\x00d^\x00d_\x00d\x00da\x00d1\x00dn\x00db\x00d_\x00dd\x00dc\x00dH\x00dT\x00dd\x00dd\x00d$\x00d]\x00df\x00d_\x00d%\x00dI\x00d\x00dh\x00di\x00d\x00dj\x00dk\x00d\x07\x00d\x17\x00dm\x00d\x00do\x00dp\x00d\x12\x00dr\x00ds\x00dt\x00du\x00dA\x00d\x00dC\x00d2\x00dc\x00dN\x00dx\x00d7\x00dz\x00d{\x00dz\x00d|\x00d \x00d\x00dU\x00dE\x00d2\x00d~\x00dO\x00dB\x00dG\x00dC\x00d@\x00d\x00dF\x00d\x7f\x00d\x00d\x00dW\x00d\x00d\x00d\x0e\x00d\x1b\x00d]\x00d\x00dS\x00d:\x00dr\x00d\x00d\x00d\x00d\x05\x00dZ\x00d\x00d5\x00dy\x00d\x11\x00d{\x00d\x00d[\x00d^\x00d\x00d^\x00d\x00d\x00d\x00dY\x00de\x00dY\x00dE\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d6\x00d.\x00d\x15\x00d`\x00d.\x00d\x00d\x00d\x00d\x00dN\x00da\x00d\x05\x00d-\x00dr\x00d\x00d1\x00d\x00d\x00d{\x00d+\x00d\x00d\x00d\x19\x00d\x00d\x00d\x00d<\x00d\x00d\x00dY\x00d\x00d\x00d\x14\x00dn\x00dw\x00d\x00d\x00d\x00d,\x00d\x05\x00d3\x00d\x00d\x08\x00di\x00d\x00d[\x00d\x00dJ\x00d\x0e\x00d\\\x00dN\x00d\x11\x00d\x00dx\x00d\x00d\x00d\x0f\x00d\x15\x00d\x16\x00d\x00d\x18\x00d\x19\x00d\x00d\x1b\x00d\x00dI\x00d\x13\x00d\x00d\x16\x00d 
\x00d!\x00d\x00d#\x00d$\x00d\x00d%\x00d&\x00d\x00d(\x00d)\x00dF\x00d*\x00d\x0b\x00d\x00d,\x00d-\x00d\x00d/\x00d0\x00d\x00d1\x00d2\x00d1\x00d4\x00d5\x00dR\x00d7\x00d8\x00d-\x00d9\x00d:\x00d\x00d<\x00d=\x00d2\x00d?\x00d\x0e\x00d\x00dA\x00dB\x00dz\x00d+\x00dD\x00d\x00d$\x00dE\x00d\x00dF\x00dG\x00d\x00dI\x00dJ\x00d\x00dL\x00dM\x00d}\x00d\x1b\x00dC\x00d\x00dO\x00dP\x00du\x00dR\x00dS\x00d\x1a\x00d#\x00d\x11\x00d\x00dU\x00dV\x00d\x00dX\x00d+\x00d,\x00dF\x00d\'\x00d9\x00d[\x00d\\\x00d\x00d^\x00d_\x00d\x00da\x00d1\x00de\x00db\x00d_\x00d:\x00dc\x00dH\x00d!\x00dd\x00dd\x00d\x00d]\x00df\x00d\x11\x00d%\x00dI\x00d\x00dh\x00di\x00d^\x00dj\x00dk\x00dn\x00d\x17\x00dm\x00d\x00do\x00dp\x00d\x00dr\x00ds\x00dw\x00du\x00dA\x00dp\x00dC\x00d2\x00d-\x00dN\x00dx\x00d\x00dz\x00d{\x00d3\x00d|\x00d \x00d(\x00dU\x00dE\x00d\x00d~\x00dO\x00d\x00dG\x00dC\x00d>\x00d\x00dF\x00d\'\x00d\x00d\x00d1\x00d\x00d\x00d\x00d\x1b\x00d]\x00d\x00dS\x00d:\x00dW\x00d\x00d\x00d?\x00d\x05\x00dZ\x00dm\x00d5\x00dy\x00d\x00d{\x00d\x00dq\x00d^\x00d\x00d4\x00d\x00d\x00d\r\x00dY\x00de\x00d\x00dE\x00d\x00d5\x00d\x00d\x00d-\x00d\x00d\x00d\x16\x00d6\x00d.\x00d\x00d`\x00d.\x00d\x00d\x00d\x00d\x00dN\x00da\x00d#\x00d-\x00dr\x00dk\x00d1\x00d\x00d)\x00d{\x00d+\x00dI\x00d\x00d\x19\x00d\x00d\x00d\x00dK\x00d\x00d\x00d7\x00d\x00d\x00dm\x00dn\x00dw\x00d\x1a\x00d\x00d\x00d\x00d\x05\x00d3\x00d\x00d\x08\x00di\x00d*\x00d[\x00d\x00da\x00d\x0e\x00d\\\x00d\x00d\x11\x00d\x00d-\x00d\x00d\x00d.\x00d\x15\x00d\x16\x00dC\x00d\x18\x00d\x19\x00dN\x00d\x1b\x00d\x00d!\x00d\x13\x00d\x00d}\x00d 
\x00d!\x00d\x00d#\x00d$\x00d\x00d%\x00d&\x00d\x04\x00d(\x00d)\x00d@\x00d*\x00d\x0b\x00d{\x00d,\x00d-\x00d\x00d/\x00d0\x00d`\x00d1\x00d2\x00dr\x00d4\x00d5\x00d\x00d7\x00d8\x00dM\x00d9\x00d:\x00d\x00d<\x00d=\x00d\x00d?\x00d\x0e\x00dx\x00dA\x00dB\x00d\x00d+\x00dD\x00d;\x00d$\x00dE\x00d\x00dF\x00dG\x00d[\x00dI\x00dJ\x00dP\x00dL\x00dM\x00d\x00d\x1b\x00dC\x00d_\x00dO\x00dP\x00d\x00dR\x00dS\x00d=\x00d#\x00d\x11\x00d\x00dU\x00dV\x00d#\x00dX\x00d+\x00d\n\x00dF\x00d\'\x00d\x00d[\x00d\\\x00dl\x00d^\x00d_\x00d\x00da\x00d1\x00d\x0f\x00db\x00d_\x00d\x00dc\x00dH\x00d\x00dd\x00dd\x00d\x00d]\x00df\x00d\x0c\x00d%\x00dI\x00d\x00dh\x00di\x00d\x00dj\x00dk\x00d$\x00d\x17\x00dm\x00d\x00do\x00dp\x00d\x00dr\x00ds\x00dM\x00du\x00dA\x00d\x00dC\x00d2\x00dD\x00dN\x00dx\x00d\x00dz\x00d{\x00d%\x00d|\x00d \x00d\x00dU\x00dE\x00d^\x00d~\x00dO\x00d\x00dG\x00dC\x00d\x00d\x00dF\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x1b\x00d]\x00d9\x00dS\x00d:\x00dV\x00d\x00d\x00d\x00d\x05\x00dZ\x00d\x00d5\x00dy\x00d\x00d{\x00d\x00du\x00d^\x00d\x00dw\x00d\x00d\x00d\x00dY\x00de\x00d\x00dE\x00d\x00d|\x00d\x00d\x00d<\x00d\x00d\x00dI\x00d6\x00d.\x00d\x00d`\x00d.\x00d\x08\x00d\x00d\x00d+\x00dN\x00da\x00d\'\x00d-\x00dr\x00d\x00d1\x00d\x00d(\x00d{\x00d+\x00d\x00d\x00d\x19\x00dN\x00d\x00d\x00d\x00d\x00d\x00d\x19\x00d\x00d\x00d\x00dn\x00dw\x00d\x00d\x00d\x00d`\x00d\x05\x00d3\x00dt\x00d\x08\x00di\x00d\x00d[\x00d\x00ds\x00d\x0e\x00d\\\x00d}\x00d\x11\x00d\x00d\x00d\x00d\x00d\x00d\x15\x00d\x16\x00d(\x00d\x18\x00d\x19\x00d\x00d\x1b\x00d\x00d\x00d\x13\x00d\x00d\x00d 
\x00d!\x00d\x00d#\x00d$\x00d\x15\x00d%\x00d&\x00d\'\x00d(\x00d)\x00d\x1f\x00d*\x00d\x0b\x00d\x1a\x00d,\x00d-\x00da\x00d/\x00d0\x00d&\x00d1\x00d2\x00d3\x00d4\x00d5\x00d6\x00d7\x00d8\x00dt\x00d9\x00d:\x00d\x00d<\x00d=\x00d\x00d?\x00d\x0e\x00d@\x00dA\x00dB\x00d\x1c\x00d+\x00dD\x00d\x06\x00d$\x00dE\x00dz\x00d\x00dG\x00d\x00dI\x00dJ\x00d\x00dL\x00dM\x00d\x00d\x1b\x00dC\x00d!\x00dO\x00dP\x00dM\x00dR\x00dS\x00d\x00d#\x00d\x11\x00d\x00dU\x00dV\x00d\x00dX\x00d+\x00d\'\x00dF\x00d\'\x00d\x00d[\x00d\\\x00dO\x00d^\x00d_\x00dv\x00da\x00d1\x00dn\x00db\x00d_\x00d<\x00dc\x00dH\x00d\x1e\x00dd\x00dd\x00db\x00d]\x00df\x00d:\x00d%\x00dI\x00dg\x00dh\x00di\x00d&\x00dj\x00dk\x00dl\x00d\x17\x00dm\x00d\x00do\x00dp\x00d[\x00dr\x00ds\x00d\x00du\x00dA\x00dv\x00dC\x00d2\x00d\x00dN\x00dx\x00d\x00dz\x00d{\x00d\x00d|\x00d \x00d\x0c\x00dU\x00dE\x00d\x00d~\x00dO\x00d\x7f\x00dG\x00dC\x00d@\x00d\x00dF\x00d\x7f\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x1b\x00d]\x00d;\x00dS\x00d:\x00dr\x00d\x00d\x00d\x00d\x05\x00dZ\x00d0\x00d5\x00dy\x00d.\x00d{\x00d\x00d\x00d^\x00d\x00d{\x00d\x00d\x00d4\x00dY\x00de\x00d\x00dE\x00d\x00dl\x00d\x00d\x00dS\x00d\x00d\x00d\x00d6\x00d.\x00d}\x00d`\x00d.\x00d\x00d\x00d\x00d\x00dN\x00da\x00d(\x00d-\x00dr\x00d@\x00d1\x00d\x00dJ\x00d{\x00d+\x00dJ\x00d\x00d\x19\x00dD\x00d\x00d\x00dy\x00d\x00d\x00dY\x00d\x00d\x00d\x00dn\x00dw\x00d\x00d\x00d\x00d\x00d\x05\x00d3\x00dO\x00d\x08\x00di\x00d}\x00d[\x00d\x00d\x00d\x0e\x00d\\\x00dl\x00d\x11\x00d\x00d\x00d\x00d\x00d\x10\x00d\x15\x00d\x16\x00d&\x00d\x18\x00d\x19\x00d\x00d\x1b\x00d\x00d\x00d\x13\x00d\x00di\x00d 
\x00d!\x00d\x0c\x00d#\x00d$\x00d\x01\x01d%\x00d&\x00d\x00d(\x00d)\x00d\x00d*\x00d\x0b\x00d\x00d,\x00d-\x00d\x00d/\x00d0\x00d\x00d1\x00d2\x00d\x00d4\x00d5\x00d\x00d7\x00d8\x00dX\x00d9\x00d:\x00d\x00d<\x00d=\x00d\x00d?\x00d\x0e\x00d1\x00dA\x00dB\x00de\x00d+\x00dD\x00d\x00d$\x00dE\x00d\x00dF\x00dG\x00d\x13\x00dI\x00dJ\x00d\x00dL\x00dM\x00d\x1b\x00d\x1b\x00dC\x00d&\x00dO\x00dP\x00dk\x00dR\x00dS\x00d\x00d#\x00d\x11\x00dI\x00dU\x00dV\x00d\x00dX\x00d+\x00d,\x00dF\x00d\'\x00d{\x00d[\x00d\\\x00do\x00d^\x00d_\x00d\x00da\x00d1\x00d$\x00db\x00d_\x00d%\x00dc\x00dH\x00d\x00dd\x00dd\x00df\x00d]\x00df\x00d\x00d%\x00dI\x00d\x00dh\x00di\x00d^\x00dj\x00dk\x00d\x00d\x17\x00dm\x00d\x00do\x00dp\x00d\x00dr\x00ds\x00d"\x00du\x00dA\x00d\x00dC\x00d2\x00d\x00dN\x00dx\x00d\x00dz\x00d{\x00dV\x00d|\x00d \x00d\x00dU\x00dE\x00d\x00d~\x00dO\x00dW\x00dG\x00dC\x00d\x00d\x00dF\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x1b\x00d]\x00d\x17\x00dS\x00d:\x00d$\x00d\x00d\x00d\x00d\x05\x00dZ\x00d2\x00d5\x00dy\x00dO\x00d{\x00d\x00d\x00d^\x00d\x00d\x00d\x00d\x00d\x00dY\x00de\x00d\x00dE\x00d\x00dj\x00d\x00d\x00d\'\x00d\x00d\x00d>\x00d6\x00d.\x00d\x00d`\x00d.\x00d\x00d\x00d\x00d\x10\x00dN\x00da\x00d\x00d-\x00dr\x00d\x00d1\x00d\x00dC\x00d{\x00d+\x00d\x00\x01d\x00d\x19\x00d\x00d\x00d\x00d\x06\x00d\x00d\x00di\x00d\x00d\x00d\x00dn\x00dw\x00d\x00d\x00d\x00d\x00d\x05\x00d3\x00d\x0e\x00d\x08\x00di\x00d.\x00d[\x00d\x00d\x00d\x0e\x00d\\\x00dQ\x00d\x11\x00d\x00d\x00d\x00d\x00d%\x00d\x15\x00d\x16\x00dW\x00d\x18\x00d\x19\x00d(\x00d\x1b\x00d\x00dn\x00d\x13\x00d\x00d\x00d 
\x00d!\x00df\x00d#\x00d$\x00d\x02\x00d%\x00d&\x00d\x00d(\x00d)\x00d%\x00d*\x00d\x0b\x00d\x00d,\x00d-\x00d\x00d/\x00d0\x00d`\x00d1\x00d2\x00dr\x00d4\x00d5\x00d\x00d7\x00d8\x00dM\x00d9\x00d:\x00d\x00d<\x00d=\x00d\x00d?\x00d\x0e\x00dS\x00dA\x00dB\x00d\x00d+\x00dD\x00d\x00d$\x00dE\x00d_\x00dF\x00dG\x00d$\x00dI\x00dJ\x00dP\x00dL\x00dM\x00d\x11\x00d\x1b\x00dC\x00d\x01\x01dO\x00dP\x00d\x08\x00dR\x00dS\x00d\x00d#\x00d\x11\x00d\x1b\x00dU\x00dV\x00d\x00dX\x00d+\x00dL\x00dF\x00d\'\x00d\x03\x00d[\x00d\\\x00d\x12\x00d^\x00d_\x00d\x00da\x00d1\x00d\x00db\x00d_\x00d\x1f\x00dc\x00dH\x00d\x00dd\x00dd\x00d\x00d]\x00df\x00d\'\x00d%\x00dI\x00d\x00dh\x00di\x00d\x00dj\x00dk\x00d\x00d\x17\x00dm\x00d\x00do\x00dp\x00d\x00dr\x00ds\x00dM\x00du\x00dA\x00d\x00dC\x00d2\x00dH\x00dN\x00dx\x00d\\\x00dz\x00d{\x00d\x00d|\x00d \x00d\x00dU\x00dE\x00d>\x00d~\x00dO\x00d\x00dG\x00dC\x00dL\x00d\x00dF\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x11\x00d\x1b\x00d]\x00d9\x00dS\x00d:\x00dV\x00d\x00d\x00d\x00d\x05\x00dZ\x00d\x00d5\x00dy\x00d8\x00d{\x00d\x00d&\x00d^\x00d\x00d\x00d\x00d\x00d\x00dY\x00de\x00d_\x00dE\x00d\x00d\x00d\x00d\x00dT\x00d\x00d\x00d\x00d6\x00d.\x00d\x10\x00d`\x00d.\x00d\x00d\x00d\x00d\x00dN\x00da\x00d\x00d-\x00dr\x00d\x00d1\x00d\x00d(\x00d{\x00d+\x00d\n\x00d\x00d\x19\x00d\x00d\x00d\x00d\x12\x00d\x00d\x00d*\x00d\x00d\x00d\x00dn\x00dw\x00d!\x00d\x00d\x00d\x0b\x00d\x05\x00d3\x00d\x14\x00d\x08\x00di\x00d\x00d[\x00d\x00d:\x00d\x0e\x00d\\\x00d\x00d\x11\x00d\x00d\x00d\x00d\x00d\x00d\x15\x00d\x16\x00d\x00d\x18\x00d\x19\x00d\x00d\x1b\x00d\x00d\x00d\x13\x00d\x00d\x00d 
\x00d!\x00d\x00d#\x00d$\x00d\x15\x00d%\x00d&\x00d=\x00d(\x00d)\x00d\x00d*\x00d\x0b\x00dK\x00d,\x00d-\x00da\x00d/\x00d0\x00d{\x00d1\x00d2\x00d*\x00d4\x00d5\x00d^\x00d7\x00d8\x00d1\x00d9\x00d:\x00d5\x00d<\x00d=\x00d\x00d?\x00d\x0e\x00d&\x00d\x00dB\x00d\x00d+\x00dD\x00d\x00d$\x00dE\x00d`\x00dF\x00dG\x00dA\x00dI\x00dJ\x00d\x00dS\x00dM\x00d\x00d\x1b\x00dC\x00d\x05\x00dO\x00dP\x00d\x00dR\x00dS\x00d\x00d#\x00d\x11\x00d\x14\x00dU\x00dV\x00d\x12\x00dX\x00d+\x00dh\x00dF\x00d\'\x00d\x1b\x00d[\x00d\\\x00d\x00d^\x00d_\x00d#\x00da\x00d1\x00d/\x00db\x00d_\x00d"\x00dc\x00dH\x00d=\x00dd\x00dd\x00d\x13\x00d]\x00df\x00d_\x00d%\x00dI\x00d\x00dh\x00di\x00d\x19\x00dj\x00dk\x00d\x05\x00d\x17\x00dm\x00d[\x00do\x00dp\x00d\x00dr\x00ds\x00dz\x00du\x00dA\x00d9\x00dC\x00d2\x00d\x19\x00dN\x00dx\x00dy\x00dz\x00d{\x00d\x00d|\x00d \x00d\x00dU\x00dE\x00dV\x00d~\x00dO\x00dj\x00dG\x00dC\x00dR\x00d\x00dF\x00d\x00d\x00d\x00d\x0f\x00d\x00d\x00d;\x00d\x1b\x00d]\x00d\x00dS\x00d:\x00d\x00d\x00d\x00d>\x00d\x05\x00dZ\x00d\x00d5\x00dy\x00d1\x00d{\x00d\x00dm\x00d^\x00d\x00d\x17\x00d\x00d\x00d\x00dY\x00de\x00d\x00dE\x00d\x00d^\x00d\x00d\x00d\x00d\x00d\x00d\x00d6\x00d.\x00d\x00d`\x00d.\x00d_\x00d\x00d\x00d\x00dN\x00da\x00d\x00d-\x00dr\x00d\x13\x00d1\x00d\x00d\x00d{\x00d+\x00d*\x00d\x00d\x19\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x18\x00d\x00d\x00d\x11\x00dn\x00dw\x00dw\x00d\x00d\x00dR\x00d\x05\x00d3\x00d\x00d\x08\x00di\x00d\x00d[\x00d\x00d\\\x00d\x0e\x00d\\\x00d\x00d\x11\x00d\x00d<\x00d\x00d\x00d\x00d\x15\x00d\x16\x00d\x00d\x18\x00d\x19\x00d\x00d\x1b\x00d\x00d\x00d\x13\x00d\x00d\x00d 
\x00d!\x00d*\x00d#\x00d$\x00d:\x00d%\x00d&\x00dV\x00d(\x00d)\x00d\x00d*\x00d\x0b\x00d;\x00d,\x00d-\x00dk\x00d/\x00d0\x00dx\x00d1\x00d2\x00d\x00d4\x00d5\x00d\x00d7\x00d8\x00d\x00d9\x00d:\x00do\x00d<\x00d=\x00d\x06\x00d?\x00d\x0e\x00dT\x00dA\x00dB\x00d\x19\x00d+\x00dD\x00d\x00d$\x00dE\x00d\x01\x01dF\x00dG\x00d\'\x00dI\x00dJ\x00d2\x00dL\x00dM\x00d\x00d\x1b\x00dO\x00d2\x00dl\x00d\x00d\x00dR\x00dS\x00dv\x00d\x00d\x00d`\x00dU\x00dV\x00d\x00dX\x00d+\x00d\x00dF\x00dr\x00d\x00d[\x00d\x00d\x00d^\x00dN\x00d\x00da\x00d\x00d\x00db\x00d_\x00d\x00dc\x00dH\x00d\x00dd\x00dd\x00d\x00d]\x00df\x00d\x05\x00d%\x00dI\x00d{\x00dh\x00di\x00d\x00dj\x00dk\x00d8\x00d\x17\x00dm\x00d\x00do\x00dp\x00dD\x00dr\x00ds\x00d\x00du\x00dA\x00d\x00dC\x00d2\x00d\x08\x00dN\x00dx\x00d|\x00dz\x00d{\x00d\x00d|\x00d \x00d\x17\x00dU\x00dE\x00dy\x00d~\x00dO\x00d\x00dG\x00dC\x00d\x00d\x00dF\x00d=\x00d\x00d\x00d\x1f\x00d\x00d\x00d\x00d\x1b\x00d]\x00d#\x00dS\x00d:\x00d\x00d\x00d\x00d\x00d\x05\x00dZ\x00d0\x00d5\x00dy\x00d\x00d{\x00d\x00d\x00d^\x00d\x00d)\x00d\x00d\x00d\x00dY\x00de\x00d\x00dE\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d6\x00d.\x00dR\x00d`\x00d.\x00dq\x00d\x00d\x00d\x00dN\x00da\x00d\x00d-\x00dr\x00d\x00d1\x00d\x00d\x00d{\x00d+\x00d\x00d\x00d\x19\x00d\x00d\x00d\x00dr\x00d\x00d\x00d\x00d\x00d\x00d\x00dn\x00dw\x00d\x11\x00d\x00d\x00dz\x00d\x05\x00d3\x00d\x00d\x08\x00di\x00d,\x00d[\x00d\x00d\x00d\x0e\x00d\\\x00d\x00d\x11\x00d\x00d\x00d\x00d\x00d\x00d\x15\x00d\x16\x00dR\x00d\x18\x00d\x19\x00d\r\x00d\x1b\x00d\x00d\x00d\x13\x00d\x00d\x00d 
\x00d!\x00d\x00d#\x00d$\x00d\x7f\x00d%\x00d&\x00db\x00d(\x00d)\x00d\x00d*\x00d\x0b\x00d\x00d,\x00d-\x00d/\x00d/\x00d0\x00d?\x00d1\x00d2\x00db\x00d4\x00d5\x00d*\x00d7\x00d8\x00d)\x00d9\x00d:\x00d\x00d<\x00d=\x00d\x00d?\x00d\x0e\x00dm\x00dA\x00dB\x00d\x00d+\x00dD\x00dq\x00d$\x00dE\x00df\x00dF\x00dG\x00dM\x00dI\x00dJ\x00dM\x00dL\x00dM\x00d\x00d\x1b\x00dC\x00dR\x00dO\x00dP\x00d\x1d\x00dR\x00dS\x00d\x00d#\x00d\x11\x00d4\x00dU\x00dV\x00d\x1b\x00dX\x00d+\x00d\x1d\x00dF\x00d\'\x00d@\x00d[\x00d\\\x00d\x00d^\x00d_\x00dF\x00da\x00d1\x00dg\x00db\x00d_\x00d\x00dc\x00dH\x00dq\x00dd\x00dd\x00d\x00d]\x00df\x00d\x00d%\x00dI\x00dK\x00dh\x00di\x00d\x00dj\x00dk\x00d\x00d\x17\x00dm\x00d\x00do\x00dp\x00dN\x00dr\x00ds\x00dV\x00du\x00dA\x00d2\x00dC\x00d2\x00d]\x00dN\x00dx\x00dm\x00dz\x00d{\x00d\x00d|\x00d \x00d6\x00dU\x00dE\x00dk\x00d~\x00dO\x00d\x00dG\x00dC\x00d\x00d\x00dF\x00d\x10\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x1b\x00d]\x00d\x00dS\x00d:\x00dV\x00d\x00d\x00d\x00d\x05\x00dZ\x00d\x00d5\x00dy\x00d\x19\x00d{\x00d\x00d<\x00d^\x00d\x00dc\x00d\x00d\x00dG\x00dY\x00de\x00d\x18\x00dE\x00d\x00d\x01\x01d\x00d\x00d\x08\x00d\x00d\x00dB\x00d6\x00d.\x00d\x00d`\x00d.\x00d\x0c\x00d\x00d\x00d"\x00dN\x00da\x00d@\x00d-\x00dr\x00dn\x00d1\x00d\x00dQ\x00d{\x00d+\x00d\x00d\x00d\x19\x00d\x00d\x00d\x00d\x00d\x00d\x00d!\x00d\x00d\x00d\x00dn\x00dw\x00dK\x00d\x00d\x00d\x00d\x05\x00d3\x00d,\x00d\x08\x00di\x00d\x10\x00d[\x00d\x00db\x00d\x0e\x00d\\\x00d\x00\x01d\x11\x00d\x00d\x00d\x00d\x00d\x00\x01d\x15\x00d\x16\x00d@\x00d\x18\x00d\x19\x00d\x00d\x1b\x00d\x00dL\x00d\x13\x00d\x00dF\x00d 
\x00d!\x00d\x00d#\x00d$\x00d\x00d%\x00d&\x00d\x7f\x00d(\x00d)\x00d\x00d*\x00d\x0b\x00d\x00d,\x00d-\x00dS\x00d/\x00d0\x00d\x00d1\x00d2\x00dP\x00d4\x00d5\x00d\x00d7\x00d8\x00d]\x00d9\x00d:\x00d\x00d<\x00d=\x00d\x00d?\x00d\x0e\x00d/\x00dA\x00dB\x00d\x00d+\x00dD\x00d\x00d$\x00dE\x00d\x00dF\x00dG\x00d2\x00dI\x00dJ\x00d\x00dL\x00dM\x00d\x00d\x1b\x00dC\x00d\x00dO\x00dP\x00d7\x00dR\x00dS\x00d\x00d#\x00d\x11\x00d]\x00dU\x00dV\x00dt\x00dX\x00d+\x00d\x00dF\x00d\'\x00d\x00d[\x00d\\\x00d\x00d^\x00d_\x00d\x00da\x00d1\x00d`\x00db\x00d_\x00dM\x00dc\x00dH\x00dd\x00dd\x00dd\x00dq\x00d]\x00df\x00d(\x00d%\x00dI\x00d\x00dh\x00di\x00d\x00dj\x00dk\x00d\x00d\x17\x00dm\x00d+\x00do\x00dp\x00d^\x00dr\x00ds\x00d(\x00du\x00dA\x00d?\x00dC\x00d2\x00d\x00dN\x00dx\x00dX\x00dz\x00d{\x00da\x00d|\x00d \x00d\x00dU\x00dE\x00d^\x00d~\x00dO\x00dV\x00dG\x00dC\x00d3\x00d\x00dF\x00ds\x00d\x00d\x00d{\x00d\x00d\x00d\r\x00d\x1b\x00d]\x00d_\x00dS\x00d:\x00d4\x00d\x00d\x00d\x00d\x05\x00dZ\x00d\x18\x00d5\x00dy\x00d\x13\x00d{\x00d\x00d\x00d^\x00d\x00dV\x00d\x00d\x00d7\x00dY\x00de\x00d\x00dE\x00d\x00d\x00d\x00d\x00ds\x00d\x00d\x00d\x00d6\x00d.\x00d\x00d`\x00d.\x00d\x00d\x00d\x00d\x00dN\x00da\x00d\x00d-\x00dr\x00d<\x00d1\x00d\x00d\x00d{\x00d+\x00d\x17\x00d\x00d\x19\x00d+\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00dn\x00dw\x00d<\x00d\x00d\x00d\x00d\x05\x00d3\x00d\x00d\x08\x00di\x00d\x00d[\x00d\x00d\x00d\x0e\x00d\\\x00d\x00d\x11\x00d\x00d\x00d\x00d\x00d\x00d\x15\x00d\x16\x00d4\x00d\x18\x00d\x19\x00d\x1d\x00d\x1b\x00d\x00dE\x00d\x13\x00d\x00d[\x00d 
\x00d!\x00d\x00d#\x00d$\x00d\x00d%\x00d&\x00d%\x00d(\x00d)\x00dd\x00d*\x00d\x0b\x00d\x00d,\x00d-\x00d!\x00d/\x00d0\x00d\x00d1\x00d2\x00d\x00d4\x00d5\x00d\x00d7\x00d8\x00d\x00d9\x00d:\x00d\x00d<\x00d=\x00d\x00d?\x00d\x0e\x00d\x00dA\x00dB\x00d\x00d+\x00dD\x00d\x13\x00d$\x00dE\x00d\x00dF\x00dG\x00d?\x00dI\x00dJ\x00d\x00dL\x00dM\x00d\x00d\x1b\x00dC\x00d\x00dO\x00dP\x00d\x00dR\x00dS\x00d\x1c\x00d#\x00d\x11\x00dB\x00dU\x00dV\x00d\'\x00dX\x00d+\x00d\x00dF\x00d\'\x00d\x00d[\x00d\\\x00d\x00d^\x00d_\x00d\x00da\x00d1\x00db\x00db\x00d\x00d\x00dc\x00d\x00d\x00dd\x00d\x00d\x02\x00d]\x00dm\x00d\'\x00d%\x00d\x00d{\x00dh\x00d*\x00d\x00dj\x00d\x00d^\x00d\x17\x00dt\x00d\x00do\x00d\x00d%\x00d\x00df\x00d\x00du\x00d\x00d\x03\x00d\x00d\x00d\x00dV\x00d\x00d\x00d`\x00d,\x00dv\x00d\x00dn\x00d\x00d|\x00dJ\x00d\x00d\x0f\x00dM\x00d\x00d~\x00d\x00d\x00d\x00d\x00d\x18\x00d\x00d\x00d\x00d\x00d\x03\x00d\x00d\x00dw\x00dU\x00dL\x00d\x13\x00d\x00d\x00d\x00dB\x00d\x0e\x00d\x00d\x00d\x00d8\x00d4\x00do\x00d\x00d+\x00dJ\x00d^\x00dV\x00d\x00d\x19\x00dr\x00d\x00d\x00d\x00dJ\x00d*\x00d>\x00d\x00d\x00d{\x00d\x00d`\x00d$\x00d6\x00d.\x00d}\x00d`\x00d\x00d~\x00dl\x00ds\x00d\x18\x00d\x00dn\x00d\x00d\x0c\x00d\x00d\x00d\x00d\x00d,\x00d\x00d\x00d=\x00d\x00dX\x00d\x00d\x00d`\x00d\x00d\x00d\x00dG\x00d\x00dy\x00d\x00d\x00d\x00d\x00d\x00d\x03\x00d\x00d\x00d\x00d\x00d\x00d\x00d\x00dT\x00de\x00d\r\x00d\x0e\x00dh\x00d\x00d\x11\x00d\x12\x00dZ\x00di\x00d\x17\x00d\x14\x00d\x15\x00d%\x00d\x11\x00d\x10\x00dU\x00d\x0c\x00d\x1b\x00d\x00d\x00d\x13\x00d\x00d\x1f\x00d\x00d\x00d\x00d#\x00d\x00d;\x00d\x00d\x00d\x1f\x00d(\x00d;\x00d}\x00d*\x00d\x00d\x00dk\x00d\x00d\x00d/\x00d[\x00d\x00d\x00d\x00d\x00d4\x00d\x00d\x16\x00d7\x00d\x00d?\x00d\x00d\x00d\x00d<\x00d\x00d\x00d\x00d\x11\x00d&\x00dA\x00d\x1d\x00dA\x00d+\x00d\x00d\x00db\x00d\x00dz\x00dF\x00d\x00d 
\x00d\x00dE\x00d\x00dL\x00d\x00d\x00d\x1b\x00d\x00d>\x00d\x00d\x00d\x15\x00dR\x00d\x00d>\x00d\x0b\x00d\x0e\x00d\x00dU\x00d6\x00dt\x00dX\x00d9\x00d\x00d\x00dJ\x00d\x00d[\x00d\x00dp\x00d\x0e\x00d\x00dQ\x00da\x00d@\x00d\x14\x00db\x00d\x00d\x08\x00d\x00d^\x00dW\x00dd\x00dj\x00d\x00d\x00d\x00d\x00d%\x00d\x00d\x16\x00dh\x00d\t\x00dP\x00d7\x00d\x10\x00dJ\x00d\x17\x00d\x00d\x00d\x00d\r\x00d\x00dr\x00d9\x00d\x16\x00du\x00d\x00d\x00d\x00d\x0e\x00d\x00dN\x00d\x00d\x00dw\x00d,\x00d\x00d|\x00d\x00d8\x00dU\x00dJ\x00d?\x00d\x00dl\x00d\x1f\x00dG\x00d\x00d\x1b\x00d\x00d\x00d\x05\x00d\x00do\x00d\x00d\x00d\x03\x00d\n\x00d\x00d\x00dO\x00dS\x00d\x13\x00d\x1d\x00d\x00d\x00d\x00d\x05\x00d\x00d\x00d5\x00d\x00d4\x00d\x00d\x00d\x00d^\x00d^\x00dQ\x00d\x00dC\x00dr\x00dY\x00d\x0c\x00d3\x00dE\x00d=\x00d>\x00d\x00dR\x00d?\x00d\x00d\x00d\x1d\x00d\x00dL\x00d\x00d`\x00d\x00d\x00d\x00ds\x00d\x00dx\x00d\x00db\x00d-\x00d\x00d\x00dV\x00d\x00d,\x00d{\x00d\x00d\x19\x00d\x00dU\x00d\x00di\x00d9\x00d\x00d\x00d_\x00d!\x00d"\x00dy\x00ds\x00dn\x00d\x00d\x00d\x00d\x03\x00d\x04\x00d_\x00d\x00d\x07\x00d\x08\x00d\t\x00d\x00d\x00de\x00d\r\x00d\x0e\x00dh\x00d\x00d\x11\x00d\x12\x00dZ\x00di\x00d\x00d\x14\x00d\x15\x00d%\x00d\x11\x00d\x00dU\x00d\x0c\x00d\x1b\x00d\x00d\x00d\x13\x00d\x00d\x1f\x00d\x00dd\x00d\x00d#\x00d\x00d;\x00d\x00d\x00d\x1f\x00d(\x00d;\x00d\x00d*\x00d\x00d\x00dk\x00d \x00d\x00d/\x00d[\x00d\x00d\x00d\x00d\x00d4\x00d\x00dW\x00d7\x00d\x00d?\x00d\x00dE\x00d\x00d<\x00d\x00d\x00d\x1c\x00d\x11\x00d&\x00dA\x00d\x1d\x00d\x00d+\x00d\x00d\x00db\x00d9\x00dz\x00dF\x00d\x00d 
\x00dI\x00dE\x00d\x00dL\x00d\x00d\x1c\x00d\x1b\x00d\x00d>\x00d\x00dm\x00d\x15\x00dR\x00d\x00d>\x00d\x00d\x0e\x00d\x00dU\x00d6\x00d\x00dX\x00d9\x00d\x00d\x00d\x00d\x00d[\x00d\x00dp\x00d\x1b\x00d\x00dQ\x00da\x00d@\x00d\x04\x00db\x00d\x00d\x08\x00d\x00dY\x00dW\x00dd\x00dj\x00d\x00d\x00d\x00d\x00d%\x00d\x00dj\x00dh\x00d\t\x00dP\x00d7\x00d\x00dJ\x00d\x17\x00d\x00d\x00d\x1e\x00d\r\x00d\x00dr\x00d9\x00d\x00du\x00d\x00d\x00d\x00dJ\x00d\x00dN\x00d\x00d\x00d+\x00d,\x00d\x00d|\x00d\x00d\x00dU\x00dJ\x00d?\x00d\x00d\x00d\x1f\x00dG\x00d\x00d\x1b\x00dA\x00d\x00d\x05\x00d\x00do\x00db\x00d\x00d\x03\x00d\n\x00d\x00d\x00dO\x00dS\x00d\x13\x00d\x1d\x00d}\x00d\x00d\x00d\x05\x00d\x00d>\x00d5\x00d\x00d4\x00d\x00dq\x00d\x00d^\x00d^\x00dQ\x00d\x0f\x00dC\x00dr\x00dY\x00d\x0c\x00d\x00dE\x00d=\x00d>\x00d\x00dT\x00d?\x00d\x00d\x00d\x1d\x00d9\x00dL\x00d\x00d`\x00d\x00d\x1c\x00d\x00ds\x00d\x00dx\x00de\x00db\x00d-\x00d\x00d\x00d7\x00d\x00d,\x00d{\x00d\x00d\x00d\x00dU\x00d\x00di\x00d|\x00d\x00d\x00d_\x00d!\x00dR\x00dy\x00ds\x00dn\x00d\x00d\x00d\x00d\x03\x00d\x04\x00d_\x00dE\x00d\x07\x00d\x08\x00d\t\x00d\x00dq\x00de\x00d\r\x00d\x0e\x00dh\x00dI\x00d\x11\x00d\x12\x00dZ\x00di\x00d\x00d\x14\x00d\x15\x00d%\x00d\x11\x00d)\x00dU\x00d\x0c\x00d\x1b\x00d\x00d\x00d\x13\x00d\x00d\x1f\x00d\x00d\x00d\x00d#\x00d\x00d;\x00d\x18\x00d\x00d\x1f\x00d(\x00d;\x00d\x10\x00d*\x00d\x00d\x00dk\x00d\x12\x00d\x00d/\x00d[\x00d\x00d]\x00d\x00d\x00d4\x00d\x00d\x00d7\x00d\x00d?\x00d\x00d(\x00d\x00d<\x00d\x00d\x00d\x00d\x11\x00d&\x00dA\x00d\x1d\x00d1\x00d+\x00d\x00d\x00db\x00d\x1a\x00dz\x00dF\x00d\x00d 
\x00d\x00dE\x00d\x00dL\x00d\x00d\x00d\x1b\x00d\x00d>\x00d\x00dp\x00d\x15\x00dR\x00d\x00d>\x00d\x00d\x0e\x00d\x00dU\x00d6\x00d\x00dX\x00d9\x00d\x00d\x00d\x18\x00d\x00d[\x00d\x00dp\x00d\x00d\x00dQ\x00da\x00d@\x00d%\x00db\x00d\x00d\x08\x00d\x00d,\x00dW\x00dd\x00dj\x00d\x00d\x1d\x00d\x00d\x00d%\x00d\x00d\r\x00dh\x00d\t\x00dP\x00d7\x00d\x00dJ\x00d\x17\x00d\x00d\x00dY\x00d\r\x00d\x00dr\x00d9\x00d\x00du\x00d\x00d\x00d\x00d\x00d\x00dN\x00d\x00d\x00d$\x00d,\x00d\x00d|\x00d\x00d\x00dU\x00dJ\x00d?\x00d\x00d\x1c\x00d\x1f\x00dG\x00d\x00d\x1b\x00d!\x00d\x00d\x05\x00d\x00do\x00d-\x00d\x00d\x03\x00d\n\x00d\x00d\x00dO\x00dS\x00d\x13\x00d\x1d\x00d9\x00d\x00d\x00d\x05\x00d\x00d\x00d5\x00d\x00d4\x00d\x00d\x00d\x00d^\x00d^\x00dQ\x00d\x00dC\x00dr\x00dY\x00d\x0c\x00dO\x00dE\x00d=\x00d>\x00d\x00d\x00d?\x00d\x00d\x00d\x1d\x00d\x00dL\x00d\x00d`\x00d\x00d\x00d\x00ds\x00d\x00dx\x00db\x00db\x00d-\x00d\x00d\x00d\x00d\x00d,\x00d{\x00d\x00dV\x00d\x00dU\x00d\x00di\x00d\x00d\x00d\x00d_\x00d!\x00d\x00dy\x00ds\x00dn\x00d\x00d\x00d\x00d\x03\x00d\x04\x00d_\x00d\x19\x00d\x07\x00d\x08\x00d\t\x00d\x00d\x00de\x00d\r\x00d\x0e\x00dh\x00df\x00d\x11\x00d\x12\x00dZ\x00di\x00d\x07\x00d\x14\x00d\x15\x00d%\x00d\x11\x00d\x00dU\x00d\x0c\x00d\x1b\x00d\x00d\x00d\x13\x00d\x00d\x1f\x00d\x00d\x00d\x00d#\x00d\x00d;\x00dE\x00d\x00d\x1f\x00d(\x00d;\x00d\x00d*\x00d\x00d\x00dk\x00dK\x00d\x00d/\x00d[\x00d\x00d\x00d\x00d\x00d4\x00d\x00d\x00d7\x00d\x00d?\x00d\x00d/\x00d\x00d<\x00d\x00d\x00dI\x00d\x11\x00d&\x00dA\x00d\x1d\x00d\x00d+\x00d\x00d\x00db\x00d\x00dz\x00dF\x00d\x00d 
\x00dQ\x00dE\x00d\x00dL\x00d\x00d\x00d\x1b\x00d\x00d>\x00d\x00d\x1c\x00d\x15\x00dR\x00d\x00d>\x00d\x00d\x0e\x00d\x00dU\x00d6\x00d<\x00dX\x00d9\x00d\x00d\x00d\x00d\x00d[\x00d\x00dp\x00d\t\x00d\x00dQ\x00da\x00d@\x00d\r\x00db\x00d\x00d\x08\x00d\x00dh\x00dW\x00dd\x00dj\x00d\x00d:\x00d\x00d\x00d%\x00d\x00d\x00dh\x00d\t\x00dP\x00d7\x00d\x00dJ\x00d\x17\x00d\x00d\x00d8\x00d\r\x00d\x00dr\x00d9\x00dp\x00du\x00d\x00d\x00d\x00d\x00d\x00dN\x00d\x00d\x00d\x10\x00d,\x00d\x00d|\x00d\x00d\x00dU\x00dJ\x00d?\x00d\x00d\x00d\x1f\x00dG\x00d\x00d\x1b\x00da\x00d\x00d\x05\x00d\x00do\x00d\x00d\x00d\x03\x00d\n\x00d\x00d\x00dO\x00dS\x00d\x13\x00d\x1d\x00d7\x00d\x00d\x00d\x05\x00d\x00d\x00d5\x00d\x00d4\x00d\x00d\x1e\x00d\x00d^\x00d^\x00dQ\x00d\x00dC\x00dr\x00dY\x00d\x0c\x00dP\x00dE\x00d=\x00d>\x00d\x00d\x00d?\x00d\x00d\x00d\x1d\x00d\x00dL\x00d\x00d`\x00d\x00d\x14\x00d\x00ds\x00d\x00dx\x00dE\x00db\x00d-\x00d\x00d\x00d\x00d\x00d,\x00d{\x00d\x00d\x00d\x00dU\x00d\x00di\x00d\x00d\x00d\x00d_\x00d!\x00d3\x00dy\x00ds\x00dn\x00d\x00d\x12\x00d\x00d\x03\x00d\x04\x00d_\x00d\x00d\x07\x00d\x08\x00d\t\x00d\x00d\r\x00de\x00d\r\x00d\x0e\x00dh\x00d\x00d\x11\x00d\x12\x00dZ\x00di\x00d?\x00d\x14\x00d\x15\x00d%\x00d\x11\x00dO\x00dU\x00d\x0c\x00d\x1b\x00d\x00d{\x00d\x13\x00d\x00d\x1f\x00d\x00d\x00d\x00d#\x00d\x00d;\x00dn\x00d\x00d\x1f\x00d(\x00d;\x00d\x00d*\x00d\x00d\x00dk\x00db\x00d\x00d/\x00d[\x00d\x00d\x00d\x00d\x00d4\x00d\x00d\x00d7\x00d\x00d?\x00d\x00dG\x00d\x00d<\x00d\x00d\x00d\x00d\x11\x00d&\x00dA\x00d\x1d\x00d*\x00d+\x00d\x00d\x00db\x00dP\x00dz\x00dF\x00d\x00d 
\x00d9\x00dE\x00d\x00dL\x00d\x00dA\x00d\x1b\x00d\x00d>\x00d\x00d\x00d\x15\x00dR\x00d\x00d>\x00d\x00d\x0e\x00d\x00dU\x00d6\x00d\x00dX\x00d9\x00d\x00d\x00d\x00d\x00d[\x00d\x00dp\x00d\x06\x00d\x00dQ\x00da\x00d@\x00d\x00db\x00d\x00d\x08\x00d\x00d\x00dW\x00dd\x00dj\x00d\x00d\x00d\x00d\x00d%\x00d\x00dH\x00dh\x00d\t\x00dP\x00d7\x00dy\x00dJ\x00d\x17\x00d\x00d\x00d\x00d\r\x00d\x00dr\x00d9\x00d\x00du\x00d\x00d\x00d\x00dh\x00d\x00dN\x00d\x00d\x00d\x00d,\x00d\x00d|\x00d\x00dV\x00dU\x00dJ\x00d?\x00d\x00dU\x00d\x1f\x00dG\x00d\x00d\x1b\x00d\x00d\x00d\x05\x00d\x00do\x00d{\x00d\x00d\x03\x00d\n\x00d\x00d&\x00dO\x00dS\x00d\x13\x00d\x1d\x00d=\x00d\x00d\x00d\x05\x00d\x00d\x00d5\x00d\x00d4\x00d\x00d\x00d\x00d^\x00d^\x00dQ\x00d9\x00dC\x00dr\x00dY\x00d\x0c\x00d\x00dE\x00d=\x00d>\x00d\x00dj\x00d?\x00d\x00d\x00d\x1d\x00d\x00dL\x00d\x00d`\x00d\x00d\x15\x00d\x00ds\x00d\x00dx\x00d\x00db\x00d-\x00d\x00d\x00d\x00d\x00d,\x00d{\x00d\x00d\x00d\x00dU\x00d\x00di\x00d\x00d\x00d\x00d_\x00d!\x00d\x00dy\x00ds\x00dn\x00d\x00di\x00d\x00d\x03\x00d\x04\x00d_\x00d\x13\x00d\x07\x00d\x08\x00d\t\x00d\x00d\x11\x00de\x00d\r\x00d\x0e\x00d,\x00d\x10\x00d\x11\x00d\x12\x00dZ\x00d \x00d\x00d\x14\x00d\x15\x00d%\x00d\x16\x00d_\x00dU\x00d\x0c\x00d\x1b\x00dp\x00dx\x00d\x00d;\x00d\x00d_\x00d\x00d\x00d\x00d\x00d\x00d%\x00d&\x00dm\x00d\x00d\x00d\x00d*\x00d\x00d\x00d\x00d\x1a\x00d\x00d/\x00dg\x00d*\x00dR\x00d\x00d\x00d4\x00d\x00d\x00dM\x00d\x00d?\x00d9\x00dd\x00d 
\x00d\x00d\x00d\x00d?\x00d\x00dV\x00d,\x00d\x00d\x00d+\x00d\x00d\x00dV\x00dy\x00d\x00d5\x00d\x1c\x00d\x00dI\x00dE\x00d|\x00dC\x00dE\x00d\x00d\x00dM\x00d>\x00dO\x00d~\x00d\x15\x00d\x00d\x00d\x00d#\x00d\x0e\x00dx\x00dU\x00d\x00d\x1d\x00dX\x00d\x04\x00d\x00dF\x00d\x00d\x00d8\x00d\x00d\x00d"\x00d\x00d\x00dN\x00d\x00d@\x00d\x00d\x00d\x08\x00dc\x00d\x00d+\x00d\x00dd\x00d~\x00do\x00dm\x00d\x00dt\x00dh\x00dp\x00dh\x00d/\x00d\x00dj\x00d\x00dJ\x00d\x00d\x00dG\x00do\x00d\x00d\x00d0\x00d\x00d\x00du\x00d\x00d\x00dt\x00d\x00d\x1b\x00dN\x00d\\\x00d\x00d\x00d,\x00d\x00d|\x00d#\x00d%\x00d\x00dJ\x00d\t\x00d~\x00d\x00d\x1f\x00d*\x00d\x00d\x00d\x00d\x00d\x05\x00d>\x00d\x00dP\x00d\x00d\x00d\n\x00d\x00dw\x00d(\x00dS\x00dB\x00dR\x00d\x00d\x00d\x00d\x05\x00dL\x00d\x00d\x00d\x00dI\x00d{\x00d$\x00d\x00d\x00d^\x00de\x00d\x00dt\x00dr\x00d-\x00d\x00d\x05\x00dE\x00d\x1e\x00d>\x00dA\x00d\x02\x00d\t\x00d\x00d\x00dR\x00d8\x00dL\x00dS\x00d`\x00d\x1c\x00dh\x00d\x00d\x00d\x00d\x00d\x00db\x00dY\x00d\x00d\n\x00d1\x00d\x0e\x00d\x00d?\x00d\x00d\x00d\x00d\x00d\x00d$\x00d\x00d\t\x00d\x00d\x00d?\x00d\x1d\x00d\x00d\x00d\x00g1Z\x01\x00d\x02\x01Z\x02\x00d\x00d\x00f\x02\x00\\\x02\x00Z\x03\x00Z\x04\x00d\x00d\x03\x00d\x04\x00d\x05\x00d\x06\x00d\x07\x00d\x08\x00d\t\x00d\n\x00d[\x00de\x00d\r\x00d\x0e\x00d\x00d\x10\x00d\x11\x00d\x12\x00dZ\x00d\x00dC\x00d\x14\x00d\x15\x00d%\x00d\x17\x00d\x18\x00dU\x00d\x0c\x00d\x1b\x00d!\x00d\x1d\x00d\x13\x00d\x00d\x1f\x00d 
\x00d\x00d\x00d#\x00d\x00d\x00d%\x00d\x00d\x1f\x00d(\x00d\x00d\x00d*\x00d\x00d\x00d,\x00d\x1a\x00d\x00d/\x00d[\x00d\x00d1\x00d\x00d\x00d4\x00d\x00d\x00d7\x00d\x00d?\x00d9\x00d\x13\x00d\x00d<\x00d\x00d\x00d?\x00d\x11\x00d&\x00dA\x00dR\x00d\x00d+\x00d\x00d\x00d$\x00dJ\x00dz\x00dF\x00d\x00d\x00dI\x00dE\x00d\x00dL\x00d\x00d\x00d\x1b\x00d\x00d>\x00dO\x00d~\x00d\x15\x00dR\x00d\x00d\x00d#\x00d\x0e\x00d\x00dU\x00d\x00d\x1d\x00dX\x00d9\x00d\x00dF\x00d\x00d\x00d[\x00d\x00d\x00d^\x00d\x00dQ\x00da\x00d\x00d9\x00db\x00d\x00d\x08\x00dc\x00d\x00dW\x00dd\x00dj\x00dC\x00d]\x00d\x00d\x00d%\x00dh\x00dp\x00dh\x00d\t\x00dP\x00dj\x00d\x00dJ\x00d\x17\x00d\x00d*\x00do\x00d\r\x00d\x00dr\x00d\x00d\x00du\x00d\x00d\x00dC\x00d\x00d\x00dN\x00d\x00d\x00dz\x00d,\x00d\x00d|\x00dn\x00d%\x00dU\x00dJ\x00d?\x00d~\x00dX\x00d\x1f\x00dG\x00d\x00d\x00d\x00d\x00d\x05\x00d\x00d\x00d\x17\x00d\x00d\x03\x00d\n\x00d\x1b\x00dw\x00dO\x00dS\x00d\x13\x00dR\x00d\x00d\x00d\x00d\x05\x00d\x00d\x00d5\x00d\x00d4\x00d{\x00d\x00d\x00d^\x00d^\x00d\x00d\x00dC\x00dr\x00dY\x00d\x00d\x00dE\x00d=\x00d>\x00d\x00d\x02\x00d?\x00d\x00d\x00dR\x00d6\x00dL\x00d\x00d`\x00dL\x00d\x00d\x00ds\x00d\x00dN\x00dW\x00db\x00d-\x00d\x00d\x00d1\x00d\x00d,\x00d{\x00d9\x00d=\x00d\x00dU\x00d\x00d\x00d\x00d\x00d\x00d_\x00d\x00d\x00dy\x00ds\x00dn\x00d]\x00d\x14\x00g\x00Z\x05\x00xe\x00e\x03\x00e\x06\x00e\x01\x00\x01\x00k\x05\x00rPn\x00\x00e\x04\x00e\x06\x00e\x05\x00\x01\x00k\x05\x00rŘd\x00Z\x04\x00n\x00\x00e\x02\x00e\x07\x00e\x01\x00e\x03\x00\x19e\x05\x00e\x04\x00\x19A\x01\x007Z\x02\x00e\x03\x00d?\x007Z\x03\x00e\x04\x00d?\x007Z\x04\x00qWe\x00\x00j\x08\x00e\x02\x00\x01\x00d\x01\x00\x04Ud\x01\x00S(\x03\x01\x00\x00iNi0\x00\x00\x00i+\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\n\x00\x00\x00i9\x00\x00\x00i\x00\x00\x00iH\x00\x00\x00i\x17\x00\x00\x00iG\x00\x00\x00i`\x00\x00\x00i\x00\x00\x00iR\x00\x00\x00in\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i~\x00\x00\x00i&\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00iW\x00\x00\x00iF\x00\x00\x00i\x0
0\x00\x00i\x05\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00ii\x00\x00\x00i\x0b\x00\x00\x00i@\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00if\x00\x00\x00i"\x00\x00\x00i^\x00\x00\x00i\x00\x00\x00i(\x00\x00\x00i\x00\x00\x00iX\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x18\x00\x00\x00ip\x00\x00\x00i\x00\x00\x00i\x1a\x00\x00\x00i\x00\x00\x00i$\x00\x00\x00iu\x00\x00\x00i\x03\x00\x00\x00i\x01\x00\x00\x00i\x00\x00\x00i\x08\x00\x00\x00i\x00\x00\x00iU\x00\x00\x00i\x00\x00\x00i\x1c\x00\x00\x00i\x00\x00\x00id\x00\x00\x00i\x00\x00\x00i\x1f\x00\x00\x00ix\x00\x00\x00iK\x00\x00\x00i:\x00\x00\x00i\x19\x00\x00\x00i\x00\x00\x00iV\x00\x00\x00iD\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i;\x00\x00\x00i|\x00\x00\x00i3\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i2\x00\x00\x00i\x00\x00\x00i\x00\x00\x00iL\x00\x00\x00iv\x00\x00\x00iw\x00\x00\x00iy\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x15\x00\x00\x00iJ\x00\x00\x00i\x00\x00\x00i%\x00\x00\x00i{\x00\x00\x00i\x00\x00\x00iq\x00\x00\x00i\x0f\x00\x00\x00i[\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x1b\x00\x00\x00i\x04\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i>\x00\x00\x00ih\x00\x00\x00i\x13\x00\x00\x00i\x00\x00\x00i\x10\x00\x00\x00i\x00\x00\x00i\x02\x00\x00\x00i6\x00\x00\x00i*\x00\x00\x00i 
\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x16\x00\x00\x00i\x00\x00\x00iO\x00\x00\x00ig\x00\x00\x00i\x00\x00\x00i\x00\x00\x00iz\x00\x00\x00i\x1d\x00\x00\x00i1\x00\x00\x00i\x14\x00\x00\x00i\x11\x00\x00\x00i\x00\x00\x00iT\x00\x00\x00ij\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i_\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00iY\x00\x00\x00i5\x00\x00\x00ik\x00\x00\x00i,\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00it\x00\x00\x00ie\x00\x00\x00iS\x00\x00\x00i\r\x00\x00\x00i.\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x06\x00\x00\x00iE\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\t\x00\x00\x00i\x00\x00\x00i\x00\x00\x00iM\x00\x00\x00i\x0e\x00\x00\x00i\x00\x00\x00iZ\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i)\x00\x00\x00i\\\x00\x00\x00iQ\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i8\x00\x00\x00i\x00\x00\x00i=\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i]\x00\x00\x00i4\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x1e\x00\x00\x00i\x00\x00\x00i\x00\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x07\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i#\x00\x00\x00io\x00\x00\x00i?\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00im\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i7\x00\x00\x00i-\x00\x00\x00i\x00\x00\x00i}\x00\x00\x00i\'\x00\x00\x00i\x00\x00\x00iB\x00\x00\x00iP\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00iC\x00\x00\x00i\x00\x00\x00i\x12\x00\x00\x00i\x7f\x00\x00\x00iI\x00\x00\x00i\x00\x00\x00ia\x00\x00\x00is\x00\x00\x00ic\x00\x00\x00i\x00\x00\x00il\x00\x00\x00iA\x00\x00\x00i\x00\x00\x00i<\x00\x00\x00i!\x00\x00\x00i\x0c\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i\x00\x00\x00ir\x00\x00\x00i\x00\x00\x00iN\x00\x00\x00i\x00\x00\x00i\x00\x00\x00i/\x00\x00\x00ib\x00\x00\x00t\x00\x00\x00\x00(\t\x00\x00\x00t\x07\x00\x00\x00marshalt\x01\x00\x00\x00dt\x01\x00\x00\x00et\x01\x00\x00\x00it\x01\x00\x00\x00jt\x01\x00\x00\x00kt\x03\x00\x00\x00lent\x03\x00\x00\x00
chrt\x05\x00\x00\x00loads(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x08\x00\x00\x00<script>t\x08\x00\x00\x00<module>\x01\x00\x00\x00sL\x01\x00\x00\x0c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x06\x01\x12\x01\x00\x00\x00\x06\x01\x03\x01\x12\x00\x04\x01\x12\x00\t\x01\x1c\x01\n\x01\x0e\x01')
# okay decompiling ME.pyc
| 8,414.6
| 83,876
| 0.777292
| 15,825
| 84,146
| 4.125245
| 0.011817
| 0.598021
| 0.573144
| 0.525229
| 0.9273
| 0.888407
| 0.850924
| 0.823152
| 0.784106
| 0.738902
| 0
| 0.377003
| 0.001806
| 84,146
| 9
| 83,877
| 9,349.555556
| 0.400219
| 0.00284
| 0
| 0
| 0
| 15.5
| 0.634763
| 0.633917
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 19
|
78aed7125fd2567620e735b709cded9395ed76f4
| 12,306
|
py
|
Python
|
tests/test_reshape.py
|
airbus/decomon
|
f3668fbd8edd0def4e23aa0634eebfec58349c35
|
[
"MIT"
] | 11
|
2021-11-03T12:09:50.000Z
|
2022-02-20T21:42:13.000Z
|
tests/test_reshape.py
|
airbus/decomon
|
f3668fbd8edd0def4e23aa0634eebfec58349c35
|
[
"MIT"
] | 1
|
2022-02-18T13:40:46.000Z
|
2022-02-18T13:40:46.000Z
|
tests/test_reshape.py
|
airbus/decomon
|
f3668fbd8edd0def4e23aa0634eebfec58349c35
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
import pytest
import numpy as np
from . import (
get_standard_values_images_box,
get_tensor_decomposition_images_box,
assert_output_properties_box,
assert_output_properties_box_linear,
)
import tensorflow.python.keras.backend as K
from tensorflow.keras.layers import Reshape, Permute
from decomon.layers.decomon_layers import to_monotonic
from decomon.layers.decomon_reshape import DecomonPermute, DecomonReshape
@pytest.mark.parametrize(
    "odd, m_0, m_1, mode, floatx",
    [
        (0, 0, 1, "hybrid", 32),
        (0, 0, 1, "forward", 32),
        (0, 0, 1, "ibp", 32),
        (0, 0, 1, "hybrid", 64),
        (0, 0, 1, "forward", 64),
        (0, 0, 1, "ibp", 64),
        (0, 0, 1, "hybrid", 16),
        (0, 0, 1, "forward", 16),
        (0, 0, 1, "ibp", 16),
    ],
)
def test_Decomon_reshape_box(odd, m_0, m_1, mode, floatx):
    """Check DecomonReshape (dc_decomp=True) bound properties on image box domains.

    Runs the layer in each propagation mode and float precision, then verifies
    the output bounds with assert_output_properties_box.
    """
    K.set_floatx("float{}".format(floatx))
    eps = K.epsilon()
    decimal = 5
    if floatx == 16:
        # float16 needs a looser epsilon and tolerance
        K.set_epsilon(1e-2)
        decimal = 1
    try:
        inputs = get_tensor_decomposition_images_box("channels_last", odd)
        inputs_ = get_standard_values_images_box("channels_last", odd, m0=m_0, m1=m_1)
        x, y, z, u_c, W_u, b_u, l_c, W_l, b_l, h, g = inputs
        x_ = inputs_[0]
        # z_ keeps the input box; only overwritten in hybrid/forward modes
        z_ = inputs_[2]
        target_shape = (np.prod(y.shape[1:]),)
        y_ = np.reshape(inputs_[1], (-1, target_shape[0]))
        monotonic_layer = DecomonReshape(target_shape, dc_decomp=True, mode=mode)
        # each mode consumes a different subset of the decomposition tensors
        if mode == "hybrid":
            output = monotonic_layer(inputs[2:])
        elif mode == "forward":
            output = monotonic_layer([z, W_u, b_u, W_l, b_l, h, g])
        elif mode == "ibp":
            output = monotonic_layer([u_c, l_c, h, g])
        else:
            raise ValueError("unknown mode: {}".format(mode))
        f_reshape = K.function(inputs[2:], output)
        if mode == "hybrid":
            z_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_, h_, g_ = f_reshape(inputs_[2:])
        elif mode == "forward":
            z_, w_u_, b_u_, w_l_, b_l_, h_, g_ = f_reshape(inputs_[2:])
            u_c_ = None
            l_c_ = None
        else:  # ibp: constant bounds only, z_ stays the input box
            u_c_, l_c_, h_, g_ = f_reshape(inputs_[2:])
            w_u_, b_u_, w_l_, b_l_ = [None] * 4
        assert_output_properties_box(
            x_,
            y_,
            h_,
            g_,
            z_[:, 0],
            z_[:, 1],
            u_c_,
            w_u_,
            b_u_,
            l_c_,
            w_l_,
            b_l_,
            "reshape_{}_{}_{}".format(odd, m_0, m_1),
            decimal=decimal,
        )
    finally:
        # always restore global backend state so later tests are unaffected
        K.set_floatx("float32")
        K.set_epsilon(eps)
@pytest.mark.parametrize(
    "odd, m_0, m_1, mode, floatx",
    [
        (0, 0, 1, "hybrid", 32),
        (0, 0, 1, "forward", 32),
        (0, 0, 1, "ibp", 32),
        (0, 0, 1, "hybrid", 64),
        (0, 0, 1, "forward", 64),
        (0, 0, 1, "ibp", 64),
        (0, 0, 1, "hybrid", 16),
        (0, 0, 1, "forward", 16),
        (0, 0, 1, "ibp", 16),
    ],
)
def test_Decomon_reshape_box_nodc(odd, m_0, m_1, mode, floatx):
    """Check DecomonReshape without difference-of-convex decomposition (dc_decomp=False)."""
    K.set_floatx("float{}".format(floatx))
    eps = K.epsilon()
    decimal = 5
    if floatx == 16:
        # float16 needs a looser epsilon and tolerance
        K.set_epsilon(1e-2)
        decimal = 1
    try:
        inputs = get_tensor_decomposition_images_box("channels_last", odd, dc_decomp=False)
        inputs_ = get_standard_values_images_box("channels_last", odd, m0=m_0, m1=m_1, dc_decomp=False)
        x, y, z, u_c, W_u, b_u, l_c, W_l, b_l = inputs
        x_ = inputs_[0]
        # z_ keeps the input box; only overwritten in hybrid/forward modes
        z_ = inputs_[2]
        target_shape = (np.prod(y.shape[1:]),)
        y_ = np.reshape(inputs_[1], (-1, target_shape[0]))
        monotonic_layer = DecomonReshape(target_shape, dc_decomp=False, mode=mode)
        # each mode consumes a different subset of the decomposition tensors
        if mode == "hybrid":
            output = monotonic_layer(inputs[2:])
        elif mode == "forward":
            output = monotonic_layer([z, W_u, b_u, W_l, b_l])
        elif mode == "ibp":
            output = monotonic_layer([u_c, l_c])
        else:
            raise ValueError("unknown mode: {}".format(mode))
        f_reshape = K.function(inputs[2:], output)
        if mode == "hybrid":
            z_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_ = f_reshape(inputs_[2:])
        elif mode == "forward":
            z_, w_u_, b_u_, w_l_, b_l_ = f_reshape(inputs_[2:])
            u_c_ = None
            l_c_ = None
        else:  # ibp: constant bounds only, z_ stays the input box
            u_c_, l_c_ = f_reshape(inputs_[2:])
            w_u_, b_u_, w_l_, b_l_ = [None] * 4
        assert_output_properties_box(
            x_,
            y_,
            None,
            None,
            z_[:, 0],
            z_[:, 1],
            u_c_,
            w_u_,
            b_u_,
            l_c_,
            w_l_,
            b_l_,
            "reshape_{}_{}_{}".format(odd, m_0, m_1),
            decimal=decimal,
        )
    finally:
        # always restore global backend state so later tests are unaffected
        K.set_floatx("float32")
        K.set_epsilon(eps)
@pytest.mark.parametrize(
    "odd, m_0, m_1, shared, floatx",
    [
        (0, 0, 1, False, 32),
        (0, 0, 1, True, 32),
        (0, 0, 1, False, 64),
        (0, 0, 1, True, 64),
        (0, 0, 1, False, 16),
        (0, 0, 1, True, 16),
    ],
)
def test_Decomon_reshape_to_monotonic_box(odd, m_0, m_1, shared, floatx):
    """Check that to_monotonic(Reshape) agrees with the Keras Reshape reference output."""
    K.set_floatx("float{}".format(floatx))
    eps = K.epsilon()
    decimal = 4
    if floatx == 16:
        # float16 needs a looser epsilon and tolerance
        K.set_epsilon(1e-2)
        decimal = 1
    try:
        inputs = get_tensor_decomposition_images_box("channels_last", odd)
        inputs_ = get_standard_values_images_box("channels_last", odd, m0=m_0, m1=m_1)
        x, y, z, u_c, W_u, b_u, l_c, W_l, b_l, h, g = inputs
        x_ = inputs_[0]
        target_shape = (np.prod(y.shape[1:]),)
        reshape_ref = Reshape(target_shape)
        output_ref = reshape_ref(inputs[1])
        input_dim = x_.shape[-1]
        monotonic_layer = to_monotonic(reshape_ref, input_dim, dc_decomp=True, shared=shared)
        output = monotonic_layer[0](inputs[2:])
        if len(monotonic_layer) > 1:
            # to_monotonic may split the layer and its activation in two
            output = monotonic_layer[1](output)
        f_ref = K.function(inputs, output_ref)
        f_reshape = K.function(inputs[2:], output)
        y_ref = f_ref(inputs_)
        z_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_, h_, g_ = f_reshape(inputs_[2:])
        assert_output_properties_box(
            x_,
            y_ref,
            h_,
            g_,
            z_[:, 0],
            z_[:, 1],
            u_c_,
            w_u_,
            b_u_,
            l_c_,
            w_l_,
            b_l_,
            "reshape_{}_{}_{}".format(odd, m_0, m_1),
            decimal=decimal,
        )
    finally:
        # always restore global backend state so later tests are unaffected
        K.set_floatx("float32")
        K.set_epsilon(eps)
# permute
@pytest.mark.parametrize(
    "odd, m_0, m_1, mode, floatx",
    [
        (0, 0, 1, "hybrid", 32),
        (0, 0, 1, "forward", 32),
        (0, 0, 1, "ibp", 32),
        (0, 0, 1, "hybrid", 64),
        (0, 0, 1, "forward", 64),
        (0, 0, 1, "ibp", 64),
        (0, 0, 1, "hybrid", 16),
        (0, 0, 1, "forward", 16),
        (0, 0, 1, "ibp", 16),
    ],
)
def test_Decomon_permute_box(odd, m_0, m_1, mode, floatx):
    """Check DecomonPermute (dc_decomp=True) bound properties on image box domains.

    NOTE(review): the permutation is drawn from an unseeded np.random, so the
    axes order differs between runs — confirm this is intended.
    """
    K.set_floatx("float{}".format(floatx))
    eps = K.epsilon()
    decimal = 5
    if floatx == 16:
        # float16 needs a looser epsilon and tolerance
        K.set_epsilon(1e-2)
        decimal = 1
    try:
        inputs = get_tensor_decomposition_images_box("channels_last", odd)
        inputs_ = get_standard_values_images_box("channels_last", odd, m0=m_0, m1=m_1)
        x, y, z, u_c, W_u, b_u, l_c, W_l, b_l, h, g = inputs
        x_ = inputs_[0]
        # z_ keeps the input box; only overwritten in hybrid/forward modes
        z_ = inputs_[2]
        n_dim = len(y.shape) - 1
        # random permutation of the non-batch axes (1-based, as Permute expects)
        target_shape = np.random.permutation(n_dim) + 1
        target_shape_ = tuple([0] + list(target_shape))
        y_ = np.transpose(inputs_[1], target_shape_)
        monotonic_layer = DecomonPermute(target_shape, dc_decomp=True, mode=mode)
        # each mode consumes a different subset of the decomposition tensors
        if mode == "hybrid":
            output = monotonic_layer(inputs[2:])
        elif mode == "forward":
            output = monotonic_layer([z, W_u, b_u, W_l, b_l, h, g])
        elif mode == "ibp":
            output = monotonic_layer([u_c, l_c, h, g])
        else:
            raise ValueError("unknown mode: {}".format(mode))
        f_permute = K.function(inputs[2:], output)
        if mode == "hybrid":
            z_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_, h_, g_ = f_permute(inputs_[2:])
        elif mode == "forward":
            z_, w_u_, b_u_, w_l_, b_l_, h_, g_ = f_permute(inputs_[2:])
            u_c_ = None
            l_c_ = None
        else:  # ibp: constant bounds only, z_ stays the input box
            u_c_, l_c_, h_, g_ = f_permute(inputs_[2:])
            w_u_, b_u_, w_l_, b_l_ = [None] * 4
        assert_output_properties_box(
            x_,
            y_,
            h_,
            g_,
            z_[:, 0],
            z_[:, 1],
            u_c_,
            w_u_,
            b_u_,
            l_c_,
            w_l_,
            b_l_,
            "reshape_{}_{}_{}".format(odd, m_0, m_1),
            decimal=decimal,
        )
    finally:
        # always restore global backend state so later tests are unaffected
        K.set_floatx("float32")
        K.set_epsilon(eps)
@pytest.mark.parametrize(
    "odd, m_0, m_1, mode, floatx",
    [
        (0, 0, 1, "hybrid", 32),
        (0, 0, 1, "forward", 32),
        (0, 0, 1, "ibp", 32),
        (0, 0, 1, "hybrid", 64),
        (0, 0, 1, "forward", 64),
        (0, 0, 1, "ibp", 64),
        (0, 0, 1, "hybrid", 16),
        (0, 0, 1, "forward", 16),
        (0, 0, 1, "ibp", 16),
    ],
)
def test_Decomon_permute_box_nodc(odd, m_0, m_1, mode, floatx):
    """Check DecomonPermute without difference-of-convex decomposition (dc_decomp=False)."""
    K.set_floatx("float{}".format(floatx))
    eps = K.epsilon()
    decimal = 5
    if floatx == 16:
        # float16 needs a looser epsilon and tolerance
        K.set_epsilon(1e-2)
        decimal = 1
    try:
        inputs = get_tensor_decomposition_images_box("channels_last", odd, dc_decomp=False)
        inputs_ = get_standard_values_images_box("channels_last", odd, m0=m_0, m1=m_1, dc_decomp=False)
        x, y, z, u_c, W_u, b_u, l_c, W_l, b_l = inputs
        x_ = inputs_[0]
        # z_ keeps the input box; only overwritten in hybrid/forward modes
        z_ = inputs_[2]
        n_dim = len(y.shape) - 1
        # random permutation of the non-batch axes (1-based, as Permute expects)
        target_shape = np.random.permutation(n_dim) + 1
        target_shape_ = tuple([0] + list(target_shape))
        y_ = np.transpose(inputs_[1], target_shape_)
        monotonic_layer = DecomonPermute(target_shape, dc_decomp=False, mode=mode)
        # each mode consumes a different subset of the decomposition tensors
        if mode == "hybrid":
            output = monotonic_layer(inputs[2:])
        elif mode == "forward":
            output = monotonic_layer([z, W_u, b_u, W_l, b_l])
        elif mode == "ibp":
            output = monotonic_layer([u_c, l_c])
        else:
            raise ValueError("unknown mode: {}".format(mode))
        f_permute = K.function(inputs[2:], output)
        if mode == "hybrid":
            z_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_ = f_permute(inputs_[2:])
        elif mode == "forward":
            z_, w_u_, b_u_, w_l_, b_l_ = f_permute(inputs_[2:])
            u_c_ = None
            l_c_ = None
        else:  # ibp: constant bounds only, z_ stays the input box
            u_c_, l_c_ = f_permute(inputs_[2:])
            w_u_, b_u_, w_l_, b_l_ = [None] * 4
        assert_output_properties_box(
            x_,
            y_,
            None,
            None,
            z_[:, 0],
            z_[:, 1],
            u_c_,
            w_u_,
            b_u_,
            l_c_,
            w_l_,
            b_l_,
            "reshape_{}_{}_{}".format(odd, m_0, m_1),
            decimal=decimal,
        )
    finally:
        # always restore global backend state so later tests are unaffected
        K.set_floatx("float32")
        K.set_epsilon(eps)
@pytest.mark.parametrize(
    "odd, m_0, m_1, shared, floatx",
    [
        (0, 0, 1, False, 32),
        (0, 0, 1, True, 32),
        (0, 0, 1, False, 64),
        (0, 0, 1, True, 64),
        (0, 0, 1, False, 16),
        (0, 0, 1, True, 16),
    ],
)
def test_Decomon_permute_to_monotonic_box(odd, m_0, m_1, shared, floatx):
    """Check that to_monotonic(Permute) agrees with the Keras Permute reference output."""
    K.set_floatx("float{}".format(floatx))
    eps = K.epsilon()
    decimal = 4
    if floatx == 16:
        # float16 needs a looser epsilon and tolerance
        K.set_epsilon(1e-2)
        decimal = 1
    try:
        inputs = get_tensor_decomposition_images_box("channels_last", odd)
        inputs_ = get_standard_values_images_box("channels_last", odd, m0=m_0, m1=m_1)
        x, y, z, u_c, W_u, b_u, l_c, W_l, b_l, h, g = inputs
        x_ = inputs_[0]
        n_dim = len(y.shape) - 1
        # random permutation of the non-batch axes (1-based, as Permute expects)
        target_shape = np.random.permutation(n_dim) + 1
        permute_ref = Permute(target_shape)
        output_ref = permute_ref(inputs[1])
        input_dim = x_.shape[-1]
        monotonic_layer = to_monotonic(permute_ref, input_dim, dc_decomp=True, shared=shared)
        output = monotonic_layer[0](inputs[2:])
        if len(monotonic_layer) > 1:
            # to_monotonic may split the layer and its activation in two
            output = monotonic_layer[1](output)
        f_ref = K.function(inputs, output_ref)
        f_permute = K.function(inputs[2:], output)
        y_ref = f_ref(inputs_)
        z_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_, h_, g_ = f_permute(inputs_[2:])
        assert_output_properties_box(
            x_,
            y_ref,
            h_,
            g_,
            z_[:, 0],
            z_[:, 1],
            u_c_,
            w_u_,
            b_u_,
            l_c_,
            w_l_,
            b_l_,
            "reshape_{}_{}_{}".format(odd, m_0, m_1),
            decimal=decimal,
        )
    finally:
        # always restore global backend state so later tests are unaffected
        K.set_floatx("float32")
        K.set_epsilon(eps)
| 27.46875
| 107
| 0.55282
| 1,908
| 12,306
| 3.175052
| 0.055031
| 0.015847
| 0.02377
| 0.019809
| 0.937933
| 0.91796
| 0.91796
| 0.915649
| 0.915649
| 0.915649
| 0
| 0.050423
| 0.28929
| 12,306
| 447
| 108
| 27.530201
| 0.642236
| 0.026247
| 0
| 0.848404
| 0
| 0
| 0.068632
| 0
| 0
| 0
| 0
| 0
| 0.021277
| 1
| 0.015957
| false
| 0
| 0.021277
| 0
| 0.037234
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1586820887d7cfa183e8c291d8414bf0d285231a
| 76
|
py
|
Python
|
hello.py
|
Skandagn/pythonTraining
|
b003e7da1cd68cc72ec7db44fe829c73294efc26
|
[
"MIT"
] | null | null | null |
hello.py
|
Skandagn/pythonTraining
|
b003e7da1cd68cc72ec7db44fe829c73294efc26
|
[
"MIT"
] | null | null | null |
hello.py
|
Skandagn/pythonTraining
|
b003e7da1cd68cc72ec7db44fe829c73294efc26
|
[
"MIT"
] | null | null | null |
# Emit the fixed sequence of greeting/version messages.
for _message in (
    "Hello",
    "Updating to v1. Modifying V1",
    "Updating to v2",
):
    print(_message)
| 25.333333
| 37
| 0.723684
| 12
| 76
| 4.583333
| 0.583333
| 0.472727
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044118
| 0.105263
| 76
| 3
| 38
| 25.333333
| 0.764706
| 0
| 0
| 0
| 0
| 0
| 0.61039
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
ec6e6d8c88500addc91ac8be86cfba944c8de570
| 204
|
py
|
Python
|
examples/doodle/views.py
|
rudeb0t/tornadio2go
|
ad618717c6b185fee533928d378c086e273685a9
|
[
"BSD-3-Clause"
] | 1
|
2015-02-13T06:36:03.000Z
|
2015-02-13T06:36:03.000Z
|
examples/doodle/views.py
|
rudeb0t/tornadio2go
|
ad618717c6b185fee533928d378c086e273685a9
|
[
"BSD-3-Clause"
] | 1
|
2016-03-24T16:00:38.000Z
|
2016-03-24T16:00:38.000Z
|
examples/doodle/views.py
|
rudeb0t/tornadio2go
|
ad618717c6b185fee533928d378c086e273685a9
|
[
"BSD-3-Clause"
] | null | null | null |
from django.shortcuts import render_to_response
from django.template import RequestContext
def home(request):
    """Render the doodle index page.

    ``render_to_response(..., context_instance=RequestContext(request))``
    was deprecated in Django 1.8 and removed in 1.10.  ``render`` is the
    drop-in modern equivalent: it applies a RequestContext for us and
    returns the same HttpResponse.
    """
    # Local import so the file-level imports stay untouched by this fix.
    from django.shortcuts import render
    return render(request, 'doodle/index.html')
| 34
| 92
| 0.838235
| 26
| 204
| 6.384615
| 0.692308
| 0.120482
| 0.192771
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 204
| 5
| 93
| 40.8
| 0.892473
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
ecc3bb32068040d6a9c90c932af414b0c091255a
| 17,842
|
py
|
Python
|
main_code/url.py
|
Xyerniu/website-finder
|
6aef84384ccadee0ed06d1a0db0aac19246fb980
|
[
"MIT"
] | null | null | null |
main_code/url.py
|
Xyerniu/website-finder
|
6aef84384ccadee0ed06d1a0db0aac19246fb980
|
[
"MIT"
] | null | null | null |
main_code/url.py
|
Xyerniu/website-finder
|
6aef84384ccadee0ed06d1a0db0aac19246fb980
|
[
"MIT"
] | null | null | null |
# backend
from selenium import webdriver
import random
import string
import math
# Shared mutable module state: every URL generated so far, and the word
# list the generator draws from.
user_input = []
words: list[str] = []
# 'lots of uk words.txt' is expected next to the script, one word per line;
# blank lines are filtered out below.
with open('lots of uk words.txt', 'r') as f:
    # NOTE(review): the local name `re` shadows the stdlib regex module if it
    # is ever imported in this file — consider renaming.
    re = f.read()
    re = re.splitlines()
    words = [x for x in re if x.strip()]
def stop():
    """Request that ``main_loop`` exit by clearing the module-level run flag."""
    global x1
    x1 = False
def main_loop(v5, url, num2, num, end1, bool1, bool2, bool3, bool4):
    """Generate random URLs and open each one in a Selenium Chrome driver.

    Runs until the module-level flag ``x1`` is cleared (see ``stop()``).
    Each iteration builds a candidate URL from random dictionary words
    and/or random characters, records unseen candidates in ``user_input``
    and loads them in the browser.  The many ``math.pow`` comparisons try
    to detect when the search space for the chosen character sets has
    been exhausted.

    NOTE(review): conditions like ``bool1 and bool2 is True`` bind as
    ``bool1 and (bool2 is True)`` due to operator precedence, and
    ``a and b or c`` binds as ``(a and b) or c`` — confirm these are
    intentional before any refactor; they are preserved verbatim here.
    """
    global x1
    x1 = True
    # NOTE(review): backslashes in this Windows path are not escaped
    # ("\P", "\c" are not recognized escape sequences) — a raw string
    # would be safer, but the literal is left byte-identical.
    PATH = "C:\Program Files (x86)\chromedriver.exe"
    driver = webdriver.Chrome(PATH)
    b = num2 + 1
    temp = False
    ve = ""
    while x1 is True:
        i_random = random_string(num, bool1, bool2, bool3, bool4)
        r_word = words[random.randrange(10000)]
        if url == "https://www.youtube.com/results?search_query=":
            if v5 is True:
                # "&sp=EgIQAg%253D%253D" is YouTube's channel-only filter.
                ve = url + r_word + i_random + "&sp=EgIQAg%253D%253D"
                if ve in user_input:
                    # Exhaustion checks: stop once every combination for the
                    # enabled character sets has (apparently) been generated.
                    if bool1 and bool2 and bool3 and bool4 is False and len(user_input) == 10000:
                        x1 = False
                    if bool1 or bool2 is True and len(user_input) == (math.pow(10000, b) * math.pow(26, num)):
                        x1 = False
                    if bool3 is True and len(user_input) == (math.pow(10000, b) * math.pow(10, num)):
                        x1 = False
                    if bool4 is True and len(user_input) == (math.pow(10000, b) * math.pow(40, num)):
                        x1 = False
                    if bool1 and bool2 is True and len(user_input) == (math.pow(10000, b) * math.pow(52, num)):
                        x1 = False
                    if bool3 and bool2 or bool1 is True and len(user_input) == (math.pow(10000, b) * math.pow(36, num)):
                        x1 = False
                    if bool3 and bool4 is True and len(user_input) == (math.pow(10000, b) * math.pow(50, num)):
                        x1 = False
                    if bool4 and bool2 or bool1 is True and len(user_input) == (math.pow(10000, b) * math.pow(66, num)):
                        x1 = False
                    if bool1 and bool2 and bool4 is True and len(user_input) == (math.pow(10000, b) * math.pow(36, num)):
                        x1 = False
                    if bool4 and bool3 and bool1 or bool2 is True and len(user_input) == (math.pow(10000, b) * math.pow(76, num)):
                        x1 = False
                    if bool1 and bool2 and bool3 is True and len(user_input) == (math.pow(10000, b) * math.pow(62, num)):
                        x1 = False
                    if bool1 and bool2 and bool3 and bool4 is True and len(user_input) == (math.pow(10000, b) * math.pow(102, num)):
                        x1 = False
                else:
                    pass
            if v5 is False:
                ve = url + i_random + "&sp=EgIQAg%253D%253D"
                if ve in user_input:
                    if bool1 or bool2 is True and len(user_input) == (math.pow(26, num)):
                        x1 = False
                    if bool3 is True and len(user_input) == (math.pow(10, num)):
                        x1 = False
                    if bool4 is True and len(user_input) == (math.pow(40, num)):
                        x1 = False
                    if bool1 and bool2 is True and len(user_input) == (math.pow(52, num)):
                        x1 = False
                    if bool3 and bool2 or bool1 is True and len(user_input) == (math.pow(36, num)):
                        x1 = False
                    if bool3 and bool4 is True and len(user_input) == (math.pow(50, num)):
                        x1 = False
                    if bool4 and bool2 or bool1 is True and len(user_input) == (math.pow(66, num)):
                        x1 = False
                    if bool1 and bool2 and bool4 is True and len(user_input) == (math.pow(36, num)):
                        x1 = False
                    if bool4 and bool3 and bool1 or bool2 is True and len(user_input) == (math.pow(76, num)):
                        x1 = False
                    if bool1 and bool2 and bool3 is True and len(user_input) == (math.pow(62, num)):
                        x1 = False
                    if bool1 and bool2 and bool3 and bool4 is True and len(user_input) == (math.pow(102, num)):
                        x1 = False
                else:
                    pass
            # Record and visit only candidates not seen before.
            if ve in user_input:
                pass
            elif ve:
                user_input.append(ve)
                print(ve)
                try:
                    driver.get(ve)
                except Exception:
                    print(ve + " doesn't work")
        if url == "https://sites.google.com/view/":
            if v5 is True:
                ve = url + r_word + i_random
                if ve in user_input:
                    if bool1 and bool2 and bool3 and bool4 is False and len(user_input) == 10000:
                        x1 = False
                    if bool1 or bool2 is True and len(user_input) == (math.pow(10000, b) * math.pow(26, num)):
                        x1 = False
                    if bool3 is True and len(user_input) == (math.pow(10000, b) * math.pow(10, num)):
                        x1 = False
                    if bool4 is True and len(user_input) == (math.pow(10000, b) * math.pow(40, num)):
                        x1 = False
                    if bool1 and bool2 is True and len(user_input) == (math.pow(10000, b) * math.pow(52, num)):
                        x1 = False
                    if bool3 and bool2 or bool1 is True and len(user_input) == (math.pow(10000, b) * math.pow(36, num)):
                        x1 = False
                    if bool3 and bool4 is True and len(user_input) == (math.pow(10000, b) * math.pow(50, num)):
                        x1 = False
                    if bool4 and bool2 or bool1 is True and len(user_input) == (math.pow(10000, b) * math.pow(66, num)):
                        x1 = False
                    if bool1 and bool2 and bool4 is True and len(user_input) == (math.pow(10000, b) * math.pow(36, num)):
                        x1 = False
                    if bool4 and bool3 and bool1 or bool2 is True and len(user_input) == (math.pow(10000, b) * math.pow(76, num)):
                        x1 = False
                    if bool1 and bool2 and bool3 is True and len(user_input) == (math.pow(10000, b) * math.pow(62, num)):
                        x1 = False
                    if bool1 and bool2 and bool3 and bool4 is True and len(user_input) == (math.pow(10000, b) * math.pow(102, num)):
                        x1 = False
                else:
                    pass
            if v5 is False:
                ve = url + i_random
                if ve in user_input:
                    if bool1 or bool2 is True and len(user_input) == (math.pow(26, num)):
                        x1 = False
                    if bool3 is True and len(user_input) == (math.pow(10, num)):
                        x1 = False
                    if bool4 is True and len(user_input) == (math.pow(40, num)):
                        x1 = False
                    if bool1 and bool2 is True and len(user_input) == (math.pow(52, num)):
                        x1 = False
                    if bool3 and bool2 or bool1 is True and len(user_input) == (math.pow(36, num)):
                        x1 = False
                    if bool3 and bool4 is True and len(user_input) == (math.pow(50, num)):
                        x1 = False
                    if bool4 and bool2 or bool1 is True and len(user_input) == (math.pow(66, num)):
                        x1 = False
                    if bool1 and bool2 and bool4 is True and len(user_input) == (math.pow(36, num)):
                        x1 = False
                    if bool4 and bool3 and bool1 or bool2 is True and len(user_input) == (math.pow(76, num)):
                        x1 = False
                    if bool1 and bool2 and bool3 is True and len(user_input) == (math.pow(62, num)):
                        x1 = False
                    if bool1 and bool2 and bool3 and bool4 is True and len(user_input) == (math.pow(102, num)):
                        x1 = False
                else:
                    pass
            if ve in user_input:
                pass
            elif ve:
                user_input.append(ve)
                print(ve)
                try:
                    driver.get(ve)
                except Exception:
                    print(ve + " doesn't work")
        if url == "https://www.reddit.com/r/":
            if v5 is True:
                ve = url + r_word + i_random
                if ve in user_input:
                    if bool1 and bool2 and bool3 and bool4 is False and len(user_input) == 10000:
                        x1 = False
                    if bool1 or bool2 is True and len(user_input) == (math.pow(10000, b) * math.pow(26, num)):
                        x1 = False
                    if bool3 is True and len(user_input) == (math.pow(10000, b) * math.pow(10, num)):
                        x1 = False
                    if bool4 is True and len(user_input) == (math.pow(10000, b) * math.pow(40, num)):
                        x1 = False
                    if bool1 and bool2 is True and len(user_input) == (math.pow(10000, b) * math.pow(52, num)):
                        x1 = False
                    if bool3 and bool2 or bool1 is True and len(user_input) == (math.pow(10000, b) * math.pow(36, num)):
                        x1 = False
                    if bool3 and bool4 is True and len(user_input) == (math.pow(10000, b) * math.pow(50, num)):
                        x1 = False
                    if bool4 and bool2 or bool1 is True and len(user_input) == (math.pow(10000, b) * math.pow(66, num)):
                        x1 = False
                    if bool1 and bool2 and bool4 is True and len(user_input) == (math.pow(10000, b) * math.pow(36, num)):
                        x1 = False
                    if bool4 and bool3 and bool1 or bool2 is True and len(user_input) == (math.pow(10000, b) * math.pow(76, num)):
                        x1 = False
                    if bool1 and bool2 and bool3 is True and len(user_input) == (math.pow(10000, b) * math.pow(62, num)):
                        x1 = False
                    if bool1 and bool2 and bool3 and bool4 is True and len(user_input) == (math.pow(10000, b) * math.pow(102, num)):
                        x1 = False
                else:
                    pass
            if v5 is False:
                ve = url + i_random
                if ve in user_input:
                    if bool1 or bool2 is True and len(user_input) == (math.pow(26, num)):
                        x1 = False
                    if bool3 is True and len(user_input) == (math.pow(10, num)):
                        x1 = False
                    if bool4 is True and len(user_input) == (math.pow(40, num)):
                        x1 = False
                    if bool1 and bool2 is True and len(user_input) == (math.pow(52, num)):
                        x1 = False
                    if bool3 and bool2 or bool1 is True and len(user_input) == (math.pow(36, num)):
                        x1 = False
                    if bool3 and bool4 is True and len(user_input) == (math.pow(50, num)):
                        x1 = False
                    if bool4 and bool2 or bool1 is True and len(user_input) == (math.pow(66, num)):
                        x1 = False
                    if bool1 and bool2 and bool4 is True and len(user_input) == (math.pow(36, num)):
                        x1 = False
                    if bool4 and bool3 and bool1 or bool2 is True and len(user_input) == (math.pow(76, num)):
                        x1 = False
                    if bool1 and bool2 and bool3 is True and len(user_input) == (math.pow(62, num)):
                        x1 = False
                    if bool1 and bool2 and bool3 and bool4 is True and len(user_input) == (math.pow(102, num)):
                        x1 = False
                else:
                    pass
            if ve in user_input:
                pass
            elif ve:
                user_input.append(ve)
                print(ve)
                try:
                    driver.get(ve)
                except Exception:
                    print(ve + " doesn't work")
        else:
            # Custom base URL: build a multi-segment path, optionally ending
            # with `end1` (e.g. a file extension).  `temp` suppresses the
            # echo after a failed page load.
            if temp is False:
                print(ve)
            temp = False
            if v5 is True:
                ve = url + r_word + i_random
                for j in range(num2 - 1):
                    r_word = words[random.randrange(10000)]
                    i_random = random_string(num, bool1, bool2, bool3, bool4)
                    ve = ve + r_word + i_random
                ve = ve + end1
                if ve in user_input:
                    if bool1 and bool2 and bool3 and bool4 is False and len(user_input) == 10000:
                        x1 = False
                    if bool1 or bool2 is True and len(user_input) == (math.pow(10000, b) * math.pow(26, num)):
                        x1 = False
                    if bool3 is True and len(user_input) == (math.pow(10000, b) * math.pow(10, num)):
                        x1 = False
                    if bool4 is True and len(user_input) == (math.pow(10000, b) * math.pow(40, num)):
                        x1 = False
                    if bool1 and bool2 is True and len(user_input) == (math.pow(10000, b) * math.pow(52, num)):
                        x1 = False
                    if bool3 and bool2 or bool1 is True and len(user_input) == (math.pow(10000, b) * math.pow(36, num)):
                        x1 = False
                    if bool3 and bool4 is True and len(user_input) == (math.pow(10000, b) * math.pow(50, num)):
                        x1 = False
                    if bool4 and bool2 or bool1 is True and len(user_input) == (math.pow(10000, b) * math.pow(66, num)):
                        x1 = False
                    if bool1 and bool2 and bool4 is True and len(user_input) == (math.pow(10000, b) * math.pow(36, num)):
                        x1 = False
                    if bool4 and bool3 and bool1 or bool2 is True and len(user_input) == (math.pow(10000, b) * math.pow(76, num)):
                        x1 = False
                    if bool1 and bool2 and bool3 is True and len(user_input) == (math.pow(10000, b) * math.pow(62, num)):
                        x1 = False
                    if bool1 and bool2 and bool3 and bool4 is True and len(user_input) == (math.pow(10000, b) * math.pow(102, num)):
                        x1 = False
                else:
                    pass
            if v5 is False:
                ve = url + i_random + end1
                if ve in user_input:
                    if bool1 or bool2 is True and len(user_input) == (math.pow(26, num)):
                        x1 = False
                    if bool3 is True and len(user_input) == (math.pow(10, num)):
                        x1 = False
                    if bool4 is True and len(user_input) == (math.pow(40, num)):
                        x1 = False
                    if bool1 and bool2 is True and len(user_input) == (math.pow(52, num)):
                        x1 = False
                    if bool3 and bool2 or bool1 is True and len(user_input) == (math.pow(36, num)):
                        x1 = False
                    if bool3 and bool4 is True and len(user_input) == (math.pow(50, num)):
                        x1 = False
                    if bool4 and bool2 or bool1 is True and len(user_input) == (math.pow(66, num)):
                        x1 = False
                    if bool1 and bool2 and bool4 is True and len(user_input) == (math.pow(36, num)):
                        x1 = False
                    if bool4 and bool3 and bool1 or bool2 is True and len(user_input) == (math.pow(76, num)):
                        x1 = False
                    if bool1 and bool2 and bool3 is True and len(user_input) == (math.pow(62, num)):
                        x1 = False
                    if bool1 and bool2 and bool3 and bool4 is True and len(user_input) == (math.pow(102, num)):
                        x1 = False
                else:
                    pass
            if ve in user_input:
                pass
            elif ve:
                user_input.append(ve)
                try:
                    driver.get(ve)
                except Exception:
                    temp = True
    driver.close()
def random_string(length=32, uppercase=True, lowercase=True, numbers=True, symbols=False):
    """Return a random string of ``length`` characters.

    The flags select which character classes may appear: ASCII uppercase,
    ASCII lowercase, decimal digits and punctuation.  Defaults give a
    32-character alphanumeric string.

    Note: if every class is disabled and ``length`` > 0, ``random.choice``
    raises IndexError on the empty character set (original behavior kept).
    """
    character_set = ''
    if uppercase:
        character_set += string.ascii_uppercase
    if lowercase:
        character_set += string.ascii_lowercase
    if numbers:
        character_set += string.digits
    if symbols:
        character_set += string.punctuation
    # `_`: the loop variable was unused (was named `i`).
    return ''.join(random.choice(character_set) for _ in range(length))
| 53.10119
| 121
| 0.442832
| 2,195
| 17,842
| 3.535763
| 0.056036
| 0.119057
| 0.118541
| 0.177812
| 0.898853
| 0.898853
| 0.891122
| 0.887386
| 0.886355
| 0.875532
| 0
| 0.085759
| 0.466708
| 17,842
| 335
| 122
| 53.259701
| 0.7299
| 0.000392
| 0
| 0.873065
| 0
| 0
| 0.013659
| 0.001257
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009288
| false
| 0.037152
| 0.012384
| 0
| 0.024768
| 0.021672
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
eccef7e6b691513b0b734140b7bd40e3bc971d31
| 131
|
py
|
Python
|
indiecoin/util/__init__.py
|
fernandolobato/IndieCoin
|
4067a0e37b359f879d796c7d7f65e6f0350d2015
|
[
"MIT"
] | 5
|
2017-11-20T08:46:38.000Z
|
2021-12-28T20:49:16.000Z
|
indiecoin/util/__init__.py
|
fernandolobato/IndieCoin
|
4067a0e37b359f879d796c7d7f65e6f0350d2015
|
[
"MIT"
] | null | null | null |
indiecoin/util/__init__.py
|
fernandolobato/IndieCoin
|
4067a0e37b359f879d796c7d7f65e6f0350d2015
|
[
"MIT"
] | null | null | null |
import os
__all__ = ['default_data_directory']
def default_data_directory():
    """Return the user-expanded path of the default IndieCoin data directory."""
    home_relative = '~/.indiecoin/data'
    return os.path.expanduser(home_relative)
| 16.375
| 50
| 0.740458
| 16
| 131
| 5.5625
| 0.6875
| 0.247191
| 0.449438
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122137
| 131
| 7
| 51
| 18.714286
| 0.773913
| 0
| 0
| 0
| 0
| 0
| 0.29771
| 0.167939
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
01f9b53facf43b2f2d83824a304e4c1ddab35dc5
| 273
|
py
|
Python
|
modules/retrieval/text_classification/libs/loggers/logger_template.py
|
martinhoang11/vietnamese-ocr-toolbox
|
524b4908bedceb0c87b2c7cd7b5e3f6e1126ace5
|
[
"Apache-2.0"
] | 14
|
2021-09-05T10:42:14.000Z
|
2022-03-10T16:27:26.000Z
|
modules/retrieval/text_classification/libs/loggers/logger_template.py
|
martinhoang11/vietnamese-ocr-toolbox
|
524b4908bedceb0c87b2c7cd7b5e3f6e1126ace5
|
[
"Apache-2.0"
] | 1
|
2021-06-16T11:35:24.000Z
|
2021-06-16T11:35:24.000Z
|
modules/retrieval/text_classification/libs/loggers/logger_template.py
|
martinhoang11/vietnamese-ocr-toolbox
|
524b4908bedceb0c87b2c7cd7b5e3f6e1126ace5
|
[
"Apache-2.0"
] | 5
|
2021-09-05T13:26:51.000Z
|
2022-03-09T07:49:45.000Z
|
class LoggerTemplate():
    """Interface that concrete experiment loggers must implement.

    Every method raises NotImplementedError; subclasses override them to
    record losses and metrics for a given phase and step.
    """

    def __init__(self, *args, **kwargs):
        raise NotImplementedError

    def update_loss(self, phase, value, step):
        """Record a loss ``value`` for ``phase`` at ``step``."""
        raise NotImplementedError

    def update_metric(self, phase, metric, value, step):
        """Record metric ``metric`` with ``value`` for ``phase`` at ``step``."""
        raise NotImplementedError
| 27.3
| 56
| 0.688645
| 28
| 273
| 6.5
| 0.535714
| 0.395604
| 0.296703
| 0.362637
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.223443
| 273
| 9
| 57
| 30.333333
| 0.858491
| 0
| 0
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
170dfe8449b9fa4a87333a6c04186ad64bedd78c
| 8,381
|
py
|
Python
|
tests/test_request.py
|
andrewtholt/python-sonoffdiy
|
350ae275fefadd92355741eed0667a5422ea7ac5
|
[
"MIT"
] | 3
|
2019-11-09T14:46:52.000Z
|
2022-02-11T00:34:29.000Z
|
tests/test_request.py
|
andrewtholt/python-sonoffdiy
|
350ae275fefadd92355741eed0667a5422ea7ac5
|
[
"MIT"
] | 3
|
2020-01-12T12:48:42.000Z
|
2021-11-15T17:51:54.000Z
|
tests/test_request.py
|
andrewtholt/python-sonoffdiy
|
350ae275fefadd92355741eed0667a5422ea7ac5
|
[
"MIT"
] | 4
|
2020-04-26T11:38:08.000Z
|
2022-02-11T00:34:31.000Z
|
"""Tests for `sonoffdiy.SonoffDIY`."""
import asyncio
import aiohttp
import pytest
from sonoffdiy import SonoffDIY
from sonoffdiy.__version__ import __version__
from sonoffdiy.exceptions import SonoffDIYConnectionError, SonoffDIYError
@pytest.mark.asyncio
async def test_json_request(event_loop, aresponses):
    """Test JSON response is handled correctly."""
    mocked_response = aresponses.Response(
        status=200,
        headers={"Content-Type": "application/json"},
        text='{"seq": 26, "error": 0, "data": {"test": "ok"}}',
    )
    aresponses.add("example.com:8081", "/", "POST", mocked_response)
    async with aiohttp.ClientSession(loop=event_loop) as client:
        device = SonoffDIY("example.com", session=client, loop=event_loop)
        payload = await device._request("/")
        assert payload["test"] == "ok"
@pytest.mark.asyncio
async def test_encoded_json_request(event_loop, aresponses):
    """Test JSON response is handled correctly."""
    # The "data" field here is itself a JSON-encoded string.
    mocked_response = aresponses.Response(
        status=200,
        headers={"Content-Type": "application/json"},
        text='{"seq": 26, "error": 0, "data": "{\\"test\\": \\"ok\\"}"}',
    )
    aresponses.add("example.com:8081", "/", "POST", mocked_response)
    async with aiohttp.ClientSession(loop=event_loop) as client:
        device = SonoffDIY("example.com", session=client, loop=event_loop)
        payload = await device._request("/")
        assert payload["test"] == "ok"
@pytest.mark.asyncio
async def test_internal_session(event_loop, aresponses):
    """Test internal client session is handled correctly."""
    mocked_response = aresponses.Response(
        status=200,
        headers={"Content-Type": "application/json"},
        text='{"seq": 26, "error": 0, "data": {"test": "ok"}}',
    )
    aresponses.add("example.com:8081", "/", "POST", mocked_response)
    # No explicit aiohttp session: the client creates and owns its own.
    async with SonoffDIY("example.com", loop=event_loop) as device:
        payload = await device._request("/")
        assert payload["test"] == "ok"
@pytest.mark.asyncio
async def test_internal_eventloop(aresponses):
    """Test internal event loop creation is handled correctly."""
    mocked_response = aresponses.Response(
        status=200,
        headers={"Content-Type": "application/json"},
        text='{"seq": 26, "error": 0, "data": {"test": "ok"}}',
    )
    aresponses.add("example.com:8081", "/", "POST", mocked_response)
    # Neither loop nor session supplied: the client provides both.
    async with SonoffDIY("example.com") as device:
        payload = await device._request("/")
        assert payload["test"] == "ok"
@pytest.mark.asyncio
async def test_request_port(event_loop, aresponses):
    """Test Sonoff DIY device running on non-standard port."""
    mocked_response = aresponses.Response(
        status=200,
        headers={"Content-Type": "application/json"},
        text='{"seq": 26, "error": 0, "data": {"test": "ok"}}',
    )
    aresponses.add("example.com:8888", "/", "POST", mocked_response)
    async with aiohttp.ClientSession(loop=event_loop) as client:
        device = SonoffDIY("example.com", port=8888, session=client, loop=event_loop)
        payload = await device._request("/")
        assert payload["test"] == "ok"
@pytest.mark.asyncio
async def test_request_user_agent(event_loop, aresponses):
    """Test client sending correct user agent headers."""

    async def assert_user_agent(request):
        # Inspect the outgoing request before answering it.
        assert request.headers["User-Agent"] == f"PythonSonoffDIY/{__version__}"
        return aresponses.Response(
            status=200,
            headers={"Content-Type": "application/json"},
            text='{"seq": 1, "error": 0}',
        )

    aresponses.add("example.com:8081", "/", "POST", assert_user_agent)
    async with aiohttp.ClientSession(loop=event_loop) as client:
        device = SonoffDIY("example.com", session=client, loop=event_loop)
        await device._request("/")
@pytest.mark.asyncio
async def test_request_custom_user_agent(event_loop, aresponses):
    """Test client sending correct user agent headers."""

    async def assert_user_agent(request):
        # The constructor's user_agent argument must override the default.
        assert request.headers["User-Agent"] == "LoremIpsum/1.0"
        return aresponses.Response(
            status=200,
            headers={"Content-Type": "application/json"},
            text='{"seq": 1, "error": 0}',
        )

    aresponses.add("example.com:8081", "/", "POST", assert_user_agent)
    async with aiohttp.ClientSession(loop=event_loop) as client:
        device = SonoffDIY(
            "example.com",
            session=client,
            loop=event_loop,
            user_agent="LoremIpsum/1.0",
        )
        await device._request("/")
@pytest.mark.asyncio
async def test_timeout(event_loop, aresponses):
    """Test request timeout from Sonoff DIY device."""

    async def slow_handler(_):
        # Sleep past the 1 second client timeout configured below.
        await asyncio.sleep(2)
        return aresponses.Response(
            status=200,
            headers={"Content-Type": "application/json"},
            text='{"seq": 1, "error": 0}',
        )

    aresponses.add("example.com:8081", "/", "POST", slow_handler)
    async with aiohttp.ClientSession(loop=event_loop) as client:
        device = SonoffDIY(
            "example.com", session=client, loop=event_loop, request_timeout=1
        )
        with pytest.raises(SonoffDIYConnectionError):
            assert await device._request("/")
@pytest.mark.asyncio
async def test_invalid_content_type(event_loop, aresponses):
    """Test request timeout from Sonoff DIY device."""
    body = '{"seq": 26, "error": 0, "data": {"test": "ok"}}'
    # First reply: wrong Content-Type header; second reply: no JSON header at all.
    aresponses.add(
        "example.com:8081",
        "/",
        "POST",
        aresponses.Response(
            status=200, headers={"Content-Type": "other/content"}, text=body
        ),
    )
    aresponses.add(
        "example.com:8081",
        "/",
        "POST",
        aresponses.Response(status=200, text=body),
    )
    async with aiohttp.ClientSession(loop=event_loop) as client:
        device = SonoffDIY("example.com", session=client, loop=event_loop)
        for _ in range(2):
            with pytest.raises(SonoffDIYError):
                await device._request("/")
@pytest.mark.asyncio
async def test_missing_error_response(event_loop, aresponses):
    """Test missing error code response from Sonoff DIY device."""
    # Reply lacks the "error" field the client requires.
    mocked_response = aresponses.Response(
        status=200,
        headers={"Content-Type": "application/json"},
        text='{"seq": 26, "data": {"test": "ok"}}',
    )
    aresponses.add("example.com:8081", "/", "POST", mocked_response)
    async with aiohttp.ClientSession(loop=event_loop) as client:
        device = SonoffDIY("example.com", session=client, loop=event_loop)
        with pytest.raises(SonoffDIYError):
            await device._request("/")
@pytest.mark.asyncio
async def test_error_response(event_loop, aresponses):
    """Test error response from Sonoff DIY device."""
    # Non-zero "error" code must surface as SonoffDIYError.
    mocked_response = aresponses.Response(
        status=200,
        headers={"Content-Type": "application/json"},
        text='{"seq": 26, "error": 422}',
    )
    aresponses.add("example.com:8081", "/", "POST", mocked_response)
    async with aiohttp.ClientSession(loop=event_loop) as client:
        device = SonoffDIY("example.com", session=client, loop=event_loop)
        with pytest.raises(SonoffDIYError):
            await device._request("/")
@pytest.mark.asyncio
async def test_http_error(event_loop, aresponses):
    """Test HTTP error response handling."""
    # Queue a 404 and then a 500; both must raise.
    for status in (404, 500):
        aresponses.add(
            "example.com:8081",
            "/",
            "POST",
            aresponses.Response(text="OMG PUPPIES!", status=status),
        )
    async with aiohttp.ClientSession(loop=event_loop) as client:
        device = SonoffDIY("example.com", session=client, loop=event_loop)
        for _ in range(2):
            with pytest.raises(SonoffDIYError):
                assert await device._request("/")
| 31.86692
| 83
| 0.596826
| 895
| 8,381
| 5.481564
| 0.109497
| 0.060538
| 0.055646
| 0.065634
| 0.857725
| 0.850999
| 0.830412
| 0.828985
| 0.809213
| 0.782307
| 0
| 0.022811
| 0.257249
| 8,381
| 262
| 84
| 31.98855
| 0.765301
| 0.015989
| 0
| 0.736842
| 0
| 0
| 0.1761
| 0.003808
| 0
| 0
| 0
| 0
| 0.047847
| 1
| 0
| false
| 0
| 0.028708
| 0
| 0.043062
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
17145ed2f4257a730804769410aa79f62712ca7a
| 8,317
|
py
|
Python
|
boto3_type_annotations_with_docs/boto3_type_annotations/cloudformation/waiter.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 119
|
2018-12-01T18:20:57.000Z
|
2022-02-02T10:31:29.000Z
|
boto3_type_annotations_with_docs/boto3_type_annotations/cloudformation/waiter.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 15
|
2018-11-16T00:16:44.000Z
|
2021-11-13T03:44:18.000Z
|
boto3_type_annotations_with_docs/boto3_type_annotations/cloudformation/waiter.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 11
|
2019-05-06T05:26:51.000Z
|
2021-09-28T15:27:59.000Z
|
from typing import Dict
from botocore.waiter import Waiter
# Auto-generated type-annotation stub; the real wait() is driven by botocore.
class ChangeSetCreateComplete(Waiter):
    def wait(self, ChangeSetName: str, StackName: str = None, NextToken: str = None, WaiterConfig: Dict = None):
        """
        Polls :py:meth:`CloudFormation.Client.describe_change_set` every 30 seconds until a successful state is reached. An error is returned after 120 failed checks.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeChangeSet>`_
        **Request Syntax**
        ::
          waiter.wait(
              ChangeSetName='string',
              StackName='string',
              NextToken='string',
              WaiterConfig={
                  'Delay': 123,
                  'MaxAttempts': 123
              }
          )
        :type ChangeSetName: string
        :param ChangeSetName: **[REQUIRED]**
          The name or Amazon Resource Name (ARN) of the change set that you want to describe.
        :type StackName: string
        :param StackName:
          If you specified the name of a change set, specify the stack name or ID (ARN) of the change set you want to describe.
        :type NextToken: string
        :param NextToken:
          A string (provided by the DescribeChangeSet response output) that identifies the next page of information that you want to retrieve.
        :type WaiterConfig: dict
        :param WaiterConfig:
          A dictionary that provides parameters to control waiting behavior.
          - **Delay** *(integer) --*
            The amount of time in seconds to wait between attempts. Default: 30
          - **MaxAttempts** *(integer) --*
            The maximum number of attempts to be made. Default: 120
        :returns: None
        """
        pass
# Auto-generated type-annotation stub; the real wait() is driven by botocore.
class StackCreateComplete(Waiter):
    def wait(self, StackName: str = None, NextToken: str = None, WaiterConfig: Dict = None):
        """
        Polls :py:meth:`CloudFormation.Client.describe_stacks` every 30 seconds until a successful state is reached. An error is returned after 120 failed checks.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStacks>`_
        **Request Syntax**
        ::
          waiter.wait(
              StackName='string',
              NextToken='string',
              WaiterConfig={
                  'Delay': 123,
                  'MaxAttempts': 123
              }
          )
        :type StackName: string
        :param StackName:
          The name or the unique stack ID that is associated with the stack, which are not always interchangeable:
          * Running stacks: You can specify either the stack\'s name or its unique stack ID.
          * Deleted stacks: You must specify the unique stack ID.
          Default: There is no default value.
        :type NextToken: string
        :param NextToken:
          A string that identifies the next page of stacks that you want to retrieve.
        :type WaiterConfig: dict
        :param WaiterConfig:
          A dictionary that provides parameters to control waiting behavior.
          - **Delay** *(integer) --*
            The amount of time in seconds to wait between attempts. Default: 30
          - **MaxAttempts** *(integer) --*
            The maximum number of attempts to be made. Default: 120
        :returns: None
        """
        pass
# Auto-generated type-annotation stub; the real wait() is driven by botocore.
class StackDeleteComplete(Waiter):
    def wait(self, StackName: str = None, NextToken: str = None, WaiterConfig: Dict = None):
        """
        Polls :py:meth:`CloudFormation.Client.describe_stacks` every 30 seconds until a successful state is reached. An error is returned after 120 failed checks.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStacks>`_
        **Request Syntax**
        ::
          waiter.wait(
              StackName='string',
              NextToken='string',
              WaiterConfig={
                  'Delay': 123,
                  'MaxAttempts': 123
              }
          )
        :type StackName: string
        :param StackName:
          The name or the unique stack ID that is associated with the stack, which are not always interchangeable:
          * Running stacks: You can specify either the stack\'s name or its unique stack ID.
          * Deleted stacks: You must specify the unique stack ID.
          Default: There is no default value.
        :type NextToken: string
        :param NextToken:
          A string that identifies the next page of stacks that you want to retrieve.
        :type WaiterConfig: dict
        :param WaiterConfig:
          A dictionary that provides parameters to control waiting behavior.
          - **Delay** *(integer) --*
            The amount of time in seconds to wait between attempts. Default: 30
          - **MaxAttempts** *(integer) --*
            The maximum number of attempts to be made. Default: 120
        :returns: None
        """
        pass
# Auto-generated type-annotation stub; the real wait() is driven by botocore.
# Note the tighter polling schedule: 5 s delay, 20 attempts.
class StackExists(Waiter):
    def wait(self, StackName: str = None, NextToken: str = None, WaiterConfig: Dict = None):
        """
        Polls :py:meth:`CloudFormation.Client.describe_stacks` every 5 seconds until a successful state is reached. An error is returned after 20 failed checks.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStacks>`_
        **Request Syntax**
        ::
          waiter.wait(
              StackName='string',
              NextToken='string',
              WaiterConfig={
                  'Delay': 123,
                  'MaxAttempts': 123
              }
          )
        :type StackName: string
        :param StackName:
          The name or the unique stack ID that is associated with the stack, which are not always interchangeable:
          * Running stacks: You can specify either the stack\'s name or its unique stack ID.
          * Deleted stacks: You must specify the unique stack ID.
          Default: There is no default value.
        :type NextToken: string
        :param NextToken:
          A string that identifies the next page of stacks that you want to retrieve.
        :type WaiterConfig: dict
        :param WaiterConfig:
          A dictionary that provides parameters to control waiting behavior.
          - **Delay** *(integer) --*
            The amount of time in seconds to wait between attempts. Default: 5
          - **MaxAttempts** *(integer) --*
            The maximum number of attempts to be made. Default: 20
        :returns: None
        """
        pass
# Auto-generated type-annotation stub; the real wait() is driven by botocore.
class StackUpdateComplete(Waiter):
    def wait(self, StackName: str = None, NextToken: str = None, WaiterConfig: Dict = None):
        """
        Polls :py:meth:`CloudFormation.Client.describe_stacks` every 30 seconds until a successful state is reached. An error is returned after 120 failed checks.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStacks>`_
        **Request Syntax**
        ::
          waiter.wait(
              StackName='string',
              NextToken='string',
              WaiterConfig={
                  'Delay': 123,
                  'MaxAttempts': 123
              }
          )
        :type StackName: string
        :param StackName:
          The name or the unique stack ID that is associated with the stack, which are not always interchangeable:
          * Running stacks: You can specify either the stack\'s name or its unique stack ID.
          * Deleted stacks: You must specify the unique stack ID.
          Default: There is no default value.
        :type NextToken: string
        :param NextToken:
          A string that identifies the next page of stacks that you want to retrieve.
        :type WaiterConfig: dict
        :param WaiterConfig:
          A dictionary that provides parameters to control waiting behavior.
          - **Delay** *(integer) --*
            The amount of time in seconds to wait between attempts. Default: 30
          - **MaxAttempts** *(integer) --*
            The maximum number of attempts to be made. Default: 120
        :returns: None
        """
        pass
| 44.005291
| 166
| 0.605747
| 938
| 8,317
| 5.359275
| 0.142857
| 0.026258
| 0.031032
| 0.025463
| 0.908494
| 0.888403
| 0.883032
| 0.875075
| 0.875075
| 0.875075
| 0
| 0.020362
| 0.315017
| 8,317
| 188
| 167
| 44.239362
| 0.862033
| 0.74474
| 0
| 0.529412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.294118
| false
| 0.294118
| 0.117647
| 0
| 0.705882
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 9
|
175c6db073f926bfb6161b6e8e6ca6152df52f5a
| 222
|
py
|
Python
|
src/main/python/ipystate/decl.py
|
mrMakaronka/pyenv
|
48702153429019f2176148aaaaa45b3b48080df2
|
[
"MIT"
] | null | null | null |
src/main/python/ipystate/decl.py
|
mrMakaronka/pyenv
|
48702153429019f2176148aaaaa45b3b48080df2
|
[
"MIT"
] | 5
|
2020-11-26T06:02:13.000Z
|
2021-11-15T08:52:16.000Z
|
src/main/python/ipystate/decl.py
|
mrMakaronka/pyenv
|
48702153429019f2176148aaaaa45b3b48080df2
|
[
"MIT"
] | null | null | null |
class VarDecl:
    """Declaration of a variable: its name and its type, both plain strings."""

    def __init__(self, name: str, type: str):
        self._name = name
        self._type = type

    def name(self) -> str:
        """Return the declared variable name."""
        return self._name

    def type(self) -> str:
        """Return the declared type name."""
        return self._type

    def __repr__(self) -> str:
        # Debug-friendly representation; no caller-visible behavior relied on
        # the default object repr.
        return f"VarDecl(name={self._name!r}, type={self._type!r})"
| 18.5
| 45
| 0.563063
| 29
| 222
| 4.034483
| 0.310345
| 0.205128
| 0.222222
| 0.290598
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.328829
| 222
| 11
| 46
| 20.181818
| 0.785235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| false
| 0
| 0
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
179bfeb0335a40653a9adfc96c408f7cec69ec4e
| 3,983
|
py
|
Python
|
main/models.py
|
iitgswc/IITG-Main-Website
|
d19c4a2f2c70ee42ccc6246827ca03f613e810e2
|
[
"Apache-2.0"
] | null | null | null |
main/models.py
|
iitgswc/IITG-Main-Website
|
d19c4a2f2c70ee42ccc6246827ca03f613e810e2
|
[
"Apache-2.0"
] | null | null | null |
main/models.py
|
iitgswc/IITG-Main-Website
|
d19c4a2f2c70ee42ccc6246827ca03f613e810e2
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
# Navbar
class TitledLink(models.Model):
    """Abstract base for a simple site link: a title, a URL and an active flag.

    Every concrete model below previously repeated these three fields and
    __str__ verbatim; factoring them out keeps each table's columns identical
    (abstract bases contribute fields directly to the child table).
    NOTE(review): Django's migration autodetector records model bases, so a
    behavior-neutral migration may be generated after this refactor — verify
    with `makemigrations --check` that no schema change is produced.
    """
    title = models.CharField(max_length=200)
    link = models.CharField(max_length=2000, default="#")
    is_active = models.BooleanField(default=True)

    class Meta:
        abstract = True

    def __str__(self):
        return self.title


class About(TitledLink):
    pass


class AcademicAffairs(TitledLink):
    pass


class Administration(TitledLink):
    pass


class Department(TitledLink):
    pass


class AcademicCenter(TitledLink):
    pass


class ExtramurailCenter(TitledLink):
    pass


class ExchangeProgram(TitledLink):
    pass


class Utility(TitledLink):
    pass


class Form(TitledLink):
    pass


class Partnership(TitledLink):
    pass


# Carousel
class CarouselImages(models.Model):
    # Stores the static-folder file name rather than an ImageField upload.
    image = models.CharField(max_length=2000, help_text = "Add the image to the static folder and give the image name here.")


class Announcement(TitledLink):
    pass


# Middle 3 columns
class ResearchInnovation(TitledLink):
    pass


class UpcomingEvent(TitledLink):
    # Events additionally carry the date they occur on.
    date = models.DateField()


class QuickLink(TitledLink):
    pass


# Footer 3 Columns
class Resource(TitledLink):
    pass


class Programm(TitledLink):
    pass


class Current(TitledLink):
    pass
| 30.638462
| 122
| 0.765252
| 534
| 3,983
| 5.481273
| 0.114232
| 0.179365
| 0.215237
| 0.286983
| 0.868124
| 0.858558
| 0.858558
| 0.858558
| 0.858558
| 0.858558
| 0
| 0.035241
| 0.109465
| 3,983
| 130
| 123
| 30.638462
| 0.789963
| 0.012302
| 0
| 0.801887
| 0
| 0
| 0.020611
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.160377
| false
| 0
| 0.009434
| 0.160377
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 10
|
bd725afae5260f5f88bfd14cce568eacc753fa63
| 2,674
|
py
|
Python
|
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/common/storages.py
|
mazdakb/django-naqsh
|
1ba19ebe74ed63d96eae9d226f1a0ebcfbb93e84
|
[
"BSD-3-Clause"
] | 50
|
2018-05-04T14:03:30.000Z
|
2021-08-05T19:29:47.000Z
|
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/common/storages.py
|
mazdakb/django-naqsh
|
1ba19ebe74ed63d96eae9d226f1a0ebcfbb93e84
|
[
"BSD-3-Clause"
] | 143
|
2018-05-10T16:51:39.000Z
|
2021-02-27T15:25:44.000Z
|
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/common/storages.py
|
mazdakb/django-naqsh
|
1ba19ebe74ed63d96eae9d226f1a0ebcfbb93e84
|
[
"BSD-3-Clause"
] | 3
|
2019-05-27T18:15:31.000Z
|
2020-04-05T14:31:35.000Z
|
{% if cookiecutter.cloud_provider == 'AWS' -%}
import os
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from storages.backends.s3boto3 import S3Boto3Storage
class StaticRootS3Boto3Storage(S3Boto3Storage):
location = "static"
default_acl = "public-read"
class MediaRootS3Boto3Storage(S3Boto3Storage):
location = "media"
file_overwrite = False
class OverwriteStorage(FileSystemStorage):
def get_available_name(self, name, max_length=None):
"""Returns a filename that's free on the target storage system, and
available for new content to be written to.
Found at http://djangosnippets.org/snippets/976/
Notes:
This file storage solves overwrite on upload problem.
"""
# If the filename already exists, remove it as if it was a true file system
if self.exists(name):
os.remove(os.path.join(settings.MEDIA_ROOT, name))
return name
{%- elif cookiecutter.cloud_provider == 'GCP' -%}
import os
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from storages.backends.gcloud import GoogleCloudStorage
class StaticRootGoogleCloudStorage(GoogleCloudStorage):
location = "static"
default_acl = "publicRead"
class MediaRootGoogleCloudStorage(GoogleCloudStorage):
location = "media"
file_overwrite = False
class OverwriteStorage(FileSystemStorage):
def get_available_name(self, name, max_length=None):
"""Returns a filename that's free on the target storage system, and
available for new content to be written to.
Found at http://djangosnippets.org/snippets/976/
Notes:
This file storage solves overwrite on upload problem.
"""
# If the filename already exists, remove it as if it was a true file system
if self.exists(name):
os.remove(os.path.join(settings.MEDIA_ROOT, name))
return name
{%- else -%}
import os
from django.conf import settings
from django.core.files.storage import FileSystemStorage
class OverwriteStorage(FileSystemStorage):
def get_available_name(self, name, max_length=None):
"""Returns a filename that's free on the target storage system, and
available for new content to be written to.
Found at http://djangosnippets.org/snippets/976/
Notes:
This file storage solves overwrite on upload problem.
"""
# If the filename already exists, remove it as if it was a true file system
if self.exists(name):
os.remove(os.path.join(settings.MEDIA_ROOT, name))
{%- endif %}
| 31.093023
| 83
| 0.702319
| 331
| 2,674
| 5.619335
| 0.271903
| 0.032258
| 0.019355
| 0.029032
| 0.794624
| 0.794624
| 0.794624
| 0.794624
| 0.794624
| 0.794624
| 0
| 0.010072
| 0.220269
| 2,674
| 85
| 84
| 31.458824
| 0.882014
| 0.082648
| 0
| 0.707317
| 0
| 0
| 0.029951
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.268293
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bdbad6b6df06a667c065c9e8614c378d82c5458a
| 3,651
|
py
|
Python
|
query.py
|
timruet/DBS_Project
|
69a28f886c148b9905f0f1facd44e373b416ee43
|
[
"MIT"
] | null | null | null |
query.py
|
timruet/DBS_Project
|
69a28f886c148b9905f0f1facd44e373b416ee43
|
[
"MIT"
] | null | null | null |
query.py
|
timruet/DBS_Project
|
69a28f886c148b9905f0f1facd44e373b416ee43
|
[
"MIT"
] | null | null | null |
import psycopg2
def get_oldest():
    """Return the maximum age found in mfb_user, or None if the query fails.

    Returns:
        The MAX(age) value from mfb_user, or None when the connection or
        query raised (the error is printed, matching the sibling functions).
    """
    # BUG FIX: pre-initialise the result. Previously, if psycopg2.connect()
    # raised, `return result1` hit an unbound local — every other function in
    # this module initialises its result to None; this one did not.
    result1 = None
    conn = None
    try:
        # NOTE(review): credentials are hard-coded here and in every sibling
        # function; they should come from configuration/environment instead.
        conn = psycopg2.connect(host="localhost", database="postgres", user="postgres", password="12345")
        cur = conn.cursor()
        cur.execute("SELECT MAX(age) FROM mfb_user")
        result1 = cur.fetchone()[0]
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()
    return result1
def get_user_with_most_dates():
    """Return (id, screenname, name) of the user with the most date entries.

    Returns None if the connection or either query fails (error is printed).
    """
    connection = None
    row = None
    try:
        connection = psycopg2.connect(host="localhost", database="postgres", user="postgres", password="12345")
        cursor = connection.cursor()
        # First query: the single most frequent id in the dates table.
        cursor.execute("SELECT DISTINCT id FROM (SELECT id ,COUNT(id) FROM dates GROUP BY id ORDER BY COUNT(id) DESC LIMIT 1) as a")
        row = int(cursor.fetchone()[0])
        # Second query: resolve that id to the user's details.
        cursor.execute(f"SELECT DISTINCT id, screenname, name FROM mfb_user WHERE id = {row} ")
        row = cursor.fetchone()
        cursor.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if connection is not None:
            connection.close()
    return row
def get_user_with_highest_income():
    """Return (id, screenname, name) of the user with the most income rows.

    Returns None if the connection or either query fails (error is printed).
    """
    connection = None
    row = None
    try:
        connection = psycopg2.connect(host="localhost", database="postgres", user="postgres", password="12345")
        cursor = connection.cursor()
        # Pick the id with the highest COUNT(income) in mfb_user.
        cursor.execute("SELECT DISTINCT id FROM (SELECT id, COUNT(income) FROM mfb_user GROUP BY id ORDER BY COUNT(income) DESC LIMIT 1) as a")
        row = cursor.fetchone()[0]
        # Then fetch that user's details.
        cursor.execute(f"SELECT DISTINCT id, screenname, name FROM mfb_user WHERE id = {row} ")
        row = cursor.fetchone()
        cursor.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if connection is not None:
            connection.close()
    return row
def get_number_of_users_with_more_than_two_marriages():
    """Return how many users have at least two marriage rows.

    NOTE(review): the SQL uses HAVING COUNT(id) >= 2, i.e. "two or more",
    which does not quite match the "more_than_two" in the function name —
    confirm which was intended before renaming either.
    Returns None if the connection or query fails (error is printed).
    """
    connection = None
    count = None
    try:
        connection = psycopg2.connect(host="localhost", database="postgres", user="postgres", password="12345")
        cursor = connection.cursor()
        cursor.execute("SELECT COUNT(id) FROM(SELECT id, COUNT(id) FROM marriage GROUP BY id HAVING COUNT(id) >= 2 ORDER BY COUNT(id) DESC) as a")
        count = cursor.fetchone()[0]
        cursor.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if connection is not None:
            connection.close()
    return count
def get_user_with_most_fans():
    """Return (id, screenname, name) of the user with the most is_fan rows.

    Returns None if the connection or either query fails (error is printed).
    """
    connection = None
    row = None
    try:
        connection = psycopg2.connect(host="localhost", database="postgres", user="postgres", password="12345")
        cursor = connection.cursor()
        # The single id with the most entries in is_fan.
        cursor.execute("SELECT DISTINCT id FROM(SELECT id, COUNT(id) FROM is_fan GROUP BY id ORDER BY COUNT(id) DESC LIMIT 1) as a")
        row = cursor.fetchone()[0]
        # Resolve it to the user's details.
        cursor.execute(f"SELECT DISTINCT id, screenname, name FROM mfb_user WHERE id = {row} ")
        row = cursor.fetchone()
        cursor.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if connection is not None:
            connection.close()
    return row
def get_number_of_users_without_fans_or_relationship():
    """Count users with no fan entries and no dates/marriage relationships.

    Returns None if the connection or query fails (error is printed).
    """
    connection = None
    count = None
    try:
        connection = psycopg2.connect(host="localhost", database="postgres", user="postgres", password="12345")
        cursor = connection.cursor()
        cursor.execute("SELECT COUNT(id) FROM (SELECT id FROM mfb_user EXCEPT (SELECT id FROM is_fan) UNION (SELECT id FROM dates) UNION (SELECT id FROM marriage)) as a")
        count = cursor.fetchone()[0]
        cursor.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if connection is not None:
            connection.close()
    return count
# Smoke run: execute every query in order and print its result.
for _query in (
    get_oldest,
    get_user_with_most_dates,
    get_user_with_highest_income,
    get_number_of_users_with_more_than_two_marriages,
    get_user_with_most_fans,
    get_number_of_users_without_fans_or_relationship,
):
    print(_query())
| 24.668919
| 161
| 0.711586
| 527
| 3,651
| 4.808349
| 0.151803
| 0.028414
| 0.026046
| 0.044988
| 0.832676
| 0.75809
| 0.749803
| 0.745462
| 0.713102
| 0.681531
| 0
| 0.024992
| 0.167078
| 3,651
| 147
| 162
| 24.836735
| 0.808287
| 0
| 0
| 0.588235
| 0
| 0.04902
| 0.278828
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0.058824
| 0.009804
| 0
| 0.127451
| 0.117647
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
da72940db4da23ca94af175c31771bfeab6d74a4
| 82
|
py
|
Python
|
example/searcher/__init__.py
|
crystina-z/profane
|
b467b0d45d37de856e02eb0afad9ff012d215a42
|
[
"Apache-2.0"
] | 9
|
2020-06-05T15:12:01.000Z
|
2021-05-28T12:19:02.000Z
|
example/searcher/__init__.py
|
crystina-z/profane
|
b467b0d45d37de856e02eb0afad9ff012d215a42
|
[
"Apache-2.0"
] | 7
|
2020-06-11T08:22:00.000Z
|
2022-01-28T09:32:20.000Z
|
example/searcher/__init__.py
|
crystina-z/profane
|
b467b0d45d37de856e02eb0afad9ff012d215a42
|
[
"Apache-2.0"
] | 4
|
2020-06-10T23:12:16.000Z
|
2022-01-27T17:54:36.000Z
|
# Package initializer: ask profane to import every module in this package so
# that their module registrations run as a side effect of importing the package.
from profane import import_all_modules

import_all_modules(__file__, __package__)
| 20.5
| 41
| 0.878049
| 11
| 82
| 5.454545
| 0.636364
| 0.3
| 0.533333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085366
| 82
| 3
| 42
| 27.333333
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
da794f4a78f30e97561ee99c35198abb0f1eea5d
| 9,513
|
py
|
Python
|
tests/test_recursive_folder.py
|
yuki-inaho/superannotate-python-sdk
|
cae3ab09125631ef48d76e6ac017b94a4d7bc4a7
|
[
"MIT"
] | null | null | null |
tests/test_recursive_folder.py
|
yuki-inaho/superannotate-python-sdk
|
cae3ab09125631ef48d76e6ac017b94a4d7bc4a7
|
[
"MIT"
] | null | null | null |
tests/test_recursive_folder.py
|
yuki-inaho/superannotate-python-sdk
|
cae3ab09125631ef48d76e6ac017b94a4d7bc4a7
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import time

import superannotate as sa

# Initialise the SDK from the user's config file (import-time side effect;
# these are integration tests that talk to the live service).
sa.init(Path.home() / ".superannotate" / "config.json")

# Base name for the temporary projects; each test appends a distinct digit
# suffix so the tests can run without clashing with one another.
TEMP_PROJECT_NAME = "test_recursive"
# S3 bucket holding the same sample_recursive_test fixture as the local checkout.
_S3_BUCKET = "superannotate-python-sdk-test"


def _fresh_project(suffix):
    """Delete any leftover project named TEMP_PROJECT_NAME + suffix, then
    create and return a fresh "Vector" project with that name."""
    for pr in sa.search_projects(TEMP_PROJECT_NAME + suffix, return_metadata=True):
        sa.delete_project(pr)
    return sa.create_project(TEMP_PROJECT_NAME + suffix, "test", "Vector")


def _upload_local_images(project):
    """Recursively upload the local sample folder (2 images total)."""
    sa.upload_images_from_folder_to_project(
        project,
        "./tests/sample_recursive_test",
        annotation_status="QualityCheck",
        recursive_subfolders=True
    )


def _upload_s3_images(project, recursive, with_status=False):
    """Upload the sample folder from the test S3 bucket."""
    kwargs = {"from_s3_bucket": _S3_BUCKET, "recursive_subfolders": recursive}
    if with_status:
        # Only some tests set an explicit annotation status; the rest relied
        # on the SDK default, so the keyword is omitted entirely in that case.
        kwargs["annotation_status"] = "QualityCheck"
    sa.upload_images_from_folder_to_project(project, "sample_recursive_test", **kwargs)


def _create_local_classes(project):
    """Register annotation classes from the local fixture's classes.json."""
    sa.create_annotation_classes_from_classes_json(
        project, "./tests/sample_recursive_test/classes/classes.json"
    )


def _create_s3_classes(project):
    """Register annotation classes from the S3 fixture's classes.json."""
    sa.create_annotation_classes_from_classes_json(
        project,
        "sample_recursive_test/classes/classes.json",
        from_s3_bucket=_S3_BUCKET
    )


def _exported_json_count(project, tmpdir):
    """Prepare and download an export; return how many JSON files it yields."""
    export = sa.prepare_export(project)
    time.sleep(1)
    sa.download_export(project, export, tmpdir)
    return len(list(tmpdir.glob("*.json")))


def _preannotation_json_count(project, tmpdir):
    """Download every image's pre-annotations; return the JSON file count."""
    for image in sa.search_images(project):
        sa.download_image_preannotations(project, image, tmpdir)
    return len(list(tmpdir.glob("*.json")))


def test_nonrecursive_annotations_folder(tmpdir):
    """Non-recursive annotation upload only picks up the top-level JSON."""
    tmpdir = Path(tmpdir)
    project = _fresh_project("0")
    _upload_local_images(project)
    assert len(sa.search_images(project)) == 2
    _create_local_classes(project)
    sa.upload_annotations_from_folder_to_project(
        project, "./tests/sample_recursive_test", recursive_subfolders=False
    )
    assert _exported_json_count(project, tmpdir) == 1


def test_recursive_annotations_folder(tmpdir):
    """Recursive annotation upload covers both images."""
    tmpdir = Path(tmpdir)
    project = _fresh_project("1")
    _upload_local_images(project)
    assert len(sa.search_images(project)) == 2
    _create_local_classes(project)
    sa.upload_annotations_from_folder_to_project(
        project, "./tests/sample_recursive_test", recursive_subfolders=True
    )
    assert _exported_json_count(project, tmpdir) == 2


def test_recursive_preannotations_folder(tmpdir):
    """Recursive pre-annotation upload covers both images."""
    tmpdir = Path(tmpdir)
    project = _fresh_project("2")
    _upload_local_images(project)
    assert len(sa.search_images(project)) == 2
    _create_local_classes(project)
    sa.upload_preannotations_from_folder_to_project(
        project, "./tests/sample_recursive_test", recursive_subfolders=True
    )
    assert _preannotation_json_count(project, tmpdir) == 2


def test_nonrecursive_preannotations_folder(tmpdir):
    """Non-recursive pre-annotation upload only covers the top-level image."""
    tmpdir = Path(tmpdir)
    project = _fresh_project("3")
    _upload_local_images(project)
    assert len(sa.search_images(project)) == 2
    _create_local_classes(project)
    sa.upload_preannotations_from_folder_to_project(
        project, "./tests/sample_recursive_test", recursive_subfolders=False
    )
    assert _preannotation_json_count(project, tmpdir) == 1


def test_annotations_recursive_s3_folder(tmpdir):
    """Recursive annotation upload from S3 covers both images."""
    tmpdir = Path(tmpdir)
    project = _fresh_project("4")
    _upload_s3_images(project, recursive=True, with_status=True)
    assert len(sa.search_images(project)) == 2
    _create_s3_classes(project)
    sa.upload_annotations_from_folder_to_project(
        project,
        "sample_recursive_test",
        recursive_subfolders=True,
        from_s3_bucket=_S3_BUCKET
    )
    assert _exported_json_count(project, tmpdir) == 2


def test_annotations_nonrecursive_s3_folder(tmpdir):
    """Non-recursive annotation upload from S3 covers only one image."""
    tmpdir = Path(tmpdir)
    project = _fresh_project("5")
    _upload_s3_images(project, recursive=True, with_status=True)
    assert len(sa.search_images(project)) == 2
    _create_s3_classes(project)
    sa.upload_annotations_from_folder_to_project(
        project,
        "sample_recursive_test",
        recursive_subfolders=False,
        from_s3_bucket=_S3_BUCKET
    )
    assert _exported_json_count(project, tmpdir) == 1


def test_preannotations_recursive_s3_folder(tmpdir):
    """Recursive pre-annotation upload from S3 covers both images."""
    tmpdir = Path(tmpdir)
    project = _fresh_project("6")
    _upload_s3_images(project, recursive=True)
    assert len(sa.search_images(project)) == 2
    _create_s3_classes(project)
    sa.upload_preannotations_from_folder_to_project(
        project,
        "sample_recursive_test",
        recursive_subfolders=True,
        from_s3_bucket=_S3_BUCKET
    )
    assert _preannotation_json_count(project, tmpdir) == 2


def test_preannotations_nonrecursive_s3_folder(tmpdir):
    """Non-recursive pre-annotation upload from S3 covers only one image."""
    tmpdir = Path(tmpdir)
    project = _fresh_project("7")
    _upload_s3_images(project, recursive=True)
    assert len(sa.search_images(project)) == 2
    _create_s3_classes(project)
    sa.upload_preannotations_from_folder_to_project(
        project,
        "sample_recursive_test",
        recursive_subfolders=False,
        from_s3_bucket=_S3_BUCKET
    )
    # FIX: the original test downloaded pre-annotations but asserted nothing;
    # mirror the local non-recursive test and expect exactly one JSON.
    assert _preannotation_json_count(project, tmpdir) == 1


def test_images_nonrecursive_s3(tmpdir):
    """Non-recursive S3 image upload finds only the top-level image."""
    tmpdir = Path(tmpdir)
    project = _fresh_project("8")
    _upload_s3_images(project, recursive=False)
    assert len(sa.search_images(project)) == 1


def test_images_nonrecursive(tmpdir):
    """Non-recursive local image upload finds only the top-level image."""
    tmpdir = Path(tmpdir)
    project = _fresh_project("9")
    sa.upload_images_from_folder_to_project(
        project, "./tests/sample_recursive_test", recursive_subfolders=False
    )
    assert len(sa.search_images(project)) == 1
| 28.14497
| 76
| 0.694628
| 1,142
| 9,513
| 5.437828
| 0.06042
| 0.062802
| 0.079549
| 0.055072
| 0.953301
| 0.953301
| 0.953301
| 0.953301
| 0.953301
| 0.953301
| 0
| 0.007804
| 0.205298
| 9,513
| 337
| 77
| 28.228487
| 0.813624
| 0
| 0
| 0.710843
| 0
| 0
| 0.154315
| 0.125618
| 0
| 0
| 0
| 0
| 0.068273
| 1
| 0.040161
| false
| 0
| 0.012048
| 0
| 0.052209
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
16f6dcfc83c26e466e4ea2a8f2fce8523cdced4a
| 330
|
py
|
Python
|
easyasyncio/constants.py
|
raph92/EasyAsyncio
|
6f2e42ba20cd97577cfeb609bdc479929889c30d
|
[
"MIT"
] | 1
|
2020-01-14T05:38:17.000Z
|
2020-01-14T05:38:17.000Z
|
easyasyncio/constants.py
|
raph92/EasyAsyncio
|
6f2e42ba20cd97577cfeb609bdc479929889c30d
|
[
"MIT"
] | null | null | null |
easyasyncio/constants.py
|
raph92/EasyAsyncio
|
6f2e42ba20cd97577cfeb609bdc479929889c30d
|
[
"MIT"
] | null | null | null |
class Constants:
    """Shared constants for HTTP requests.

    BUG FIX: HEADERS was assigned twice; the first dict (without the
    "Connection" header) was dead code, immediately shadowed by the second
    assignment. Only the effective definition is kept.
    """
    HEADERS = {
        # Desktop Firefox user agent, split across two adjacent string literals.
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:70.0) '
                      'Gecko/20100101 Firefox/70.0',
        # Ask the server to close the connection after each request.
        "Connection": "close"
    }
| 25.384615
| 69
| 0.493939
| 40
| 330
| 4.025
| 0.475
| 0.074534
| 0.198758
| 0.285714
| 0.819876
| 0.819876
| 0.819876
| 0.819876
| 0.819876
| 0.819876
| 0
| 0.205607
| 0.351515
| 330
| 12
| 70
| 27.5
| 0.546729
| 0
| 0
| 0.4
| 0
| 0
| 0.518182
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
e5070a1c68a945b03eb5744b7c6cf0262b2938e6
| 6,199
|
py
|
Python
|
migrations/versions/339c728950cb_.py
|
datosgobar/georef
|
7202acedcebe4705dccf1fde622e9b3b35d92cb5
|
[
"MIT"
] | 5
|
2018-10-03T18:36:50.000Z
|
2022-01-05T20:59:07.000Z
|
migrations/versions/339c728950cb_.py
|
datosgobar/georef-ar-etl
|
7202acedcebe4705dccf1fde622e9b3b35d92cb5
|
[
"MIT"
] | 34
|
2018-10-03T18:13:46.000Z
|
2019-05-31T18:54:49.000Z
|
migrations/versions/339c728950cb_.py
|
datosgobar/georef
|
7202acedcebe4705dccf1fde622e9b3b35d92cb5
|
[
"MIT"
] | 2
|
2019-12-09T16:39:45.000Z
|
2020-10-30T02:10:19.000Z
|
"""Initial migration
Revision ID: 339c728950cb
Revises:
Create Date: 2019-04-08 09:30:44.981425
"""
from alembic import op
import sqlalchemy as sa
import geoalchemy2
# revision identifiers, used by Alembic.
revision = '339c728950cb'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the georef schema: provinces, departments, municipalities,
    streets, localities, blocks and street intersections.

    Tables are created parents-first so every ForeignKeyConstraint can
    resolve; all FKs cascade on delete. Geometry columns use SRID 4326.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # Provinces: root of the hierarchy, referenced by everything below.
    op.create_table('georef_provincias',
        sa.Column('id', sa.String(), nullable=False),
        sa.Column('nombre', sa.String(), nullable=False),
        sa.Column('fuente', sa.String(), nullable=False),
        sa.Column('categoria', sa.String(), nullable=False),
        sa.Column('nombre_completo', sa.String(), nullable=False),
        sa.Column('iso_id', sa.String(), nullable=False),
        sa.Column('iso_nombre', sa.String(), nullable=False),
        sa.Column('lon', sa.Float(), nullable=False),
        sa.Column('lat', sa.Float(), nullable=False),
        sa.Column('geometria', geoalchemy2.types.Geometry(geometry_type='MULTIPOLYGON', srid=4326), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    # Departments: belong to a province.
    op.create_table('georef_departamentos',
        sa.Column('id', sa.String(), nullable=False),
        sa.Column('nombre', sa.String(), nullable=False),
        sa.Column('fuente', sa.String(), nullable=False),
        sa.Column('categoria', sa.String(), nullable=False),
        sa.Column('nombre_completo', sa.String(), nullable=False),
        sa.Column('provincia_interseccion', sa.Float(), nullable=False),
        sa.Column('lon', sa.Float(), nullable=False),
        sa.Column('lat', sa.Float(), nullable=False),
        sa.Column('geometria', geoalchemy2.types.Geometry(geometry_type='MULTIPOLYGON', srid=4326), nullable=False),
        sa.Column('provincia_id', sa.String(), nullable=False),
        sa.ForeignKeyConstraint(['provincia_id'], ['georef_provincias.id'], ondelete='cascade'),
        sa.PrimaryKeyConstraint('id')
    )
    # Municipalities: same shape as departments, also province-scoped.
    op.create_table('georef_municipios',
        sa.Column('id', sa.String(), nullable=False),
        sa.Column('nombre', sa.String(), nullable=False),
        sa.Column('fuente', sa.String(), nullable=False),
        sa.Column('categoria', sa.String(), nullable=False),
        sa.Column('nombre_completo', sa.String(), nullable=False),
        sa.Column('provincia_interseccion', sa.Float(), nullable=False),
        sa.Column('lon', sa.Float(), nullable=False),
        sa.Column('lat', sa.Float(), nullable=False),
        sa.Column('geometria', geoalchemy2.types.Geometry(geometry_type='MULTIPOLYGON', srid=4326), nullable=False),
        sa.Column('provincia_id', sa.String(), nullable=False),
        sa.ForeignKeyConstraint(['provincia_id'], ['georef_provincias.id'], ondelete='cascade'),
        sa.PrimaryKeyConstraint('id')
    )
    # Streets: carry door-number ranges for each side of the street.
    op.create_table('georef_calles',
        sa.Column('id', sa.String(), nullable=False),
        sa.Column('nombre', sa.String(), nullable=False),
        sa.Column('fuente', sa.String(), nullable=False),
        sa.Column('categoria', sa.String(), nullable=False),
        sa.Column('inicio_derecha', sa.Integer(), nullable=False),
        sa.Column('fin_derecha', sa.Integer(), nullable=False),
        sa.Column('inicio_izquierda', sa.Integer(), nullable=False),
        sa.Column('fin_izquierda', sa.Integer(), nullable=False),
        sa.Column('geometria', geoalchemy2.types.Geometry(geometry_type='MULTILINESTRING', srid=4326), nullable=False),
        sa.Column('provincia_id', sa.String(), nullable=False),
        sa.Column('departamento_id', sa.String(), nullable=False),
        sa.ForeignKeyConstraint(['departamento_id'], ['georef_departamentos.id'], ondelete='cascade'),
        sa.ForeignKeyConstraint(['provincia_id'], ['georef_provincias.id'], ondelete='cascade'),
        sa.PrimaryKeyConstraint('id')
    )
    # Localities: municipio_id is the only nullable FK column in the schema.
    op.create_table('georef_localidades',
        sa.Column('id', sa.String(), nullable=False),
        sa.Column('nombre', sa.String(), nullable=False),
        sa.Column('fuente', sa.String(), nullable=False),
        sa.Column('categoria', sa.String(), nullable=False),
        sa.Column('municipio_id', sa.String(), nullable=True),
        sa.Column('lon', sa.Float(), nullable=False),
        sa.Column('lat', sa.Float(), nullable=False),
        sa.Column('geometria', geoalchemy2.types.Geometry(geometry_type='MULTIPOINT', srid=4326), nullable=False),
        sa.Column('provincia_id', sa.String(), nullable=False),
        sa.Column('departamento_id', sa.String(), nullable=False),
        sa.ForeignKeyConstraint(['departamento_id'], ['georef_departamentos.id'], ondelete='cascade'),
        sa.ForeignKeyConstraint(['municipio_id'], ['georef_municipios.id'], ondelete='cascade'),
        sa.ForeignKeyConstraint(['provincia_id'], ['georef_provincias.id'], ondelete='cascade'),
        sa.PrimaryKeyConstraint('id')
    )
    # Street blocks: segments of a street with their own number ranges.
    op.create_table('georef_cuadras',
        sa.Column('inicio_derecha', sa.Integer(), nullable=False),
        sa.Column('fin_derecha', sa.Integer(), nullable=False),
        sa.Column('inicio_izquierda', sa.Integer(), nullable=False),
        sa.Column('fin_izquierda', sa.Integer(), nullable=False),
        sa.Column('id', sa.String(), nullable=False),
        sa.Column('calle_id', sa.String(), nullable=False),
        sa.Column('geometria', geoalchemy2.types.Geometry(geometry_type='MULTILINESTRING', srid=4326), nullable=False),
        sa.ForeignKeyConstraint(['calle_id'], ['georef_calles.id'], ondelete='cascade'),
        sa.PrimaryKeyConstraint('id')
    )
    # Intersections: a POINT where two streets cross.
    op.create_table('georef_intersecciones',
        sa.Column('id', sa.String(), nullable=False),
        sa.Column('calle_a_id', sa.String(), nullable=False),
        sa.Column('calle_b_id', sa.String(), nullable=False),
        sa.Column('geometria', geoalchemy2.types.Geometry(geometry_type='POINT', srid=4326), nullable=False),
        sa.ForeignKeyConstraint(['calle_a_id'], ['georef_calles.id'], ondelete='cascade'),
        sa.ForeignKeyConstraint(['calle_b_id'], ['georef_calles.id'], ondelete='cascade'),
        sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop every table created by upgrade(), children before parents so all
    foreign-key constraints are satisfied."""
    # ### commands auto generated by Alembic - please adjust! ###
    for table_name in (
        'georef_intersecciones',
        'georef_cuadras',
        'georef_localidades',
        'georef_calles',
        'georef_municipios',
        'georef_departamentos',
        'georef_provincias',
    ):
        op.drop_table(table_name)
    # ### end Alembic commands ###
| 48.429688
| 115
| 0.694789
| 751
| 6,199
| 5.619174
| 0.11984
| 0.117536
| 0.216825
| 0.26872
| 0.847393
| 0.842654
| 0.842654
| 0.795735
| 0.766351
| 0.75
| 0
| 0.013588
| 0.121471
| 6,199
| 127
| 116
| 48.811024
| 0.761293
| 0.046298
| 0
| 0.614679
| 0
| 0
| 0.215489
| 0.022468
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018349
| false
| 0
| 0.027523
| 0
| 0.045872
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e582f7e76937d43ff367d0f1dd8ea33e9924baa4
| 5,729
|
py
|
Python
|
dfirtrack_api/tests/dfirtrack_artifacts/artifactstatus/test_artifactstatus_api_views.py
|
thomas-kropeit/dfirtrack
|
b1e0e659af7bc8085cfe2d269ddc651f9f4ba585
|
[
"Apache-2.0"
] | 273
|
2018-04-18T22:09:15.000Z
|
2021-06-04T09:15:48.000Z
|
dfirtrack_api/tests/dfirtrack_artifacts/artifactstatus/test_artifactstatus_api_views.py
|
stuhli/dfirtrack
|
9260c91e4367b36d4cb1ae7efe4e2d2452f58e6e
|
[
"Apache-2.0"
] | 75
|
2018-08-31T11:05:37.000Z
|
2021-06-08T14:15:07.000Z
|
dfirtrack_api/tests/dfirtrack_artifacts/artifactstatus/test_artifactstatus_api_views.py
|
thomas-kropeit/dfirtrack
|
b1e0e659af7bc8085cfe2d269ddc651f9f4ba585
|
[
"Apache-2.0"
] | 61
|
2018-11-12T22:55:48.000Z
|
2021-06-06T15:16:16.000Z
|
import urllib.parse
from django.contrib.auth.models import User
from django.test import TestCase
from dfirtrack_artifacts.models import Artifactstatus
class ArtifactstatusAPIViewTestCase(TestCase):
    """artifactstatus API view tests"""

    @classmethod
    def setUpTestData(cls):
        """Provide one artifactstatus object and one API test user."""
        # create object
        Artifactstatus.objects.create(artifactstatus_name='artifactstatus_api_1')
        # create user
        User.objects.create_user(
            username='testuser_artifactstatus_api', password='aCTVRIdJ4cyVSkYiJKrM'
        )

    def _login_testuser(self):
        """Log the shared test user in on the test client."""
        self.client.login(
            username='testuser_artifactstatus_api', password='aCTVRIdJ4cyVSkYiJKrM'
        )

    def _detail_url(self, with_slash=True):
        """Build the detail endpoint URL for the shared artifactstatus object."""
        artifactstatus_api_1 = Artifactstatus.objects.get(
            artifactstatus_name='artifactstatus_api_1'
        )
        url = f'/api/artifactstatus/{artifactstatus_api_1.artifactstatus_id}'
        return url + '/' if with_slash else url

    def test_artifactstatus_list_api_unauthorized(self):
        """unauthorized access is forbidden"""
        response = self.client.get('/api/artifactstatus/')
        self.assertEqual(response.status_code, 401)

    def test_artifactstatus_list_api_method_get(self):
        """GET is allowed"""
        self._login_testuser()
        response = self.client.get('/api/artifactstatus/')
        self.assertEqual(response.status_code, 200)

    def test_artifactstatus_list_api_method_post(self):
        """POST is forbidden"""
        self._login_testuser()
        poststring = {"artifactstatus_name": "artifactstatus_api_2"}
        response = self.client.post('/api/artifactstatus/', data=poststring)
        self.assertEqual(response.status_code, 405)

    def test_artifactstatus_list_api_redirect(self):
        """test redirect with appending slash"""
        self._login_testuser()
        destination = urllib.parse.quote('/api/artifactstatus/', safe='/')
        response = self.client.get('/api/artifactstatus', follow=True)
        self.assertRedirects(
            response, destination, status_code=301, target_status_code=200
        )

    def test_artifactstatus_detail_api_unauthorized(self):
        """unauthorized access is forbidden"""
        response = self.client.get(self._detail_url())
        self.assertEqual(response.status_code, 401)

    def test_artifactstatus_detail_api_method_get(self):
        """GET is allowed"""
        self._login_testuser()
        response = self.client.get(self._detail_url())
        self.assertEqual(response.status_code, 200)

    def test_artifactstatus_detail_api_method_delete(self):
        """DELETE is forbidden"""
        self._login_testuser()
        response = self.client.delete(self._detail_url())
        self.assertEqual(response.status_code, 405)

    def test_artifactstatus_detail_api_method_put(self):
        """PUT is forbidden"""
        self._login_testuser()
        destination = urllib.parse.quote(self._detail_url(), safe='/')
        putstring = {"artifactstatus_name": "new_artifactstatus_api_1"}
        response = self.client.put(
            destination, data=putstring, content_type='application/json'
        )
        self.assertEqual(response.status_code, 405)

    def test_artifactstatus_detail_api_redirect(self):
        """test redirect with appending slash"""
        self._login_testuser()
        destination = urllib.parse.quote(self._detail_url(), safe='/')
        response = self.client.get(self._detail_url(with_slash=False), follow=True)
        self.assertRedirects(
            response, destination, status_code=301, target_status_code=200
        )
| 33.30814
| 87
| 0.635189
| 535
| 5,729
| 6.566355
| 0.149533
| 0.135497
| 0.092229
| 0.100199
| 0.845716
| 0.818958
| 0.791631
| 0.77569
| 0.703103
| 0.688016
| 0
| 0.014399
| 0.272648
| 5,729
| 171
| 88
| 33.502924
| 0.828654
| 0.121487
| 0
| 0.5
| 0
| 0
| 0.165993
| 0.048524
| 0
| 0
| 0
| 0
| 0.09
| 1
| 0.1
| false
| 0.08
| 0.04
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
009bd50f3a057c0c057a987cff77fb8b8daf1a4e
| 35,306
|
py
|
Python
|
tests/test_adaptavist.py
|
devolo/adaptavist
|
8e5e9249d2640850e1f7bf5617a9fad19834e2f5
|
[
"MIT"
] | 3
|
2020-06-11T11:37:28.000Z
|
2022-03-10T19:27:55.000Z
|
tests/test_adaptavist.py
|
devolo/adaptavist
|
8e5e9249d2640850e1f7bf5617a9fad19834e2f5
|
[
"MIT"
] | 7
|
2020-02-14T13:19:55.000Z
|
2021-05-20T15:27:36.000Z
|
tests/test_adaptavist.py
|
devolo/adaptavist
|
8e5e9249d2640850e1f7bf5617a9fad19834e2f5
|
[
"MIT"
] | 9
|
2020-04-21T05:41:48.000Z
|
2022-01-09T13:13:17.000Z
|
"""Test the Adaptavist module."""
import json
from io import BytesIO
from unittest.mock import mock_open, patch
from pytest import raises
from requests_mock import Mocker
from adaptavist import Adaptavist
from adaptavist.const import STATUS_FAIL, STATUS_PASS
from . import load_fixture
class TestAdaptavist:
    """Unit tests for the Adaptavist REST client; all HTTP traffic is mocked."""

    # base URL of the mocked Jira instance
    _jira_url = "mock://jira"
    # Adaptavist/TM4J REST API root derived from the Jira base URL
    _adaptavist_api_url = f"{_jira_url}/rest/atm/1.0"
def test_get_users(self, requests_mock: Mocker):
    """Test getting all users."""
    client = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    # First page returns one user, second page is empty -> pagination stops.
    requests_mock.get(
        f"{TestAdaptavist._jira_url}/rest/api/2/user/search?username=.&startAt=0&maxResults=200",
        text=load_fixture("get_users.json"),
    )
    requests_mock.get(
        f"{TestAdaptavist._jira_url}/rest/api/2/user/search?username=.&startAt=1&maxResults=200",
        text="[]",
    )
    assert client.get_users() == ["Testuser"]
def test_get_projects(self, requests_mock: Mocker):
    """Test getting all projects."""
    client = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    requests_mock.get(
        f"{TestAdaptavist._jira_url}/rest/tests/1.0/project",
        text=load_fixture("get_projects.json"),
    )
    assert client.get_projects()[0]["id"] == 10000
def test_get_environments(self, requests_mock: Mocker):
    """Test getting all environments of a project."""
    client = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    requests_mock.get(
        f"{TestAdaptavist._adaptavist_api_url}/environments?projectKey=JQA",
        text=load_fixture("get_environments.json"),
    )
    assert client.get_environments(project_key="JQA")[0]["id"] == 100
def test_create_environment(self, requests_mock: Mocker):
    """Test creating an environment for a project."""
    client = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    requests_mock.post(
        f"{TestAdaptavist._adaptavist_api_url}/environments",
        text=load_fixture("create_environment.json"),
    )
    created_id = client.create_environment(
        project_key="TEST",
        environment_name="Test environment",
        description="Cool new environment for testing.",
    )
    assert created_id == 37
def test_get_folders(self, requests_mock: Mocker):
    """Test getting all folders of a project."""
    client = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    requests_mock.get(
        f"{TestAdaptavist._jira_url}/rest/tests/1.0/project/10000/foldertree/testcase?startAt=0&maxResults=200",
        text=load_fixture("get_folders.json"),
    )
    # get_folders resolves the project key to its numeric id via get_projects.
    with patch("adaptavist.Adaptavist.get_projects", return_value=json.loads(load_fixture("get_projects.json"))):
        assert client.get_folders(project_key="TEST", folder_type="TEST_CASE") == ["/", "/Test folder"]
def test_create_folder(self, requests_mock: Mocker):
    """Test creating a folder in a project."""
    client = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    requests_mock.post(
        f"{TestAdaptavist._adaptavist_api_url}/folder",
        text=load_fixture("create_folder.json"),
    )
    with patch("adaptavist.Adaptavist.get_folders", return_value=["/"]):
        assert client.create_folder(project_key="TEST", folder_type="TEST_CASE", folder_name="Test folder") == 123
    # An already existing folder must not be created twice.
    with patch("adaptavist.Adaptavist.get_folders", return_value=["/", "/Test folder"]):
        assert client.create_folder(project_key="TEST", folder_type="TEST_CASE", folder_name="Test folder") is None
    # The root folder always exists and must never be created again.
    with patch("adaptavist.Adaptavist.get_folders"):
        assert client.create_folder(project_key="TEST", folder_type="TEST_CASE", folder_name="/") is None
def test_get_test_case(self, requests_mock: Mocker):
    """Test getting a single test case of a project."""
    client = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    requests_mock.get(
        f"{TestAdaptavist._adaptavist_api_url}/testcase/JQA-T123",
        text=load_fixture("get_test_case.json"),
    )
    assert client.get_test_case(test_case_key="JQA-T123")["key"] == "JQA-T123"
def test_get_test_cases(self, requests_mock: Mocker):
    """Test getting all test cases of a project."""
    client = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    # One page of results, then an empty page to end pagination.
    requests_mock.get(
        f"{TestAdaptavist._adaptavist_api_url}/testcase/search?query=folder+%3C%3D+%22%2F%22&startAt=0",
        text=load_fixture("get_test_cases.json"),
    )
    requests_mock.get(
        f"{TestAdaptavist._adaptavist_api_url}/testcase/search?query=folder+%3C%3D+%22%2F%22&startAt=1",
        text="[]",
    )
    assert client.get_test_cases()[0]["key"] == "JQA-T123"
def test_create_test_case(self, requests_mock: Mocker):
    """Test creating a test case for a project."""
    client = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    requests_mock.post(
        f"{TestAdaptavist._adaptavist_api_url}/testcase",
        text=load_fixture("create_test_case.json"),
    )
    with patch("adaptavist.Adaptavist.create_folder"):
        created_key = client.create_test_case(
            project_key="JQA",
            test_case_name="Ensure the axial-flow pump is enabled",
            folder="Test folder",
        )
        assert created_key == "JQA-T123"
    # Test that folder is submitted as null if the root folder is chosen
    with patch("adaptavist.Adaptavist.create_folder"):
        with patch("adaptavist.Adaptavist._post") as post:
            client.create_test_case(project_key="JQA", test_case_name="Ensure the axial-flow pump is enabled")
            assert post.call_args_list[0][0][1]['folder'] is None
def test_edit_test_case(self, requests_mock: Mocker):
    """Test editing a test case of a project.

    Covers folder normalization, label-list replacement and custom-field
    replacement behavior of ``edit_test_case``.
    """
    requests_mock.put(f"{TestAdaptavist._adaptavist_api_url}/testcase/JQA-T123")
    adaptavist = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    with patch("adaptavist.Adaptavist.get_test_case", return_value={"name": "Test case", "projectKey": "JQA"}), \
         patch("adaptavist.Adaptavist.create_folder"):
        assert adaptavist.edit_test_case(test_case_key="JQA-T123", folder="Test folder")
    # Test that folder is submitted as null if the root folder is chosen
    # (put.call_args_list[0][0][1] is the request body dict of the first _put call)
    with patch("adaptavist.Adaptavist.get_test_case", return_value={"name": "Test case", "projectKey": "JQA"}), \
         patch("adaptavist.Adaptavist.create_folder"), \
         patch("adaptavist.Adaptavist._put") as put:
        assert adaptavist.edit_test_case(test_case_key="JQA-T123", folder="/")
        assert put.call_args_list[0][0][1]['folder'] is None
    # Test that existing labels are removed, if the list starts with "-"
    with patch("adaptavist.Adaptavist.get_test_case", return_value={"name": "Test case", "projectKey": "JQA", "labels": ["automated"]}), \
         patch("adaptavist.Adaptavist.create_folder"), \
         patch("adaptavist.Adaptavist._put") as put:
        assert adaptavist.edit_test_case(test_case_key="JQA-T123", folder="/", labels=["-", "tested"])
        assert put.call_args_list[0][0][1]['labels'] == ["tested"]
    # Test that existing custom fields are emptied, if the list starts with "-"
    with patch("adaptavist.Adaptavist.get_test_case", return_value={"name": "Test case", "projectKey": "JQA", "ci_server_url": ["mock://jenkins"]}), \
         patch("adaptavist.Adaptavist.create_folder"), \
         patch("adaptavist.Adaptavist._put") as put:
        assert adaptavist.edit_test_case(test_case_key="JQA-T123", folder="/", build_urls=["-", "mock://gitlab"])
        assert put.call_args_list[0][0][1]['customFields'] == {"ci_server_url": "mock://gitlab"}
def test_delete_test_case(self, requests_mock: Mocker):
    """Test deleting a test case of a project."""
    client = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    requests_mock.delete(f"{TestAdaptavist._adaptavist_api_url}/testcase/JQA-T123")
    assert client.delete_test_case(test_case_key="JQA-T123")
def test_get_test_case_links(self, requests_mock: Mocker):
    """Test getting a list of test cases linked to an issue."""
    client = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    requests_mock.get(
        f"{TestAdaptavist._adaptavist_api_url}/issuelink/JQA-1234/testcases",
        text=load_fixture("get_test_case_links.json"),
    )
    linked = client.get_test_case_links(issue_key="JQA-1234")
    assert linked[0]["key"] == "JQA-T123"
def test_link_test_cases(self, requests_mock: Mocker):
    """Test linking an issue to test cases."""
    requests_mock.put(f"{TestAdaptavist._adaptavist_api_url}/testcase/JQA-T123")
    adaptavist = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    with patch("adaptavist.Adaptavist.get_test_case", return_value={"name": "Test case", "projectKey": "JQA"}):
        assert adaptavist.link_test_cases(issue_key="JQA-123", test_case_keys=["JQA-T123"])
    # Test linking multiple test cases
    with patch("adaptavist.Adaptavist.get_test_case", return_value={"name": "Test case", "projectKey": "JQA"}), \
         patch("adaptavist.Adaptavist._put") as put:
        assert adaptavist.link_test_cases(issue_key="JQA-123", test_case_keys=["JQA-T123", "JQA-T124"])
        assert put.call_count == 2
    # Test that adding already existing issues do not trigger I/O
    with patch("adaptavist.Adaptavist.get_test_case", return_value={"name": "Test case", "projectKey": "JQA", "issueLinks": ["JQA-123"]}), \
         patch("adaptavist.Adaptavist._put") as put:
        assert adaptavist.link_test_cases(issue_key="JQA-123", test_case_keys=["JQA-T123"])
        # BUG FIX: `assert put.assert_not_called` only checked that the bound
        # method object is truthy (always true); the mock method must be
        # *called* for the check to take effect.
        put.assert_not_called()
def test_unlink_test_cases(self, requests_mock: Mocker):
    """Test unlinking an issue from a test cases."""
    requests_mock.put(f"{TestAdaptavist._adaptavist_api_url}/testcase/JQA-T123")
    adaptavist = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    with patch("adaptavist.Adaptavist.get_test_case", return_value={"name": "Test case", "projectKey": "JQA"}):
        assert adaptavist.unlink_test_cases(issue_key="JQA-123", test_case_keys=["JQA-T123"])
    # Test unlinking multiple test cases
    # Actually, we are cheating here a bit: using link_test_cases the same issue cannot be linked twice.
    # But to trick how Python handles lists, we return the same issue twice in the patched return value.
    with patch("adaptavist.Adaptavist.get_test_case", return_value={"name": "Test case", "projectKey": "JQA", "issueLinks": ["JQA-123", "JQA-123"]}), \
         patch("adaptavist.Adaptavist._put") as put:
        assert adaptavist.unlink_test_cases(issue_key="JQA-123", test_case_keys=["JQA-T123", "JQA-T124"])
        assert put.call_count == 2
    # Test that not linked issues do not trigger I/O
    with patch("adaptavist.Adaptavist.get_test_case", return_value={"name": "Test case", "projectKey": "JQA", "issueLinks": ["JQA-124"]}), \
         patch("adaptavist.Adaptavist._put") as put:
        assert adaptavist.unlink_test_cases(issue_key="JQA-123", test_case_keys=["JQA-T123"])
        # BUG FIX: `assert put.assert_not_called` was vacuous (a bound method
        # is always truthy); the assertion method must actually be invoked.
        put.assert_not_called()
def test_get_test_plan(self, requests_mock: Mocker):
    """Test getting a test plan of a project."""
    client = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    requests_mock.get(
        f"{TestAdaptavist._adaptavist_api_url}/testplan/JQA-P1234",
        text=load_fixture("get_test_plan.json"),
    )
    assert client.get_test_plan(test_plan_key="JQA-P1234")["key"] == "JQA-P123"
def test_get_test_plans(self, requests_mock: Mocker):
    """Test getting all test plans of a project."""
    client = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    # One result page followed by an empty page to end pagination.
    requests_mock.get(
        f"{TestAdaptavist._adaptavist_api_url}/testplan/search?query=folder+%3C%3D+%22%2F%22&startAt=0",
        text=load_fixture("get_test_plans.json"),
    )
    requests_mock.get(
        f"{TestAdaptavist._adaptavist_api_url}/testplan/search?query=folder+%3C%3D+%22%2F%22&startAt=1",
        text="[]",
    )
    assert client.get_test_plans()[0]["key"] == "JQA-P123"
def test_create_test_plan(self, requests_mock: Mocker):
    """Test creating a test plan for a project."""
    client = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    requests_mock.post(
        f"{TestAdaptavist._adaptavist_api_url}/testplan",
        text=load_fixture("create_test_plan.json"),
    )
    with patch("adaptavist.Adaptavist.create_folder"):
        created_key = client.create_test_plan(project_key="JQA", test_plan_name="Plan for a new version", folder="Test folder")
        assert created_key == "JQA-P123"
    # Test that folder is submitted as null if the root folder is chosen
    with patch("adaptavist.Adaptavist.create_folder"):
        with patch("adaptavist.Adaptavist._post") as post:
            client.create_test_plan(project_key="JQA", test_plan_name="Plan for a new version")
            assert post.call_args_list[0][0][1]['folder'] is None
def test_edit_test_plan(self, requests_mock: Mocker):
    """Test editing a test plan of a project.

    Covers folder normalization and label-list replacement behavior of
    ``edit_test_plan``.
    """
    requests_mock.put(f"{TestAdaptavist._adaptavist_api_url}/testplan/JQA-P123")
    adaptavist = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    with patch("adaptavist.Adaptavist.get_test_plan", return_value={"name": "Test plan", "projectKey": "JQA"}), \
         patch("adaptavist.Adaptavist.create_folder"):
        assert adaptavist.edit_test_plan(test_plan_key="JQA-P123", folder="Test folder")
    # Test that folder is submitted as null if the root folder is chosen
    # (put.call_args_list[0][0][1] is the request body dict of the first _put call)
    with patch("adaptavist.Adaptavist.get_test_plan", return_value={"name": "Test plan", "projectKey": "JQA"}), \
         patch("adaptavist.Adaptavist.create_folder"), \
         patch("adaptavist.Adaptavist._put") as put:
        assert adaptavist.edit_test_plan(test_plan_key="JQA-P123", folder="/")
        assert put.call_args_list[0][0][1]['folder'] is None
    # Test that existing labels are removed, if the list starts with "-"
    with patch("adaptavist.Adaptavist.get_test_plan", return_value={"name": "Test plan", "projectKey": "JQA", "labels": ["automated"]}), \
         patch("adaptavist.Adaptavist.create_folder"), \
         patch("adaptavist.Adaptavist._put") as put:
        assert adaptavist.edit_test_plan(test_plan_key="JQA-P123", folder="/", labels=["-", "tested"])
        assert put.call_args_list[0][0][1]['labels'] == ["tested"]
def test_get_test_run(self, requests_mock: Mocker):
    """Test getting a test run of a project by its key."""
    client = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    requests_mock.get(
        f"{TestAdaptavist._adaptavist_api_url}/testrun/JQA-R123",
        text=load_fixture("get_test_run.json"),
    )
    assert client.get_test_run(test_run_key="JQA-R123")["key"] == "JQA-R123"
def test_get_test_run_by_name(self, requests_mock: Mocker):
    """Test getting a test run of a project by its name."""
    client = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    search_url = f"{TestAdaptavist._jira_url}/rest/tests/1.0/testrun/search"
    query_tail = "&maxResults=10000&query=testRun.name+%3D+%22Testplan%22&fields=id,key,name"
    # First page with the match, empty second page to end pagination.
    requests_mock.get(f"{search_url}?startAt=0{query_tail}", text=load_fixture("get_test_run_by_name.json"))
    requests_mock.get(f"{search_url}?startAt=1{query_tail}", text='{"results":[]}')
    test_run = client.get_test_run_by_name(test_run_name="Testplan")
    assert test_run["key"] == "JQA-R123"
    assert test_run["name"] == "Testplan"
def test_get_test_runs(self, requests_mock: Mocker):
    """Test getting all test runs of a project."""
    client = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    requests_mock.get(
        f"{TestAdaptavist._adaptavist_api_url}/testrun/search?query=folder+%3D+%22%2F%22&startAt=0",
        text=load_fixture("get_test_runs.json"),
    )
    requests_mock.get(
        f"{TestAdaptavist._adaptavist_api_url}/testrun/search?query=folder+%3D+%22%2F%22&startAt=1",
        text="[]",
    )
    assert client.get_test_runs()[0]["key"] == "JQA-R123"
def test_get_test_run_links(self):
    """Test getting issues linked to a run of a project."""
    client = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    runs = json.loads(load_fixture("get_test_runs.json"))
    with patch("adaptavist.Adaptavist.get_test_runs", return_value=runs):
        linked_runs = client.get_test_run_links(issue_key="JQA-123")
        assert linked_runs[0]["key"] == "JQA-R123"
def test_create_test_run(self, requests_mock: Mocker):
    """Test creating a test run for a project."""
    client = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    requests_mock.post(
        f"{TestAdaptavist._adaptavist_api_url}/testrun",
        text=load_fixture("create_test_run.json"),
    )
    with patch("adaptavist.Adaptavist.create_folder"):
        created_key = client.create_test_run(project_key="JQA", test_run_name="Run for a new version", folder="Test folder")
        assert created_key == "JQA-R123"
    # Test that folder is submitted as null if the root folder is chosen
    # NOTE(review): the name below says "Plan" although this creates a run —
    # looks like copy-paste from test_create_test_plan, harmless for the check.
    with patch("adaptavist.Adaptavist.create_folder"):
        with patch("adaptavist.Adaptavist._post") as post:
            client.create_test_run(project_key="JQA", test_run_name="Plan for a new version")
            assert post.call_args_list[0][0][1]['folder'] is None
def test_clone_test_run(self):
    """Test cloning an existing test run."""
    adaptavist = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    with patch("adaptavist.Adaptavist.get_test_run", return_value=json.loads(load_fixture("get_test_run.json"))), \
         patch("adaptavist.Adaptavist.create_test_run", return_value="JQA-R124") as create_test_run, \
         patch("adaptavist.Adaptavist.get_test_plans", return_value=json.loads(load_fixture("get_test_plans.json"))), \
         patch("adaptavist.Adaptavist.edit_test_plan") as edit_test_plan:
        test_run = adaptavist.clone_test_run(test_run_key="JQA-R123", test_run_name="Cloned test case")
        assert test_run == "JQA-R124"
        assert create_test_run.call_args_list[0][1]["test_run_name"] == "Cloned test case"
        # BUG FIX: `assert edit_test_plan.assert_called_once` only tested the
        # truthiness of the bound method; the mock check must be invoked.
        edit_test_plan.assert_called_once()
    # Test that cloned test cases are only linked, if the original test case was linked to something
    with patch("adaptavist.Adaptavist.get_test_run", return_value=json.loads(load_fixture("get_test_run.json"))), \
         patch("adaptavist.Adaptavist.create_test_run", return_value="JQA-R124") as create_test_run, \
         patch("adaptavist.Adaptavist.get_test_plans", return_value=[]), \
         patch("adaptavist.Adaptavist.edit_test_plan") as edit_test_plan:
        test_run = adaptavist.clone_test_run(test_run_key="JQA-R123")
        assert test_run == "JQA-R124"
        # BUG FIX: same vacuous-assert pattern as above.
        edit_test_plan.assert_not_called()
    # Test that cloned test append a suffix, if no name is given
    with patch("adaptavist.Adaptavist.get_test_run", return_value=json.loads(load_fixture("get_test_run.json"))), \
         patch("adaptavist.Adaptavist.create_test_run", return_value="JQA-R124") as create_test_run, \
         patch("adaptavist.Adaptavist.get_test_plans", return_value=[]), \
         patch("adaptavist.Adaptavist.edit_test_plan"):
        test_run = adaptavist.clone_test_run(test_run_key="JQA-R123")
        assert create_test_run.call_args_list[0][1]["test_run_name"] == "Full regression (cloned from JQA-R123)"
def test_get_test_execution_results(self, requests_mock: Mocker):
    """Test getting all test execution results."""
    client = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    requests_mock.get(
        f"{TestAdaptavist._jira_url}/rest/tests/1.0/reports/testresults?startAt=0&maxResults=10000",
        text=load_fixture("get_test_execution_results.json"),
    )
    requests_mock.get(
        f"{TestAdaptavist._jira_url}/rest/tests/1.0/reports/testresults?startAt=1&maxResults=10000",
        text='{"results":[]}',
    )
    assert client.get_test_execution_results()[0]["key"] == "JQA-E123"
def test_get_test_results(self, requests_mock: Mocker):
    """Test getting test results of a test run."""
    client = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    requests_mock.get(
        f"{TestAdaptavist._adaptavist_api_url}/testrun/JQA-T123/testresults",
        text=load_fixture("get_test_results.json"),
    )
    assert client.get_test_results(test_run_key="JQA-T123")[0]["testCaseKey"] == "JQA-T123"
def test_create_test_results(self, requests_mock: Mocker):
    """Test creating test results."""
    client = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    requests_mock.post(
        f"{TestAdaptavist._adaptavist_api_url}/testrun/JQA-R123/testresults",
        text=load_fixture("create_test_results.json"),
    )
    with patch("adaptavist.Adaptavist.get_test_run", return_value=json.loads(load_fixture("get_test_run.json"))):
        assert client.create_test_results(test_run_key="JQA-R123", results=[{"status": "Fail", "testCaseKey": "JQA-T5678"}])
    # Test that executor and assignee are submitted as null if empty string is given
    with patch("adaptavist.Adaptavist.get_test_run", return_value=json.loads(load_fixture("get_test_run.json"))):
        with patch("adaptavist.Adaptavist._post") as post:
            client.create_test_results(
                test_run_key="JQA-R123",
                results=[{"status": "Fail", "testCaseKey": "JQA-T5678"}],
                assignee="",
                executor="",
            )
            submitted = post.call_args_list[0][0][1][0]
            assert submitted['assignedTo'] is None
            assert submitted['executedBy'] is None
def test_get_test_result(self):
    """Test getting a test result of a test run."""
    client = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    fixture_results = json.loads(load_fixture("get_test_results.json"))
    with patch("adaptavist.Adaptavist.get_test_results", return_value=fixture_results):
        result = client.get_test_result(test_run_key="JQA-R123", test_case_key="JQA-T123")
        assert result["testCaseKey"] == "JQA-T123"
def test_create_test_result(self, requests_mock: Mocker):
    """Test creating a test result."""
    requests_mock.post(f"{TestAdaptavist._adaptavist_api_url}/testrun/JQA-R123/testcase/JQA-T123/testresult", text=load_fixture("create_test_result.json"))
    adaptavist = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    result = adaptavist.create_test_result(test_run_key="JQA-R123", test_case_key="JQA-T123", status=STATUS_PASS)
    assert result == 123
    # Test that executor and assignee are submitted as null if empty string is given
    with patch("adaptavist.Adaptavist._post") as post:
        adaptavist.create_test_result(test_run_key="JQA-R123", test_case_key="JQA-T123", status=STATUS_PASS, assignee="", executor="")
        assert post.call_args_list[0][0][1]["assignedTo"] is None
        assert post.call_args_list[0][0][1]["executedBy"] is None
    # Test that optional fields are sent if set
    with patch("adaptavist.Adaptavist._post") as post:
        adaptavist.create_test_result(test_run_key="JQA-R123", test_case_key="JQA-T123", status=STATUS_PASS, execute_time=3, issue_links=["JQA-123"])
        assert post.call_args_list[0][0][1]["executionTime"] == 3000
        assert post.call_args_list[0][0][1]["issueLinks"] == ["JQA-123"]
    # Test that optional fields are not sent if not set
    with patch("adaptavist.Adaptavist._post") as post:
        adaptavist.create_test_result(test_run_key="JQA-R123", test_case_key="JQA-T123", status=STATUS_PASS)
        # BUG FIX: the request body is a dict, so `hasattr(body, "executionTime")`
        # is always False and the old assertions passed vacuously; key
        # membership is the correct check.
        assert "executionTime" not in post.call_args_list[0][0][1]
        assert "issueLinks" not in post.call_args_list[0][0][1]
def test_edit_test_result_status(self, requests_mock: Mocker):
    """Test editing a test result."""
    requests_mock.put(f"{TestAdaptavist._adaptavist_api_url}/testrun/JQA-R123/testcase/JQA-T123/testresult")
    adaptavist = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    assert adaptavist.edit_test_result_status(test_run_key="JQA-R123", test_case_key="JQA-T123", status=STATUS_FAIL)
    # Test that executor and assignee are submitted as null if empty string is given
    with patch("adaptavist.Adaptavist._put") as put:
        adaptavist.edit_test_result_status(test_run_key="JQA-R123", test_case_key="JQA-T123", status=STATUS_FAIL, assignee="", executor="")
        assert put.call_args_list[0][0][1]["assignedTo"] is None
        assert put.call_args_list[0][0][1]["executedBy"] is None
    # Test that optional fields are sent if set
    with patch("adaptavist.Adaptavist._put") as put:
        adaptavist.edit_test_result_status(test_run_key="JQA-R123",
                                           test_case_key="JQA-T123",
                                           status=STATUS_FAIL,
                                           environment="Firefox",
                                           comment="Test",
                                           execute_time=3,
                                           issue_links=["JQA-123"])
        assert put.call_args_list[0][0][1]["environment"] == "Firefox"
        assert put.call_args_list[0][0][1]["comment"] == "Test"
        assert put.call_args_list[0][0][1]["executionTime"] == 3000
        assert put.call_args_list[0][0][1]["issueLinks"] == ["JQA-123"]
    # Test that optional fields are not sent if not set
    with patch("adaptavist.Adaptavist._put") as put:
        adaptavist.edit_test_result_status(test_run_key="JQA-R123", test_case_key="JQA-T123", status=STATUS_PASS)
        # BUG FIX: the request body is a dict — `hasattr(body, key)` is always
        # False for dict keys, so the old assertions could never fail; use
        # key membership instead.
        assert "environment" not in put.call_args_list[0][0][1]
        assert "comment" not in put.call_args_list[0][0][1]
        assert "executionTime" not in put.call_args_list[0][0][1]
        assert "issueLinks" not in put.call_args_list[0][0][1]
def test_get_test_result_attachement(self, requests_mock: Mocker):
    """Test getting test result attachments."""
    # NOTE(review): "attachement" is a typo in the test name; kept to avoid
    # changing the collected test id.
    client = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    requests_mock.get(
        f"{TestAdaptavist._adaptavist_api_url}/testresult/123/attachments",
        text=load_fixture("get_test_result_attachments.json"),
    )
    with patch("adaptavist.Adaptavist.get_test_result", return_value={"id": 123}):
        attachments = client.get_test_result_attachment(test_run_key="JQA-R123", test_case_key="JQA-T123")
        assert len(attachments) == 2
def test_add_test_result_attachment(self):
    """Test adding an attachment.

    Exercises both string-path and IO-object attachments; all file and
    network I/O is patched out.
    """
    adaptavist = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    # Happy path: attachment given together with an explicit file name.
    with patch("adaptavist.Adaptavist.get_test_result", return_value={"id": 123}), \
         patch("builtins.open", mock_open()), \
         patch("requests_toolbelt.MultipartEncoder"), \
         patch("requests.post"):
        assert adaptavist.add_test_result_attachment(test_run_key="JQA-R123", test_case_key="JQA-T123", attachment="testfile", filename="testfile")
    # Test that a file name is needed, if no file handle is given
    # NOTE(review): the library signals the missing filename via SyntaxError —
    # an unusual exception choice, but it is the behavior pinned here.
    with patch("adaptavist.Adaptavist.get_test_result", return_value={"id": 123}), \
         raises(SyntaxError):
        assert adaptavist.add_test_result_attachment(test_run_key="JQA-R123", test_case_key="JQA-T123", attachment="testfile")
    # Test that we can handle IO objects (filename taken from the object's name)
    with patch("adaptavist.Adaptavist.get_test_result", return_value={"id": 123}), \
         patch("requests_toolbelt.MultipartEncoder"), \
         patch("requests.post"):
        attachment = BytesIO(b"Testdata")
        attachment.name = "testdata.txt"
        assert adaptavist.add_test_result_attachment(test_run_key="JQA-R123", test_case_key="JQA-T123", attachment=attachment)
def test_edit_test_script_status(self, requests_mock: Mocker):
    """Test editing a test script status.

    Covers the happy path plus the payload rules for optional fields:
    empty assignee/executor strings become JSON null, set optional fields
    are submitted, and unset optional fields are omitted entirely.
    """
    requests_mock.put(f"{TestAdaptavist._adaptavist_api_url}/testrun/JQA-R123/testcase/JQA-T123/testresult")
    adaptavist = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    with patch("adaptavist.Adaptavist.get_test_result",
               return_value={
                   "id": 123, "status": STATUS_FAIL, "scriptResults": [{
                       "index": 0, "status": STATUS_FAIL
                   }]
               }):
        assert adaptavist.edit_test_script_status(test_run_key="JQA-R123", test_case_key="JQA-T123", step=1, status=STATUS_PASS)
    # Test that executor and assignee are submitted as null if empty string is given
    with patch("adaptavist.Adaptavist.get_test_result", return_value={"id": 123, "status": STATUS_FAIL}), \
         patch("adaptavist.Adaptavist._put") as put:
        adaptavist.edit_test_script_status(test_run_key="JQA-R123", test_case_key="JQA-T123", step=1, status=STATUS_PASS, assignee="", executor="")
        payload = put.call_args_list[0][0][1]
        assert payload["assignedTo"] is None
        assert payload["executedBy"] is None
    # Test that optional fields are sent if set
    with patch("adaptavist.Adaptavist.get_test_result", return_value={"id": 123, "status": STATUS_FAIL}), \
         patch("adaptavist.Adaptavist._put") as put:
        adaptavist.edit_test_script_status(test_run_key="JQA-R123",
                                           test_case_key="JQA-T123",
                                           step=1,
                                           status=STATUS_PASS,
                                           environment="Firefox",
                                           assignee="Testuser",
                                           executor="Testuser")
        payload = put.call_args_list[0][0][1]
        assert payload["environment"] == "Firefox"
        assert payload["assignedTo"] == "Testuser"
        assert payload["executedBy"] == "Testuser"
    # Test that optional fields are not sent if not set.
    # BUG FIX: the original used `hasattr(payload, "environment")`, which is
    # always False for dict keys, so these assertions could never fail.
    # Membership tests actually verify the keys are absent from the payload.
    with patch("adaptavist.Adaptavist.get_test_result", return_value={"id": 123, "status": STATUS_FAIL}), \
         patch("adaptavist.Adaptavist._put") as put:
        adaptavist.edit_test_script_status(test_run_key="JQA-R123", test_case_key="JQA-T123", step=1, status=STATUS_PASS)
        payload = put.call_args_list[0][0][1]
        assert "environment" not in payload
        assert "assignedTo" not in payload
        assert "executedBy" not in payload
def test_get_test_script_attachment(self, requests_mock: Mocker):
    """Test getting test script result attachments."""
    # The attachment endpoint is addressed by result id and zero-based step.
    requests_mock.get(f"{TestAdaptavist._adaptavist_api_url}/testresult/123/step/0/attachments", text=load_fixture("get_test_result_attachments.json"))
    client = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    with patch("adaptavist.Adaptavist.get_test_result", return_value={"id": 123}):
        result = client.get_test_script_attachment(test_run_key="JQA-R123", test_case_key="JQA-T123", step=1)
    assert len(result) == 2
def test_add_test_script_attachment(self):
    """Test adding an attachment."""
    client = Adaptavist(jira_server=TestAdaptavist._jira_url, jira_username="User", jira_password="Password")
    # Happy path: attachment given by name together with an explicit filename.
    with patch("adaptavist.Adaptavist.get_test_result", return_value={"id": 123}):
        with patch("builtins.open", mock_open()):
            with patch("requests_toolbelt.MultipartEncoder"):
                with patch("requests.post"):
                    assert client.add_test_script_attachment(test_run_key="JQA-R123", test_case_key="JQA-T123", step=1, attachment="testfile", filename="testfile")
    # A file name is required when no file handle is given.
    with patch("adaptavist.Adaptavist.get_test_result", return_value={"id": 123}):
        with raises(SyntaxError):
            assert client.add_test_script_attachment(test_run_key="JQA-R123", test_case_key="JQA-T123", step=1, attachment="testfile")
    # IO objects that carry their own name are accepted directly.
    with patch("adaptavist.Adaptavist.get_test_result", return_value={"id": 123}):
        with patch("requests_toolbelt.MultipartEncoder"):
            with patch("requests.post"):
                handle = BytesIO(b"Testdata")
                handle.name = "testdata.txt"
                assert client.add_test_script_attachment(test_run_key="JQA-R123", test_case_key="JQA-T123", step=1, attachment=handle)
| 64.54479
| 159
| 0.682943
| 4,546
| 35,306
| 5.038055
| 0.056533
| 0.100424
| 0.087325
| 0.060778
| 0.885474
| 0.86993
| 0.85098
| 0.815395
| 0.769463
| 0.749247
| 0
| 0.024009
| 0.189543
| 35,306
| 546
| 160
| 64.663004
| 0.776403
| 0.094432
| 0
| 0.432796
| 0
| 0.034946
| 0.286461
| 0.180416
| 0
| 0
| 0
| 0
| 0.252688
| 1
| 0.094086
| false
| 0.120968
| 0.021505
| 0
| 0.123656
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
00cf437c53a6909117975e46fbd7af7484ec2beb
| 3,087
|
py
|
Python
|
bench/drawingarea.py
|
Edlward/foc_esc
|
58425c0ce5865c077ce313bd672b971380f2edad
|
[
"MIT"
] | 67
|
2015-07-12T18:08:43.000Z
|
2022-03-05T07:05:01.000Z
|
bench/drawingarea.py
|
Edlward/foc_esc
|
58425c0ce5865c077ce313bd672b971380f2edad
|
[
"MIT"
] | 1
|
2015-09-08T14:01:46.000Z
|
2015-09-09T01:36:22.000Z
|
bench/drawingarea.py
|
gtoonstra/foc_esc
|
58425c0ce5865c077ce313bd672b971380f2edad
|
[
"MIT"
] | 44
|
2015-07-17T14:59:07.000Z
|
2021-02-20T13:55:14.000Z
|
from gi.repository import Gtk, Gdk, GObject
class Graph2(Gtk.DrawingArea):
    """Scrolling two-channel line graph rendered with cairo.

    Holds a fixed 1024-sample history per channel; appending a new sample
    discards the oldest one, so the plot scrolls left over time.
    """

    def __init__(self, ymin, ymax):
        Gtk.DrawingArea.__init__(self)
        self.set_size_request(1024, 200)
        # Map the value span [ymin, ymax] onto the 200-pixel widget height.
        self.scale = 200.0 / (ymax + abs(ymin))
        # Pixel y-coordinate corresponding to value 0 (larger y is lower).
        self.mid = ymax * self.scale
        self.connect("draw", self.draw)
        self.connect("configure_event", self.configure_event)
        # Per-channel pixel histories, pre-filled with the baseline.
        self.a1 = [self.mid] * 1024
        self.a2 = [self.mid] * 1024

    def configure_event(self, widget, event):
        """Handle resize/configure events; nothing needs recomputing here."""
        # Fixed the unused local: the original fetched widget.get_allocation()
        # into a variable that was never read.
        return True

    def append(self, v1, v2):
        """Append one sample per channel, dropping the oldest, and redraw."""
        self.a1.pop(0)
        self.a1.append(self.mid - v1 * self.scale)
        self.a2.pop(0)
        self.a2.append(self.mid - v2 * self.scale)
        self.queue_draw()

    def draw(self, widget, cr):
        """Render both channel histories as polylines (ch1 red, ch2 green)."""
        cr.set_line_width(0.5)
        cr.set_source_rgb(1.0, 0, 0)
        cr.move_to(0, self.a1[0])
        for i in range(1, len(self.a1)):
            cr.line_to(i, self.a1[i])
        cr.stroke()
        cr.set_source_rgb(0.0, 1.0, 0)
        cr.move_to(0, self.a2[0])
        for i in range(1, len(self.a2)):
            cr.line_to(i, self.a2[i])
        cr.stroke()
        return True
class Graph3(Gtk.DrawingArea):
    """Scrolling three-channel line graph rendered with cairo.

    Same model as Graph2 with a third channel: a fixed 1024-sample history
    per channel, scrolled left as new samples arrive.
    """

    def __init__(self, ymin, ymax):
        Gtk.DrawingArea.__init__(self)
        self.set_size_request(1024, 200)
        # Map the value span [ymin, ymax] onto the 200-pixel widget height.
        self.scale = 200.0 / (ymax + abs(ymin))
        # Pixel y-coordinate corresponding to value 0 (larger y is lower).
        self.mid = ymax * self.scale
        self.connect("draw", self.draw)
        self.connect("configure_event", self.configure_event)
        # Per-channel pixel histories, pre-filled with the baseline.
        self.a1 = [self.mid] * 1024
        self.a2 = [self.mid] * 1024
        self.a3 = [self.mid] * 1024

    def configure_event(self, widget, event):
        """Handle resize/configure events; nothing needs recomputing here."""
        # Fixed the unused local: the original fetched widget.get_allocation()
        # into a variable that was never read.
        return True

    def append(self, v1, v2, v3):
        """Append one sample per channel, dropping the oldest, and redraw."""
        self.a1.pop(0)
        self.a1.append(self.mid - v1 * self.scale)
        self.a2.pop(0)
        self.a2.append(self.mid - v2 * self.scale)
        self.a3.pop(0)
        self.a3.append(self.mid - v3 * self.scale)
        self.queue_draw()

    def draw(self, widget, cr):
        """Render the channel histories (ch1 red, ch2 green, ch3 blue)."""
        cr.set_line_width(0.5)
        cr.set_source_rgb(1.0, 0, 0)
        cr.move_to(0, self.a1[0])
        for i in range(1, len(self.a1)):
            cr.line_to(i, self.a1[i])
        cr.stroke()
        cr.set_source_rgb(0.0, 1.0, 0)
        cr.move_to(0, self.a2[0])
        for i in range(1, len(self.a2)):
            cr.line_to(i, self.a2[i])
        cr.stroke()
        cr.set_source_rgb(0.0, 0.0, 1.0)
        cr.move_to(0, self.a3[0])
        for i in range(1, len(self.a3)):
            cr.line_to(i, self.a3[i])
        cr.stroke()
        return True
| 31.824742
| 61
| 0.567217
| 469
| 3,087
| 3.614072
| 0.162047
| 0.049558
| 0.053687
| 0.041298
| 0.940413
| 0.915634
| 0.907375
| 0.907375
| 0.895575
| 0.886136
| 0
| 0.06131
| 0.302559
| 3,087
| 96
| 62
| 32.15625
| 0.725964
| 0.09621
| 0
| 0.84
| 0
| 0
| 0.013654
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.106667
| false
| 0
| 0.013333
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dab4579d702a848311339ac8977c14dccf25a458
| 238
|
py
|
Python
|
balebot/handlers/__init__.py
|
ehsanbarkhordar/reminder_bot
|
c72e664c71658dd2ec30bb610ffc836b53f000b9
|
[
"MIT"
] | 24
|
2018-07-08T12:17:55.000Z
|
2022-03-05T22:00:28.000Z
|
balebot/handlers/__init__.py
|
ehsanbarkhordar/reminder_bot
|
c72e664c71658dd2ec30bb610ffc836b53f000b9
|
[
"MIT"
] | 21
|
2018-08-30T03:18:25.000Z
|
2022-03-11T23:36:16.000Z
|
balebot/handlers/__init__.py
|
ehsanbarkhordar/reminder_bot
|
c72e664c71658dd2ec30bb610ffc836b53f000b9
|
[
"MIT"
] | 12
|
2018-07-07T08:29:12.000Z
|
2022-03-05T22:00:05.000Z
|
from balebot.handlers.handler import Handler
from balebot.handlers.command_handler import CommandHandler
from balebot.handlers.message_handler import MessageHandler
from balebot.handlers.quoted_message_handler import QuotedMessageHandler
| 47.6
| 72
| 0.89916
| 28
| 238
| 7.5
| 0.392857
| 0.209524
| 0.361905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067227
| 238
| 4
| 73
| 59.5
| 0.945946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
dadc05745a3021ec9080af010196fd98d5a2a847
| 216
|
py
|
Python
|
mlprogram/datasets/hearthstone/__init__.py
|
HiroakiMikami/mlprogram
|
573e94c567064705fa65267dd83946bf183197de
|
[
"MIT"
] | 9
|
2020-05-24T11:25:01.000Z
|
2022-03-28T15:32:10.000Z
|
mlprogram/datasets/hearthstone/__init__.py
|
HiroakiMikami/mlprogram
|
573e94c567064705fa65267dd83946bf183197de
|
[
"MIT"
] | 87
|
2020-05-09T08:56:55.000Z
|
2022-03-31T14:46:45.000Z
|
mlprogram/datasets/hearthstone/__init__.py
|
HiroakiMikami/NL2Prog
|
573e94c567064705fa65267dd83946bf183197de
|
[
"MIT"
] | 3
|
2021-02-22T20:38:29.000Z
|
2021-11-11T18:48:44.000Z
|
from mlprogram.datasets.hearthstone.download import download # noqa
from mlprogram.datasets.hearthstone.functions import SplitValue # noqa
from mlprogram.datasets.hearthstone.functions import TokenizeQuery # noqa
| 54
| 74
| 0.847222
| 24
| 216
| 7.625
| 0.416667
| 0.213115
| 0.344262
| 0.52459
| 0.557377
| 0.557377
| 0.557377
| 0
| 0
| 0
| 0
| 0
| 0.097222
| 216
| 3
| 75
| 72
| 0.938462
| 0.064815
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
daec11661098ac8cb670594f9f557bee239e2ac9
| 146
|
py
|
Python
|
modules/sr/robot/utils.py
|
13ros27/competition-simulator
|
4dfea0e92c12fa9e9656ce273db3d240aee34bc4
|
[
"MIT"
] | null | null | null |
modules/sr/robot/utils.py
|
13ros27/competition-simulator
|
4dfea0e92c12fa9e9656ce273db3d240aee34bc4
|
[
"MIT"
] | null | null | null |
modules/sr/robot/utils.py
|
13ros27/competition-simulator
|
4dfea0e92c12fa9e9656ce273db3d240aee34bc4
|
[
"MIT"
] | null | null | null |
def map_to_range(old_min, old_max, new_min, new_max, value):
    """Linearly rescale *value* from [old_min, old_max] into [new_min, new_max]."""
    fraction = (value - old_min) / (old_max - old_min)
    return fraction * (new_max - new_min) + new_min
| 48.666667
| 84
| 0.691781
| 27
| 146
| 3.296296
| 0.37037
| 0.202247
| 0.202247
| 0.269663
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164384
| 146
| 2
| 85
| 73
| 0.729508
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
977fc2a804cfdbabfb3f6020363ede8bc259b052
| 58,714
|
py
|
Python
|
sdk/python/pulumi_f5bigip/ltm/monitor.py
|
pulumi/pulumi-f5bigip
|
4bce074f8bd7cb42f359ef4814ca5b437230fd1c
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2018-12-21T23:30:33.000Z
|
2021-10-12T16:38:27.000Z
|
sdk/python/pulumi_f5bigip/ltm/monitor.py
|
pulumi/pulumi-f5bigip
|
4bce074f8bd7cb42f359ef4814ca5b437230fd1c
|
[
"ECL-2.0",
"Apache-2.0"
] | 61
|
2019-01-09T01:50:19.000Z
|
2022-03-31T15:27:17.000Z
|
sdk/python/pulumi_f5bigip/ltm/monitor.py
|
pulumi/pulumi-f5bigip
|
4bce074f8bd7cb42f359ef4814ca5b437230fd1c
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-10-05T10:36:30.000Z
|
2019-10-05T10:36:30.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['MonitorArgs', 'Monitor']
@pulumi.input_type
class MonitorArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
parent: pulumi.Input[str],
adaptive: Optional[pulumi.Input[str]] = None,
adaptive_limit: Optional[pulumi.Input[int]] = None,
compatibility: Optional[pulumi.Input[str]] = None,
database: Optional[pulumi.Input[str]] = None,
destination: Optional[pulumi.Input[str]] = None,
filename: Optional[pulumi.Input[str]] = None,
interval: Optional[pulumi.Input[int]] = None,
ip_dscp: Optional[pulumi.Input[int]] = None,
manual_resume: Optional[pulumi.Input[str]] = None,
mode: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
receive: Optional[pulumi.Input[str]] = None,
receive_disable: Optional[pulumi.Input[str]] = None,
reverse: Optional[pulumi.Input[str]] = None,
send: Optional[pulumi.Input[str]] = None,
time_until_up: Optional[pulumi.Input[int]] = None,
timeout: Optional[pulumi.Input[int]] = None,
transparent: Optional[pulumi.Input[str]] = None,
up_interval: Optional[pulumi.Input[int]] = None,
username: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Monitor resource.
:param pulumi.Input[str] name: Specifies the Name of the LTM Monitor.Name of Monitor should be full path,full path is the combination of the `partition + monitor name`,For ex:`/Common/test-ltm-monitor`.
:param pulumi.Input[str] parent: Parent monitor for the system to use for setting initial values for the new monitor.
:param pulumi.Input[str] adaptive: Specifies whether adaptive response time monitoring is enabled for this monitor. The default is `disabled`.
:param pulumi.Input[int] adaptive_limit: Specifies the absolute number of milliseconds that may not be exceeded by a monitor probe, regardless of Allowed Divergence.
:param pulumi.Input[str] compatibility: Specifies, when enabled, that the SSL options setting (in OpenSSL) is set to ALL. Accepts 'enabled' or 'disabled' values, the default value is 'enabled'.
:param pulumi.Input[str] database: Specifies the database in which the user is created
:param pulumi.Input[str] destination: Specify an alias address for monitoring
:param pulumi.Input[str] filename: Specifies the full path and file name of the file that the system attempts to download. The health check is successful if the system can download the file.
:param pulumi.Input[int] interval: Specifies, in seconds, the frequency at which the system issues the monitor check when either the resource is down or the status of the resource is unknown. The default is `5`
:param pulumi.Input[int] ip_dscp: Displays the differentiated services code point (DSCP).The default is `0 (zero)`.
:param pulumi.Input[str] manual_resume: Specifies whether the system automatically changes the status of a resource to Enabled at the next successful monitor check.
:param pulumi.Input[str] mode: Specifies the data transfer process (DTP) mode. The default value is passive. The options are passive (Specifies that the monitor sends a data transfer request to the FTP server. When the FTP server receives the request, the FTP server then initiates and establishes the data connection.) and active (Specifies that the monitor initiates and establishes the data connection with the FTP server.).
:param pulumi.Input[str] password: Specifies the password if the monitored target requires authentication
:param pulumi.Input[str] receive: Specifies the regular expression representing the text string that the monitor looks for in the returned resource.
:param pulumi.Input[str] receive_disable: The system marks the node or pool member disabled when its response matches Receive Disable String but not Receive String.
:param pulumi.Input[str] reverse: Instructs the system to mark the target resource down when the test is successful.
:param pulumi.Input[str] send: Specifies the text string that the monitor sends to the target object.
:param pulumi.Input[int] time_until_up: Specifies the number of seconds to wait after a resource first responds correctly to the monitor before setting the resource to up.
:param pulumi.Input[int] timeout: Specifies the number of seconds the target has in which to respond to the monitor request. The default is `16` seconds
:param pulumi.Input[str] transparent: Specifies whether the monitor operates in transparent mode.
:param pulumi.Input[int] up_interval: Specifies the interval for the system to use to perform the health check when a resource is up. The default is `0(Disabled)`
:param pulumi.Input[str] username: Specifies the user name if the monitored target requires authentication
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "parent", parent)
if adaptive is not None:
pulumi.set(__self__, "adaptive", adaptive)
if adaptive_limit is not None:
pulumi.set(__self__, "adaptive_limit", adaptive_limit)
if compatibility is not None:
pulumi.set(__self__, "compatibility", compatibility)
if database is not None:
pulumi.set(__self__, "database", database)
if destination is not None:
pulumi.set(__self__, "destination", destination)
if filename is not None:
pulumi.set(__self__, "filename", filename)
if interval is not None:
pulumi.set(__self__, "interval", interval)
if ip_dscp is not None:
pulumi.set(__self__, "ip_dscp", ip_dscp)
if manual_resume is not None:
pulumi.set(__self__, "manual_resume", manual_resume)
if mode is not None:
pulumi.set(__self__, "mode", mode)
if password is not None:
pulumi.set(__self__, "password", password)
if receive is not None:
pulumi.set(__self__, "receive", receive)
if receive_disable is not None:
pulumi.set(__self__, "receive_disable", receive_disable)
if reverse is not None:
pulumi.set(__self__, "reverse", reverse)
if send is not None:
pulumi.set(__self__, "send", send)
if time_until_up is not None:
pulumi.set(__self__, "time_until_up", time_until_up)
if timeout is not None:
pulumi.set(__self__, "timeout", timeout)
if transparent is not None:
pulumi.set(__self__, "transparent", transparent)
if up_interval is not None:
pulumi.set(__self__, "up_interval", up_interval)
if username is not None:
pulumi.set(__self__, "username", username)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Specifies the Name of the LTM Monitor.Name of Monitor should be full path,full path is the combination of the `partition + monitor name`,For ex:`/Common/test-ltm-monitor`.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def parent(self) -> pulumi.Input[str]:
"""
Parent monitor for the system to use for setting initial values for the new monitor.
"""
return pulumi.get(self, "parent")
@parent.setter
def parent(self, value: pulumi.Input[str]):
pulumi.set(self, "parent", value)
@property
@pulumi.getter
def adaptive(self) -> Optional[pulumi.Input[str]]:
"""
Specifies whether adaptive response time monitoring is enabled for this monitor. The default is `disabled`.
"""
return pulumi.get(self, "adaptive")
@adaptive.setter
def adaptive(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "adaptive", value)
@property
@pulumi.getter(name="adaptiveLimit")
def adaptive_limit(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the absolute number of milliseconds that may not be exceeded by a monitor probe, regardless of Allowed Divergence.
"""
return pulumi.get(self, "adaptive_limit")
@adaptive_limit.setter
def adaptive_limit(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "adaptive_limit", value)
@property
@pulumi.getter
def compatibility(self) -> Optional[pulumi.Input[str]]:
"""
Specifies, when enabled, that the SSL options setting (in OpenSSL) is set to ALL. Accepts 'enabled' or 'disabled' values, the default value is 'enabled'.
"""
return pulumi.get(self, "compatibility")
@compatibility.setter
def compatibility(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "compatibility", value)
@property
@pulumi.getter
def database(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the database in which the user is created
"""
return pulumi.get(self, "database")
@database.setter
def database(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "database", value)
@property
@pulumi.getter
def destination(self) -> Optional[pulumi.Input[str]]:
"""
Specify an alias address for monitoring
"""
return pulumi.get(self, "destination")
@destination.setter
def destination(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "destination", value)
@property
@pulumi.getter
def filename(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the full path and file name of the file that the system attempts to download. The health check is successful if the system can download the file.
"""
return pulumi.get(self, "filename")
@filename.setter
def filename(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "filename", value)
@property
@pulumi.getter
def interval(self) -> Optional[pulumi.Input[int]]:
"""
Specifies, in seconds, the frequency at which the system issues the monitor check when either the resource is down or the status of the resource is unknown. The default is `5`
"""
return pulumi.get(self, "interval")
@interval.setter
def interval(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "interval", value)
@property
@pulumi.getter(name="ipDscp")
def ip_dscp(self) -> Optional[pulumi.Input[int]]:
"""
Displays the differentiated services code point (DSCP).The default is `0 (zero)`.
"""
return pulumi.get(self, "ip_dscp")
@ip_dscp.setter
def ip_dscp(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "ip_dscp", value)
@property
@pulumi.getter(name="manualResume")
def manual_resume(self) -> Optional[pulumi.Input[str]]:
"""
Specifies whether the system automatically changes the status of a resource to Enabled at the next successful monitor check.
"""
return pulumi.get(self, "manual_resume")
@manual_resume.setter
def manual_resume(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "manual_resume", value)
@property
@pulumi.getter
def mode(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the data transfer process (DTP) mode. The default value is passive. The options are passive (Specifies that the monitor sends a data transfer request to the FTP server. When the FTP server receives the request, the FTP server then initiates and establishes the data connection.) and active (Specifies that the monitor initiates and establishes the data connection with the FTP server.).
"""
return pulumi.get(self, "mode")
@mode.setter
def mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mode", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the password if the monitored target requires authentication
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter
def receive(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the regular expression representing the text string that the monitor looks for in the returned resource.
"""
return pulumi.get(self, "receive")
@receive.setter
def receive(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "receive", value)
@property
@pulumi.getter(name="receiveDisable")
def receive_disable(self) -> Optional[pulumi.Input[str]]:
"""
The system marks the node or pool member disabled when its response matches Receive Disable String but not Receive String.
"""
return pulumi.get(self, "receive_disable")
@receive_disable.setter
def receive_disable(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "receive_disable", value)
@property
@pulumi.getter
def reverse(self) -> Optional[pulumi.Input[str]]:
"""
Instructs the system to mark the target resource down when the test is successful.
"""
return pulumi.get(self, "reverse")
@reverse.setter
def reverse(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reverse", value)
@property
@pulumi.getter
def send(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the text string that the monitor sends to the target object.
"""
return pulumi.get(self, "send")
@send.setter
def send(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "send", value)
@property
@pulumi.getter(name="timeUntilUp")
def time_until_up(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the number of seconds to wait after a resource first responds correctly to the monitor before setting the resource to up.
"""
return pulumi.get(self, "time_until_up")
@time_until_up.setter
def time_until_up(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "time_until_up", value)
@property
@pulumi.getter
def timeout(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the number of seconds the target has in which to respond to the monitor request. The default is `16` seconds
"""
return pulumi.get(self, "timeout")
@timeout.setter
def timeout(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout", value)
@property
@pulumi.getter
def transparent(self) -> Optional[pulumi.Input[str]]:
"""
Specifies whether the monitor operates in transparent mode.
"""
return pulumi.get(self, "transparent")
@transparent.setter
def transparent(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "transparent", value)
@property
@pulumi.getter(name="upInterval")
def up_interval(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the interval for the system to use to perform the health check when a resource is up. The default is `0(Disabled)`
"""
return pulumi.get(self, "up_interval")
@up_interval.setter
def up_interval(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "up_interval", value)
@property
@pulumi.getter
def username(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the user name if the monitored target requires authentication
"""
return pulumi.get(self, "username")
@username.setter
def username(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "username", value)
@pulumi.input_type
class _MonitorState:
def __init__(__self__, *,
adaptive: Optional[pulumi.Input[str]] = None,
adaptive_limit: Optional[pulumi.Input[int]] = None,
compatibility: Optional[pulumi.Input[str]] = None,
database: Optional[pulumi.Input[str]] = None,
destination: Optional[pulumi.Input[str]] = None,
filename: Optional[pulumi.Input[str]] = None,
interval: Optional[pulumi.Input[int]] = None,
ip_dscp: Optional[pulumi.Input[int]] = None,
manual_resume: Optional[pulumi.Input[str]] = None,
mode: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
parent: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
receive: Optional[pulumi.Input[str]] = None,
receive_disable: Optional[pulumi.Input[str]] = None,
reverse: Optional[pulumi.Input[str]] = None,
send: Optional[pulumi.Input[str]] = None,
time_until_up: Optional[pulumi.Input[int]] = None,
timeout: Optional[pulumi.Input[int]] = None,
transparent: Optional[pulumi.Input[str]] = None,
up_interval: Optional[pulumi.Input[int]] = None,
username: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Monitor resources.
:param pulumi.Input[str] adaptive: Specifies whether adaptive response time monitoring is enabled for this monitor. The default is `disabled`.
:param pulumi.Input[int] adaptive_limit: Specifies the absolute number of milliseconds that may not be exceeded by a monitor probe, regardless of Allowed Divergence.
:param pulumi.Input[str] compatibility: Specifies, when enabled, that the SSL options setting (in OpenSSL) is set to ALL. Accepts 'enabled' or 'disabled' values, the default value is 'enabled'.
:param pulumi.Input[str] database: Specifies the database in which the user is created
:param pulumi.Input[str] destination: Specify an alias address for monitoring
:param pulumi.Input[str] filename: Specifies the full path and file name of the file that the system attempts to download. The health check is successful if the system can download the file.
:param pulumi.Input[int] interval: Specifies, in seconds, the frequency at which the system issues the monitor check when either the resource is down or the status of the resource is unknown. The default is `5`
:param pulumi.Input[int] ip_dscp: Displays the differentiated services code point (DSCP).The default is `0 (zero)`.
:param pulumi.Input[str] manual_resume: Specifies whether the system automatically changes the status of a resource to Enabled at the next successful monitor check.
:param pulumi.Input[str] mode: Specifies the data transfer process (DTP) mode. The default value is passive. The options are passive (Specifies that the monitor sends a data transfer request to the FTP server. When the FTP server receives the request, the FTP server then initiates and establishes the data connection.) and active (Specifies that the monitor initiates and establishes the data connection with the FTP server.).
:param pulumi.Input[str] name: Specifies the Name of the LTM Monitor.Name of Monitor should be full path,full path is the combination of the `partition + monitor name`,For ex:`/Common/test-ltm-monitor`.
:param pulumi.Input[str] parent: Parent monitor for the system to use for setting initial values for the new monitor.
:param pulumi.Input[str] password: Specifies the password if the monitored target requires authentication
:param pulumi.Input[str] receive: Specifies the regular expression representing the text string that the monitor looks for in the returned resource.
:param pulumi.Input[str] receive_disable: The system marks the node or pool member disabled when its response matches Receive Disable String but not Receive String.
:param pulumi.Input[str] reverse: Instructs the system to mark the target resource down when the test is successful.
:param pulumi.Input[str] send: Specifies the text string that the monitor sends to the target object.
:param pulumi.Input[int] time_until_up: Specifies the number of seconds to wait after a resource first responds correctly to the monitor before setting the resource to up.
:param pulumi.Input[int] timeout: Specifies the number of seconds the target has in which to respond to the monitor request. The default is `16` seconds
:param pulumi.Input[str] transparent: Specifies whether the monitor operates in transparent mode.
:param pulumi.Input[int] up_interval: Specifies the interval for the system to use to perform the health check when a resource is up. The default is `0(Disabled)`
:param pulumi.Input[str] username: Specifies the user name if the monitored target requires authentication
"""
if adaptive is not None:
pulumi.set(__self__, "adaptive", adaptive)
if adaptive_limit is not None:
pulumi.set(__self__, "adaptive_limit", adaptive_limit)
if compatibility is not None:
pulumi.set(__self__, "compatibility", compatibility)
if database is not None:
pulumi.set(__self__, "database", database)
if destination is not None:
pulumi.set(__self__, "destination", destination)
if filename is not None:
pulumi.set(__self__, "filename", filename)
if interval is not None:
pulumi.set(__self__, "interval", interval)
if ip_dscp is not None:
pulumi.set(__self__, "ip_dscp", ip_dscp)
if manual_resume is not None:
pulumi.set(__self__, "manual_resume", manual_resume)
if mode is not None:
pulumi.set(__self__, "mode", mode)
if name is not None:
pulumi.set(__self__, "name", name)
if parent is not None:
pulumi.set(__self__, "parent", parent)
if password is not None:
pulumi.set(__self__, "password", password)
if receive is not None:
pulumi.set(__self__, "receive", receive)
if receive_disable is not None:
pulumi.set(__self__, "receive_disable", receive_disable)
if reverse is not None:
pulumi.set(__self__, "reverse", reverse)
if send is not None:
pulumi.set(__self__, "send", send)
if time_until_up is not None:
pulumi.set(__self__, "time_until_up", time_until_up)
if timeout is not None:
pulumi.set(__self__, "timeout", timeout)
if transparent is not None:
pulumi.set(__self__, "transparent", transparent)
if up_interval is not None:
pulumi.set(__self__, "up_interval", up_interval)
if username is not None:
pulumi.set(__self__, "username", username)
@property
@pulumi.getter
def adaptive(self) -> Optional[pulumi.Input[str]]:
    """
    Specifies whether adaptive response time monitoring is enabled for this monitor. The default is `disabled`.
    """
    return pulumi.get(self, "adaptive")

@adaptive.setter
def adaptive(self, value: Optional[pulumi.Input[str]]):
    # Write the new value into the Pulumi-managed property bag.
    pulumi.set(self, "adaptive", value)
@property
@pulumi.getter(name="adaptiveLimit")
def adaptive_limit(self) -> Optional[pulumi.Input[int]]:
    """
    Specifies the absolute number of milliseconds that may not be exceeded by a monitor probe, regardless of Allowed Divergence.
    """
    return pulumi.get(self, "adaptive_limit")

@adaptive_limit.setter
def adaptive_limit(self, value: Optional[pulumi.Input[int]]):
    # Write the new value into the Pulumi-managed property bag.
    pulumi.set(self, "adaptive_limit", value)
@property
@pulumi.getter
def compatibility(self) -> Optional[pulumi.Input[str]]:
    """
    Specifies, when enabled, that the SSL options setting (in OpenSSL) is set to ALL. Accepts 'enabled' or 'disabled' values, the default value is 'enabled'.
    """
    return pulumi.get(self, "compatibility")

@compatibility.setter
def compatibility(self, value: Optional[pulumi.Input[str]]):
    # Write the new value into the Pulumi-managed property bag.
    pulumi.set(self, "compatibility", value)
@property
@pulumi.getter
def database(self) -> Optional[pulumi.Input[str]]:
    """
    Specifies the database in which the user is created
    """
    return pulumi.get(self, "database")

@database.setter
def database(self, value: Optional[pulumi.Input[str]]):
    # Write the new value into the Pulumi-managed property bag.
    pulumi.set(self, "database", value)
@property
@pulumi.getter
def destination(self) -> Optional[pulumi.Input[str]]:
    """
    Specify an alias address for monitoring
    """
    return pulumi.get(self, "destination")

@destination.setter
def destination(self, value: Optional[pulumi.Input[str]]):
    # Write the new value into the Pulumi-managed property bag.
    pulumi.set(self, "destination", value)
@property
@pulumi.getter
def filename(self) -> Optional[pulumi.Input[str]]:
    """
    Specifies the full path and file name of the file that the system attempts to download. The health check is successful if the system can download the file.
    """
    return pulumi.get(self, "filename")

@filename.setter
def filename(self, value: Optional[pulumi.Input[str]]):
    # Write the new value into the Pulumi-managed property bag.
    pulumi.set(self, "filename", value)
@property
@pulumi.getter
def interval(self) -> Optional[pulumi.Input[int]]:
    """
    Specifies, in seconds, the frequency at which the system issues the monitor check when either the resource is down or the status of the resource is unknown. The default is `5`
    """
    return pulumi.get(self, "interval")

@interval.setter
def interval(self, value: Optional[pulumi.Input[int]]):
    # Write the new value into the Pulumi-managed property bag.
    pulumi.set(self, "interval", value)
@property
@pulumi.getter(name="ipDscp")
def ip_dscp(self) -> Optional[pulumi.Input[int]]:
    """
    Displays the differentiated services code point (DSCP).The default is `0 (zero)`.
    """
    return pulumi.get(self, "ip_dscp")

@ip_dscp.setter
def ip_dscp(self, value: Optional[pulumi.Input[int]]):
    # Write the new value into the Pulumi-managed property bag.
    pulumi.set(self, "ip_dscp", value)
@property
@pulumi.getter(name="manualResume")
def manual_resume(self) -> Optional[pulumi.Input[str]]:
    """
    Specifies whether the system automatically changes the status of a resource to Enabled at the next successful monitor check.
    """
    return pulumi.get(self, "manual_resume")

@manual_resume.setter
def manual_resume(self, value: Optional[pulumi.Input[str]]):
    # Write the new value into the Pulumi-managed property bag.
    pulumi.set(self, "manual_resume", value)
@property
@pulumi.getter
def mode(self) -> Optional[pulumi.Input[str]]:
    """
    Specifies the data transfer process (DTP) mode. The default value is passive. The options are passive (Specifies that the monitor sends a data transfer request to the FTP server. When the FTP server receives the request, the FTP server then initiates and establishes the data connection.) and active (Specifies that the monitor initiates and establishes the data connection with the FTP server.).
    """
    return pulumi.get(self, "mode")

@mode.setter
def mode(self, value: Optional[pulumi.Input[str]]):
    # Write the new value into the Pulumi-managed property bag.
    pulumi.set(self, "mode", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
    """
    Specifies the Name of the LTM Monitor.Name of Monitor should be full path,full path is the combination of the `partition + monitor name`,For ex:`/Common/test-ltm-monitor`.
    """
    return pulumi.get(self, "name")

@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
    # Write the new value into the Pulumi-managed property bag.
    pulumi.set(self, "name", value)
@property
@pulumi.getter
def parent(self) -> Optional[pulumi.Input[str]]:
    """
    Parent monitor for the system to use for setting initial values for the new monitor.
    """
    return pulumi.get(self, "parent")

@parent.setter
def parent(self, value: Optional[pulumi.Input[str]]):
    # Write the new value into the Pulumi-managed property bag.
    pulumi.set(self, "parent", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
    """
    Specifies the password if the monitored target requires authentication
    """
    return pulumi.get(self, "password")

@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
    # Write the new value into the Pulumi-managed property bag.
    pulumi.set(self, "password", value)
@property
@pulumi.getter
def receive(self) -> Optional[pulumi.Input[str]]:
    """
    Specifies the regular expression representing the text string that the monitor looks for in the returned resource.
    """
    return pulumi.get(self, "receive")

@receive.setter
def receive(self, value: Optional[pulumi.Input[str]]):
    # Write the new value into the Pulumi-managed property bag.
    pulumi.set(self, "receive", value)
@property
@pulumi.getter(name="receiveDisable")
def receive_disable(self) -> Optional[pulumi.Input[str]]:
    """
    The system marks the node or pool member disabled when its response matches Receive Disable String but not Receive String.
    """
    return pulumi.get(self, "receive_disable")

@receive_disable.setter
def receive_disable(self, value: Optional[pulumi.Input[str]]):
    # Write the new value into the Pulumi-managed property bag.
    pulumi.set(self, "receive_disable", value)
@property
@pulumi.getter
def reverse(self) -> Optional[pulumi.Input[str]]:
    """
    Instructs the system to mark the target resource down when the test is successful.
    """
    return pulumi.get(self, "reverse")

@reverse.setter
def reverse(self, value: Optional[pulumi.Input[str]]):
    # Write the new value into the Pulumi-managed property bag.
    pulumi.set(self, "reverse", value)
@property
@pulumi.getter
def send(self) -> Optional[pulumi.Input[str]]:
    """
    Specifies the text string that the monitor sends to the target object.
    """
    return pulumi.get(self, "send")

@send.setter
def send(self, value: Optional[pulumi.Input[str]]):
    # Write the new value into the Pulumi-managed property bag.
    pulumi.set(self, "send", value)
@property
@pulumi.getter(name="timeUntilUp")
def time_until_up(self) -> Optional[pulumi.Input[int]]:
    """
    Specifies the number of seconds to wait after a resource first responds correctly to the monitor before setting the resource to up.
    """
    return pulumi.get(self, "time_until_up")

@time_until_up.setter
def time_until_up(self, value: Optional[pulumi.Input[int]]):
    # Write the new value into the Pulumi-managed property bag.
    pulumi.set(self, "time_until_up", value)
@property
@pulumi.getter
def timeout(self) -> Optional[pulumi.Input[int]]:
    """
    Specifies the number of seconds the target has in which to respond to the monitor request. The default is `16` seconds
    """
    return pulumi.get(self, "timeout")

@timeout.setter
def timeout(self, value: Optional[pulumi.Input[int]]):
    # Write the new value into the Pulumi-managed property bag.
    pulumi.set(self, "timeout", value)
@property
@pulumi.getter
def transparent(self) -> Optional[pulumi.Input[str]]:
    """
    Specifies whether the monitor operates in transparent mode.
    """
    return pulumi.get(self, "transparent")

@transparent.setter
def transparent(self, value: Optional[pulumi.Input[str]]):
    # Write the new value into the Pulumi-managed property bag.
    pulumi.set(self, "transparent", value)
@property
@pulumi.getter(name="upInterval")
def up_interval(self) -> Optional[pulumi.Input[int]]:
    """
    Specifies the interval for the system to use to perform the health check when a resource is up. The default is `0(Disabled)`
    """
    return pulumi.get(self, "up_interval")

@up_interval.setter
def up_interval(self, value: Optional[pulumi.Input[int]]):
    # Write the new value into the Pulumi-managed property bag.
    pulumi.set(self, "up_interval", value)
@property
@pulumi.getter
def username(self) -> Optional[pulumi.Input[str]]:
    """
    Specifies the user name if the monitored target requires authentication
    """
    return pulumi.get(self, "username")

@username.setter
def username(self, value: Optional[pulumi.Input[str]]):
    # Write the new value into the Pulumi-managed property bag.
    pulumi.set(self, "username", value)
class Monitor(pulumi.CustomResource):
    # NOTE(review): auto-generated Pulumi resource class for the
    # 'f5bigip:ltm/monitor:Monitor' type — do not hand-edit logic; regenerate
    # from the provider schema instead. Only comments/docstrings added here.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 adaptive: Optional[pulumi.Input[str]] = None,
                 adaptive_limit: Optional[pulumi.Input[int]] = None,
                 compatibility: Optional[pulumi.Input[str]] = None,
                 database: Optional[pulumi.Input[str]] = None,
                 destination: Optional[pulumi.Input[str]] = None,
                 filename: Optional[pulumi.Input[str]] = None,
                 interval: Optional[pulumi.Input[int]] = None,
                 ip_dscp: Optional[pulumi.Input[int]] = None,
                 manual_resume: Optional[pulumi.Input[str]] = None,
                 mode: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 parent: Optional[pulumi.Input[str]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 receive: Optional[pulumi.Input[str]] = None,
                 receive_disable: Optional[pulumi.Input[str]] = None,
                 reverse: Optional[pulumi.Input[str]] = None,
                 send: Optional[pulumi.Input[str]] = None,
                 time_until_up: Optional[pulumi.Input[int]] = None,
                 timeout: Optional[pulumi.Input[int]] = None,
                 transparent: Optional[pulumi.Input[str]] = None,
                 up_interval: Optional[pulumi.Input[int]] = None,
                 username: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        `ltm.Monitor` Configures a custom monitor for use by health checks.
        For resources should be named with their "full path". The full path is the combination of the partition + name of the resource. For example /Common/my-pool.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_f5bigip as f5bigip
        monitor = f5bigip.ltm.Monitor("monitor",
            destination="1.2.3.4:1234",
            interval=999,
            name="/Common/terraform_monitor",
            parent="/Common/http",
            send=\"\"\"GET /some/path
        \"\"\",
            timeout=999)
        test_ftp_monitor = f5bigip.ltm.Monitor("test-ftp-monitor",
            destination="*:8008",
            filename="somefile",
            interval=5,
            name="/Common/ftp-test",
            parent="/Common/ftp",
            time_until_up=0,
            timeout=16)
        test_postgresql_monitor = f5bigip.ltm.Monitor("test-postgresql-monitor",
            interval=5,
            name="/Common/test-postgresql-monitor",
            parent="/Common/postgresql",
            password="abcd1234",
            receive="Test",
            send="SELECT 'Test';",
            timeout=16,
            username="abcd")
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] adaptive: Specifies whether adaptive response time monitoring is enabled for this monitor. The default is `disabled`.
        :param pulumi.Input[int] adaptive_limit: Specifies the absolute number of milliseconds that may not be exceeded by a monitor probe, regardless of Allowed Divergence.
        :param pulumi.Input[str] compatibility: Specifies, when enabled, that the SSL options setting (in OpenSSL) is set to ALL. Accepts 'enabled' or 'disabled' values, the default value is 'enabled'.
        :param pulumi.Input[str] database: Specifies the database in which the user is created
        :param pulumi.Input[str] destination: Specify an alias address for monitoring
        :param pulumi.Input[str] filename: Specifies the full path and file name of the file that the system attempts to download. The health check is successful if the system can download the file.
        :param pulumi.Input[int] interval: Specifies, in seconds, the frequency at which the system issues the monitor check when either the resource is down or the status of the resource is unknown. The default is `5`
        :param pulumi.Input[int] ip_dscp: Displays the differentiated services code point (DSCP).The default is `0 (zero)`.
        :param pulumi.Input[str] manual_resume: Specifies whether the system automatically changes the status of a resource to Enabled at the next successful monitor check.
        :param pulumi.Input[str] mode: Specifies the data transfer process (DTP) mode. The default value is passive. The options are passive (Specifies that the monitor sends a data transfer request to the FTP server. When the FTP server receives the request, the FTP server then initiates and establishes the data connection.) and active (Specifies that the monitor initiates and establishes the data connection with the FTP server.).
        :param pulumi.Input[str] name: Specifies the Name of the LTM Monitor.Name of Monitor should be full path,full path is the combination of the `partition + monitor name`,For ex:`/Common/test-ltm-monitor`.
        :param pulumi.Input[str] parent: Parent monitor for the system to use for setting initial values for the new monitor.
        :param pulumi.Input[str] password: Specifies the password if the monitored target requires authentication
        :param pulumi.Input[str] receive: Specifies the regular expression representing the text string that the monitor looks for in the returned resource.
        :param pulumi.Input[str] receive_disable: The system marks the node or pool member disabled when its response matches Receive Disable String but not Receive String.
        :param pulumi.Input[str] reverse: Instructs the system to mark the target resource down when the test is successful.
        :param pulumi.Input[str] send: Specifies the text string that the monitor sends to the target object.
        :param pulumi.Input[int] time_until_up: Specifies the number of seconds to wait after a resource first responds correctly to the monitor before setting the resource to up.
        :param pulumi.Input[int] timeout: Specifies the number of seconds the target has in which to respond to the monitor request. The default is `16` seconds
        :param pulumi.Input[str] transparent: Specifies whether the monitor operates in transparent mode.
        :param pulumi.Input[int] up_interval: Specifies the interval for the system to use to perform the health check when a resource is up. The default is `0(Disabled)`
        :param pulumi.Input[str] username: Specifies the user name if the monitored target requires authentication
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: MonitorArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        `ltm.Monitor` Configures a custom monitor for use by health checks.
        For resources should be named with their "full path". The full path is the combination of the partition + name of the resource. For example /Common/my-pool.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_f5bigip as f5bigip
        monitor = f5bigip.ltm.Monitor("monitor",
            destination="1.2.3.4:1234",
            interval=999,
            name="/Common/terraform_monitor",
            parent="/Common/http",
            send=\"\"\"GET /some/path
        \"\"\",
            timeout=999)
        test_ftp_monitor = f5bigip.ltm.Monitor("test-ftp-monitor",
            destination="*:8008",
            filename="somefile",
            interval=5,
            name="/Common/ftp-test",
            parent="/Common/ftp",
            time_until_up=0,
            timeout=16)
        test_postgresql_monitor = f5bigip.ltm.Monitor("test-postgresql-monitor",
            interval=5,
            name="/Common/test-postgresql-monitor",
            parent="/Common/postgresql",
            password="abcd1234",
            receive="Test",
            send="SELECT 'Test';",
            timeout=16,
            username="abcd")
        ```
        :param str resource_name: The name of the resource.
        :param MonitorArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either a single MonitorArgs
        # object or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(MonitorArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       adaptive: Optional[pulumi.Input[str]] = None,
                       adaptive_limit: Optional[pulumi.Input[int]] = None,
                       compatibility: Optional[pulumi.Input[str]] = None,
                       database: Optional[pulumi.Input[str]] = None,
                       destination: Optional[pulumi.Input[str]] = None,
                       filename: Optional[pulumi.Input[str]] = None,
                       interval: Optional[pulumi.Input[int]] = None,
                       ip_dscp: Optional[pulumi.Input[int]] = None,
                       manual_resume: Optional[pulumi.Input[str]] = None,
                       mode: Optional[pulumi.Input[str]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       parent: Optional[pulumi.Input[str]] = None,
                       password: Optional[pulumi.Input[str]] = None,
                       receive: Optional[pulumi.Input[str]] = None,
                       receive_disable: Optional[pulumi.Input[str]] = None,
                       reverse: Optional[pulumi.Input[str]] = None,
                       send: Optional[pulumi.Input[str]] = None,
                       time_until_up: Optional[pulumi.Input[int]] = None,
                       timeout: Optional[pulumi.Input[int]] = None,
                       transparent: Optional[pulumi.Input[str]] = None,
                       up_interval: Optional[pulumi.Input[int]] = None,
                       username: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # No existing resource id: build a fresh property bag from the
            # keyword arguments; __props__ may only be supplied together with
            # an id (i.e. when adopting an existing resource).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = MonitorArgs.__new__(MonitorArgs)
            __props__.__dict__["adaptive"] = adaptive
            __props__.__dict__["adaptive_limit"] = adaptive_limit
            __props__.__dict__["compatibility"] = compatibility
            __props__.__dict__["database"] = database
            __props__.__dict__["destination"] = destination
            __props__.__dict__["filename"] = filename
            __props__.__dict__["interval"] = interval
            __props__.__dict__["ip_dscp"] = ip_dscp
            __props__.__dict__["manual_resume"] = manual_resume
            __props__.__dict__["mode"] = mode
            # name and parent are required unless the engine is resolving an
            # existing URN (opts.urn set).
            if name is None and not opts.urn:
                raise TypeError("Missing required property 'name'")
            __props__.__dict__["name"] = name
            if parent is None and not opts.urn:
                raise TypeError("Missing required property 'parent'")
            __props__.__dict__["parent"] = parent
            __props__.__dict__["password"] = password
            __props__.__dict__["receive"] = receive
            __props__.__dict__["receive_disable"] = receive_disable
            __props__.__dict__["reverse"] = reverse
            __props__.__dict__["send"] = send
            __props__.__dict__["time_until_up"] = time_until_up
            __props__.__dict__["timeout"] = timeout
            __props__.__dict__["transparent"] = transparent
            __props__.__dict__["up_interval"] = up_interval
            __props__.__dict__["username"] = username
        super(Monitor, __self__).__init__(
            'f5bigip:ltm/monitor:Monitor',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            adaptive: Optional[pulumi.Input[str]] = None,
            adaptive_limit: Optional[pulumi.Input[int]] = None,
            compatibility: Optional[pulumi.Input[str]] = None,
            database: Optional[pulumi.Input[str]] = None,
            destination: Optional[pulumi.Input[str]] = None,
            filename: Optional[pulumi.Input[str]] = None,
            interval: Optional[pulumi.Input[int]] = None,
            ip_dscp: Optional[pulumi.Input[int]] = None,
            manual_resume: Optional[pulumi.Input[str]] = None,
            mode: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            parent: Optional[pulumi.Input[str]] = None,
            password: Optional[pulumi.Input[str]] = None,
            receive: Optional[pulumi.Input[str]] = None,
            receive_disable: Optional[pulumi.Input[str]] = None,
            reverse: Optional[pulumi.Input[str]] = None,
            send: Optional[pulumi.Input[str]] = None,
            time_until_up: Optional[pulumi.Input[int]] = None,
            timeout: Optional[pulumi.Input[int]] = None,
            transparent: Optional[pulumi.Input[str]] = None,
            up_interval: Optional[pulumi.Input[int]] = None,
            username: Optional[pulumi.Input[str]] = None) -> 'Monitor':
        """
        Get an existing Monitor resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] adaptive: Specifies whether adaptive response time monitoring is enabled for this monitor. The default is `disabled`.
        :param pulumi.Input[int] adaptive_limit: Specifies the absolute number of milliseconds that may not be exceeded by a monitor probe, regardless of Allowed Divergence.
        :param pulumi.Input[str] compatibility: Specifies, when enabled, that the SSL options setting (in OpenSSL) is set to ALL. Accepts 'enabled' or 'disabled' values, the default value is 'enabled'.
        :param pulumi.Input[str] database: Specifies the database in which the user is created
        :param pulumi.Input[str] destination: Specify an alias address for monitoring
        :param pulumi.Input[str] filename: Specifies the full path and file name of the file that the system attempts to download. The health check is successful if the system can download the file.
        :param pulumi.Input[int] interval: Specifies, in seconds, the frequency at which the system issues the monitor check when either the resource is down or the status of the resource is unknown. The default is `5`
        :param pulumi.Input[int] ip_dscp: Displays the differentiated services code point (DSCP).The default is `0 (zero)`.
        :param pulumi.Input[str] manual_resume: Specifies whether the system automatically changes the status of a resource to Enabled at the next successful monitor check.
        :param pulumi.Input[str] mode: Specifies the data transfer process (DTP) mode. The default value is passive. The options are passive (Specifies that the monitor sends a data transfer request to the FTP server. When the FTP server receives the request, the FTP server then initiates and establishes the data connection.) and active (Specifies that the monitor initiates and establishes the data connection with the FTP server.).
        :param pulumi.Input[str] name: Specifies the Name of the LTM Monitor.Name of Monitor should be full path,full path is the combination of the `partition + monitor name`,For ex:`/Common/test-ltm-monitor`.
        :param pulumi.Input[str] parent: Parent monitor for the system to use for setting initial values for the new monitor.
        :param pulumi.Input[str] password: Specifies the password if the monitored target requires authentication
        :param pulumi.Input[str] receive: Specifies the regular expression representing the text string that the monitor looks for in the returned resource.
        :param pulumi.Input[str] receive_disable: The system marks the node or pool member disabled when its response matches Receive Disable String but not Receive String.
        :param pulumi.Input[str] reverse: Instructs the system to mark the target resource down when the test is successful.
        :param pulumi.Input[str] send: Specifies the text string that the monitor sends to the target object.
        :param pulumi.Input[int] time_until_up: Specifies the number of seconds to wait after a resource first responds correctly to the monitor before setting the resource to up.
        :param pulumi.Input[int] timeout: Specifies the number of seconds the target has in which to respond to the monitor request. The default is `16` seconds
        :param pulumi.Input[str] transparent: Specifies whether the monitor operates in transparent mode.
        :param pulumi.Input[int] up_interval: Specifies the interval for the system to use to perform the health check when a resource is up. The default is `0(Disabled)`
        :param pulumi.Input[str] username: Specifies the user name if the monitored target requires authentication
        """
        # Merge the caller's options with the id of the resource to look up.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _MonitorState.__new__(_MonitorState)
        __props__.__dict__["adaptive"] = adaptive
        __props__.__dict__["adaptive_limit"] = adaptive_limit
        __props__.__dict__["compatibility"] = compatibility
        __props__.__dict__["database"] = database
        __props__.__dict__["destination"] = destination
        __props__.__dict__["filename"] = filename
        __props__.__dict__["interval"] = interval
        __props__.__dict__["ip_dscp"] = ip_dscp
        __props__.__dict__["manual_resume"] = manual_resume
        __props__.__dict__["mode"] = mode
        __props__.__dict__["name"] = name
        __props__.__dict__["parent"] = parent
        __props__.__dict__["password"] = password
        __props__.__dict__["receive"] = receive
        __props__.__dict__["receive_disable"] = receive_disable
        __props__.__dict__["reverse"] = reverse
        __props__.__dict__["send"] = send
        __props__.__dict__["time_until_up"] = time_until_up
        __props__.__dict__["timeout"] = timeout
        __props__.__dict__["transparent"] = transparent
        __props__.__dict__["up_interval"] = up_interval
        __props__.__dict__["username"] = username
        return Monitor(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def adaptive(self) -> pulumi.Output[str]:
        """
        Specifies whether adaptive response time monitoring is enabled for this monitor. The default is `disabled`.
        """
        return pulumi.get(self, "adaptive")
    @property
    @pulumi.getter(name="adaptiveLimit")
    def adaptive_limit(self) -> pulumi.Output[int]:
        """
        Specifies the absolute number of milliseconds that may not be exceeded by a monitor probe, regardless of Allowed Divergence.
        """
        return pulumi.get(self, "adaptive_limit")
    @property
    @pulumi.getter
    def compatibility(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies, when enabled, that the SSL options setting (in OpenSSL) is set to ALL. Accepts 'enabled' or 'disabled' values, the default value is 'enabled'.
        """
        return pulumi.get(self, "compatibility")
    @property
    @pulumi.getter
    def database(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies the database in which the user is created
        """
        return pulumi.get(self, "database")
    @property
    @pulumi.getter
    def destination(self) -> pulumi.Output[str]:
        """
        Specify an alias address for monitoring
        """
        return pulumi.get(self, "destination")
    @property
    @pulumi.getter
    def filename(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies the full path and file name of the file that the system attempts to download. The health check is successful if the system can download the file.
        """
        return pulumi.get(self, "filename")
    @property
    @pulumi.getter
    def interval(self) -> pulumi.Output[int]:
        """
        Specifies, in seconds, the frequency at which the system issues the monitor check when either the resource is down or the status of the resource is unknown. The default is `5`
        """
        return pulumi.get(self, "interval")
    @property
    @pulumi.getter(name="ipDscp")
    def ip_dscp(self) -> pulumi.Output[int]:
        """
        Displays the differentiated services code point (DSCP).The default is `0 (zero)`.
        """
        return pulumi.get(self, "ip_dscp")
    @property
    @pulumi.getter(name="manualResume")
    def manual_resume(self) -> pulumi.Output[str]:
        """
        Specifies whether the system automatically changes the status of a resource to Enabled at the next successful monitor check.
        """
        return pulumi.get(self, "manual_resume")
    @property
    @pulumi.getter
    def mode(self) -> pulumi.Output[str]:
        """
        Specifies the data transfer process (DTP) mode. The default value is passive. The options are passive (Specifies that the monitor sends a data transfer request to the FTP server. When the FTP server receives the request, the FTP server then initiates and establishes the data connection.) and active (Specifies that the monitor initiates and establishes the data connection with the FTP server.).
        """
        return pulumi.get(self, "mode")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Specifies the Name of the LTM Monitor.Name of Monitor should be full path,full path is the combination of the `partition + monitor name`,For ex:`/Common/test-ltm-monitor`.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def parent(self) -> pulumi.Output[str]:
        """
        Parent monitor for the system to use for setting initial values for the new monitor.
        """
        return pulumi.get(self, "parent")
    @property
    @pulumi.getter
    def password(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies the password if the monitored target requires authentication
        """
        return pulumi.get(self, "password")
    @property
    @pulumi.getter
    def receive(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies the regular expression representing the text string that the monitor looks for in the returned resource.
        """
        return pulumi.get(self, "receive")
    @property
    @pulumi.getter(name="receiveDisable")
    def receive_disable(self) -> pulumi.Output[Optional[str]]:
        """
        The system marks the node or pool member disabled when its response matches Receive Disable String but not Receive String.
        """
        return pulumi.get(self, "receive_disable")
    @property
    @pulumi.getter
    def reverse(self) -> pulumi.Output[str]:
        """
        Instructs the system to mark the target resource down when the test is successful.
        """
        return pulumi.get(self, "reverse")
    @property
    @pulumi.getter
    def send(self) -> pulumi.Output[str]:
        """
        Specifies the text string that the monitor sends to the target object.
        """
        return pulumi.get(self, "send")
    @property
    @pulumi.getter(name="timeUntilUp")
    def time_until_up(self) -> pulumi.Output[int]:
        """
        Specifies the number of seconds to wait after a resource first responds correctly to the monitor before setting the resource to up.
        """
        return pulumi.get(self, "time_until_up")
    @property
    @pulumi.getter
    def timeout(self) -> pulumi.Output[int]:
        """
        Specifies the number of seconds the target has in which to respond to the monitor request. The default is `16` seconds
        """
        return pulumi.get(self, "timeout")
    @property
    @pulumi.getter
    def transparent(self) -> pulumi.Output[str]:
        """
        Specifies whether the monitor operates in transparent mode.
        """
        return pulumi.get(self, "transparent")
    @property
    @pulumi.getter(name="upInterval")
    def up_interval(self) -> pulumi.Output[int]:
        """
        Specifies the interval for the system to use to perform the health check when a resource is up. The default is `0(Disabled)`
        """
        return pulumi.get(self, "up_interval")
    @property
    @pulumi.getter
    def username(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies the user name if the monitored target requires authentication
        """
        return pulumi.get(self, "username")
| 48.564103
| 435
| 0.655823
| 7,254
| 58,714
| 5.183347
| 0.039702
| 0.08484
| 0.078191
| 0.080745
| 0.957553
| 0.951223
| 0.934069
| 0.925479
| 0.924122
| 0.915904
| 0
| 0.002379
| 0.248373
| 58,714
| 1,208
| 436
| 48.604305
| 0.84963
| 0.419423
| 0
| 0.888092
| 1
| 0
| 0.07149
| 0.000866
| 0
| 0
| 0
| 0
| 0
| 1
| 0.167862
| false
| 0.034433
| 0.007174
| 0
| 0.275466
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
9791da6a59f8a2f414b93856dd8cfea232ce18e9
| 6,245
|
py
|
Python
|
api/tournaments/tests/test_model.py
|
BerniWittmann/beachanmeldung
|
9014dea5c31ea9e26f18d753d8d836741865c38e
|
[
"Unlicense",
"MIT"
] | null | null | null |
api/tournaments/tests/test_model.py
|
BerniWittmann/beachanmeldung
|
9014dea5c31ea9e26f18d753d8d836741865c38e
|
[
"Unlicense",
"MIT"
] | 5
|
2020-06-05T17:31:08.000Z
|
2022-03-11T23:16:12.000Z
|
api/tournaments/tests/test_model.py
|
BerniWittmann/beachanmeldung
|
9014dea5c31ea9e26f18d753d8d836741865c38e
|
[
"Unlicense",
"MIT"
] | null | null | null |
from django.test import TestCase
from django.utils import timezone
from django.utils.translation import activate
from api.accounts.models import MyUser
from api.enums import TeamStateTypes
from api.team.models import Team
from api.tournaments.models import Tournament
activate('en-us')
class Tournaments(TestCase):
    """Model-level tests for Tournament signup windows and team counters.

    The repeated fixture/assertion code of the original tests is factored
    into private helpers; every public test method keeps its original name
    and checks exactly the same behavior.
    """

    tournament = None
    user = None

    def setUp(self):
        """Create one tournament with 23 places and a verified staff user."""
        self.tournament = Tournament.objects \
            .create(name='Test Turnier',
                    gender='mixed',
                    start_date='2017-01-01',
                    end_date='2017-01-02',
                    deadline_signup='2017-01-01T00:00:00Z',
                    deadline_edit='2017-01-01T00:00:00Z',
                    advertisement_url='http://www.google.de',
                    contact_email='test@byom.de',
                    starting_fee=60.0,
                    number_of_places=23
                    )
        self.user = MyUser.objects.create(email='test@byom.de',
                                          first_name='Test',
                                          last_name='User',
                                          phone='+49192481024')
        self.user.set_password('test123')
        self.user.is_verified = True
        self.user.is_staff = True
        self.user.save()

    # ---- helpers ---------------------------------------------------------

    def _set_signup_window(self, start_offset_days, deadline_offset_days):
        """Shift start/deadline of signup relative to now by day offsets."""
        self.tournament.start_signup = timezone.now() + \
            timezone.timedelta(days=start_offset_days)
        self.tournament.deadline_signup = timezone.now() + \
            timezone.timedelta(days=deadline_offset_days)

    def _create_team(self, state):
        """Create one team in the given state for the test tournament."""
        return Team.objects.create(
            name='TSV Ismaning',
            beachname='THC Eh Drin!',
            tournament=self.tournament,
            trainer=self.user,
            state=state
        )

    def _assert_counts(self, active=0, total=0, signed_up=0, free=23,
                       waitlist=0, approval=0):
        """Assert every team-count property of the tournament at once.

        Defaults describe the empty tournament (23 free places, no teams).
        """
        self.assertEqual(self.tournament.active_teams.count(), active)
        self.assertEqual(self.tournament.total_count_teams, total)
        self.assertEqual(self.tournament.count_signed_up_teams, signed_up)
        self.assertEqual(self.tournament.free_places, free)
        self.assertEqual(self.tournament.waitlist_count, waitlist)
        self.assertEqual(self.tournament.approval_count, approval)

    # ---- signup window ---------------------------------------------------

    def test_tournament_is_signup_open(self):
        # Window straddles "now": open, neither before nor after.
        self._set_signup_window(-1, 1)
        self.assertTrue(self.tournament.signup_open)
        self.assertFalse(self.tournament.is_after_signup)
        self.assertFalse(self.tournament.is_before_signup)

    def test_tournament_is_signup_not_open(self):
        # Entirely in the past...
        self._set_signup_window(-2, -1)
        self.assertFalse(self.tournament.signup_open)
        # ...and entirely in the future: both closed.
        self._set_signup_window(2, 3)
        self.assertFalse(self.tournament.signup_open)

    def test_tournament_is_before_signup(self):
        self._set_signup_window(2, 3)
        self.assertFalse(self.tournament.signup_open)
        self.assertFalse(self.tournament.is_after_signup)
        self.assertTrue(self.tournament.is_before_signup)

    def test_tournament_is_after_signup(self):
        # NOTE: deadline before start mirrors the original test's offsets.
        self._set_signup_window(-2, -3)
        self.assertFalse(self.tournament.signup_open)
        self.assertTrue(self.tournament.is_after_signup)
        self.assertFalse(self.tournament.is_before_signup)

    # ---- team counts -----------------------------------------------------

    def test_tournament_team_count_empty(self):
        self._assert_counts()

    def test_tournament_team_count_approval(self):
        # needs_approval counts as active, waitlisted AND awaiting approval.
        self._create_team(TeamStateTypes.needs_approval)
        self._assert_counts(active=1, total=1, waitlist=1, approval=1)

    def test_tournament_team_count_waitlist(self):
        self._create_team(TeamStateTypes.waiting)
        self._assert_counts(active=1, total=1, waitlist=1)

    def test_tournament_team_count_signed_up(self):
        # Only signed-up teams consume a place (23 -> 22).
        self._create_team(TeamStateTypes.signed_up)
        self._assert_counts(active=1, total=1, signed_up=1, free=22)

    def test_tournament_team_count_denied(self):
        # Denied teams are invisible to every counter.
        self._create_team(TeamStateTypes.denied)
        self._assert_counts()
| 41.357616
| 66
| 0.63795
| 685
| 6,245
| 5.632117
| 0.157664
| 0.203214
| 0.147745
| 0.225505
| 0.798082
| 0.7662
| 0.755832
| 0.74676
| 0.746501
| 0.7338
| 0
| 0.023499
| 0.264051
| 6,245
| 150
| 67
| 41.633333
| 0.815927
| 0
| 0
| 0.561538
| 0
| 0
| 0.039872
| 0
| 0
| 0
| 0
| 0
| 0.315385
| 1
| 0.076923
| false
| 0.007692
| 0.053846
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8ae6cc94ade51343b5fae69a32caf69b7bbcb66f
| 1,009
|
py
|
Python
|
files and exceptions/files/text files/learning python/learning_python.py
|
DarkShadow4/python
|
4cd94e0cf53ee06c9c31e9272572ca9656697c30
|
[
"MIT"
] | null | null | null |
files and exceptions/files/text files/learning python/learning_python.py
|
DarkShadow4/python
|
4cd94e0cf53ee06c9c31e9272572ca9656697c30
|
[
"MIT"
] | null | null | null |
files and exceptions/files/text files/learning python/learning_python.py
|
DarkShadow4/python
|
4cd94e0cf53ee06c9c31e9272572ca9656697c30
|
[
"MIT"
] | 1
|
2020-08-19T17:25:22.000Z
|
2020-08-19T17:25:22.000Z
|
"""Read and print a text file four ways, handling a missing file gracefully.

Ported to Python 3: ``print`` is a function and ``FileNotFoundError`` is a
builtin, so the original ``FileNotFoundError = IOError`` shim (which also
widened the caught exceptions to all I/O errors) is no longer needed.
The builtin name ``file`` is no longer shadowed.
"""
filename = "learning_python.txt"

# 1) Read the whole file at once.
try:
    with open(filename) as f:
        contents = f.read()
    print(contents.rstrip())
except FileNotFoundError:
    # Missing file: create it with a sentinel instead of crashing.
    with open(filename, "a") as f:
        f.write("FOOL")

# 2) Print the file line by line.
try:
    with open(filename) as f:
        for line in f:
            print(line.rstrip())
except FileNotFoundError:
    with open(filename, "a") as f:
        f.write("FOOL")

# 3) Collect lines into a list; print only if the read succeeded (else:).
file_lines = []
try:
    with open(filename) as f:
        file_lines = [line.rstrip() for line in f]
except FileNotFoundError:
    with open(filename, "a") as f:
        f.write("FOOL")
else:
    for line in file_lines:
        print(line)

# 4) Same, but replace "Python" with "JavaScript" in each line.
file_lines = []
try:
    with open(filename) as f:
        file_lines = [line.rstrip().replace("Python", "JavaScript")
                      for line in f]
except FileNotFoundError:
    with open(filename, "a") as f:
        f.write("FOOL")
else:
    for line in file_lines:
        print(line)
| 21.934783
| 78
| 0.618434
| 128
| 1,009
| 4.820313
| 0.210938
| 0.103728
| 0.207455
| 0.105348
| 0.844408
| 0.844408
| 0.80389
| 0.80389
| 0.80389
| 0.80389
| 0
| 0
| 0.268583
| 1,009
| 45
| 79
| 22.422222
| 0.836043
| 0
| 0
| 0.815789
| 0
| 0
| 0.054509
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.105263
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
c124a66b91a1e15f2ce51f75eea44abe0d0ecde8
| 2,157
|
py
|
Python
|
panclassif/binarymerge.py
|
Kazi1318914/panclassif
|
56a57f5411eface902206e0cecf110d478433617
|
[
"MIT"
] | 1
|
2021-08-05T10:17:46.000Z
|
2021-08-05T10:17:46.000Z
|
panclassif/binarymerge.py
|
Zwei-inc/panclassif
|
1cfe5434a10572b40ea845e885564b9ebfb88572
|
[
"MIT"
] | null | null | null |
panclassif/binarymerge.py
|
Zwei-inc/panclassif
|
1cfe5434a10572b40ea845e885564b9ebfb88572
|
[
"MIT"
] | 2
|
2021-05-27T10:15:04.000Z
|
2021-08-15T06:30:26.000Z
|
def _merge_split(names, homepath, split):
    """Concatenate per-dataset cancer/normal matrices for one data split.

    For every name, reads ``<homepath>/<split>/cancer/<name>.txt.bz2`` and
    the matching ``normal`` file (tab-separated, no header), transposes each
    so samples become rows, stacks them, and writes the merged matrices to
    ``bin_Cancer.txt.bz2`` / ``bin_Normal.txt.bz2`` inside the split
    directory (bz2-compressed, tab-separated, no header or index).

    :param names: iterable of dataset base names (without extension)
    :param homepath: project root containing the split directory
    :param split: sub-directory name, e.g. "train_data" or "test_data"
    """
    import pandas as pd
    import warnings
    warnings.filterwarnings("ignore")

    cancer_all = pd.DataFrame()
    normal_all = pd.DataFrame()
    for name in names:
        cancer = pd.read_csv(
            homepath + "/" + split + "/cancer/" + name + ".txt.bz2",
            header=None, delimiter="\t")
        normal = pd.read_csv(
            homepath + "/" + split + "/normal/" + name + ".txt.bz2",
            header=None, delimiter="\t")
        # Transpose so each sample is one row; newest data goes on top,
        # matching the original concat order [Cancer, cresult].
        cancer_all = pd.concat([cancer.T, cancer_all])
        normal_all = pd.concat([normal.T, normal_all])

    csv_kwargs = dict(compression="bz2", sep="\t",
                      header=None, index=None, index_label=None)
    cancer_all.to_csv(homepath + "/" + split + "/bin_Cancer.txt.bz2",
                      **csv_kwargs)
    normal_all.to_csv(homepath + "/" + split + "/bin_Normal.txt.bz2",
                      **csv_kwargs)


def btrain(names, homepath):
    """Merge the training-split cancer/normal data (see _merge_split)."""
    _merge_split(names, homepath, "train_data")


def btest(names, homepath):
    """Merge the test-split cancer/normal data (see _merge_split)."""
    _merge_split(names, homepath, "test_data")
| 34.790323
| 85
| 0.639314
| 282
| 2,157
| 4.804965
| 0.191489
| 0.035424
| 0.026568
| 0.050185
| 0.987454
| 0.987454
| 0.97417
| 0.960886
| 0.960886
| 0.960886
| 0
| 0.011912
| 0.221604
| 2,157
| 62
| 86
| 34.790323
| 0.795116
| 0.085304
| 0
| 0.772727
| 0
| 0
| 0.138437
| 0.061877
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.136364
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c162c314152224f3b9852862524793f3f43714da
| 120
|
py
|
Python
|
canteenWeb/permissions.py
|
Atharva1111/canteen-automation-web
|
85eabde9f56179da3cc98797fdb7192b79bc5b19
|
[
"MIT"
] | null | null | null |
canteenWeb/permissions.py
|
Atharva1111/canteen-automation-web
|
85eabde9f56179da3cc98797fdb7192b79bc5b19
|
[
"MIT"
] | null | null | null |
canteenWeb/permissions.py
|
Atharva1111/canteen-automation-web
|
85eabde9f56179da3cc98797fdb7192b79bc5b19
|
[
"MIT"
] | null | null | null |
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.permissions import IsAuthenticated
| 40
| 64
| 0.916667
| 12
| 120
| 9
| 0.583333
| 0.148148
| 0.314815
| 0.518519
| 0.62963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 120
| 2
| 65
| 60
| 0.964286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
c17989cb161fdac808f4438a1b97f375d296d729
| 174
|
py
|
Python
|
incasem/pipeline/sources/__init__.py
|
kirchhausenlab/incasem
|
ee9e007c5c04571e547e2fb5af5e800bd2d2b435
|
[
"BSD-3-Clause"
] | null | null | null |
incasem/pipeline/sources/__init__.py
|
kirchhausenlab/incasem
|
ee9e007c5c04571e547e2fb5af5e800bd2d2b435
|
[
"BSD-3-Clause"
] | null | null | null |
incasem/pipeline/sources/__init__.py
|
kirchhausenlab/incasem
|
ee9e007c5c04571e547e2fb5af5e800bd2d2b435
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from .data_sources_semantic import DataSourcesSemantic
from .data_sources_semantic_with_context import DataSourcesSemanticWithContext
| 34.8
| 78
| 0.913793
| 19
| 174
| 7.789474
| 0.578947
| 0.108108
| 0.202703
| 0.310811
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074713
| 174
| 4
| 79
| 43.5
| 0.919255
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
c1b7ef90deeccbb103879acdd82a168e1f2952ae
| 26
|
py
|
Python
|
variables/var1.py
|
Anilkumar95/python-75-hackathon
|
0cc9304e46ceace826090614b46d8048a068d106
|
[
"MIT"
] | null | null | null |
variables/var1.py
|
Anilkumar95/python-75-hackathon
|
0cc9304e46ceace826090614b46d8048a068d106
|
[
"MIT"
] | null | null | null |
variables/var1.py
|
Anilkumar95/python-75-hackathon
|
0cc9304e46ceace826090614b46d8048a068d106
|
[
"MIT"
] | 2
|
2019-01-27T16:59:48.000Z
|
2019-01-29T13:07:40.000Z
|
# Bind three variables to the same value via tuple unpacking, then show them.
y, v, p = 9000, 9000, 9000
print(y, v, p)
| 8.666667
| 14
| 0.576923
| 8
| 26
| 1.875
| 0.625
| 0.266667
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 0.153846
| 26
| 2
| 15
| 13
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 1
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
c1e9aaaf2f9021fdd80b903afd18e1ff88259640
| 13,697
|
py
|
Python
|
pytorch_lm/rnn/rhn.py
|
DavidNemeskey/pytorch_lm
|
5ef28d863db5da5d88f3d7a6860b75065894c96a
|
[
"MIT"
] | null | null | null |
pytorch_lm/rnn/rhn.py
|
DavidNemeskey/pytorch_lm
|
5ef28d863db5da5d88f3d7a6860b75065894c96a
|
[
"MIT"
] | 8
|
2018-03-05T15:57:40.000Z
|
2019-02-14T13:53:07.000Z
|
pytorch_lm/rnn/rhn.py
|
DavidNemeskey/pytorch_lm
|
5ef28d863db5da5d88f3d7a6860b75065894c96a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# vim: set fileencoding=utf-8 :
"""Implements Recurrent Highway Networks from Zilly et al. (2017)."""
import torch
import torch.nn as nn
from torch.autograd import Variable
from pytorch_lm.dropout import create_dropout
class RhnBase(nn.Module):
    """Just the few things that are common to all variants.

    Stores the shared hyperparameters and registers a forward pre-hook
    (``initialize_t``, defined by each subclass) that applies the
    transform-gate bias once, before the first forward pass.

    NOTE(review): subclasses are expected to define ``self.w_h`` — it is
    used by ``init_hidden`` below but never created here.
    """

    def __init__(self, input_size, hidden_size, num_layers, input_dropout=0,
                 state_dropout=0, transform_bias=None):
        super(RhnBase, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.input_dropout = input_dropout
        self.state_dropout = state_dropout
        # t_bias is cleared (set to None) by the forward pre-hook after it
        # has been applied to the transform-gate biases.
        self.t_bias = self.transform_bias = transform_bias
        # Defer bias initialization to the first forward() call.
        self.register_forward_pre_hook(self.__class__.initialize_t)

    def init_hidden(self, batch_size):
        """
        Returns a :class:`Variable` for the hidden state. As I understand, we
        only need one of these (as opposed to LSTM).
        """
        # Zero state with the same tensor type (device/dtype) as w_h.
        return Variable(torch.Tensor(
            batch_size, self.hidden_size).zero_().type(self.w_h.type()))
class OfficialRhn(RhnBase):
    """Pytorch version of the version implemented in Zilly's repo.

    Uses a single shared dropout mask per sequence for inputs and states,
    and H/T gates only (the carry gate is tied: c = 1 - t).
    """

    def __init__(self, input_size, hidden_size, num_layers, input_dropout=0,
                 state_dropout=0, transform_bias=None):
        super(OfficialRhn, self).__init__(input_size, hidden_size, num_layers,
                                          input_dropout, state_dropout,
                                          transform_bias)
        # Per-sequence (variational) dropout for the input and the state.
        self.input_do = create_dropout(input_dropout)
        self.state_do = create_dropout(state_dropout)
        # Input-to-hidden weights for the H and T gates (first layer only).
        self.w_h = nn.Parameter(torch.Tensor(input_size, hidden_size))
        self.w_t = nn.Parameter(torch.Tensor(input_size, hidden_size))
        # Recurrent state-to-state transforms, one per highway layer.
        self.r_h = [nn.Linear(hidden_size, hidden_size)
                    for l in range(self.num_layers)]
        self.r_t = [nn.Linear(hidden_size, hidden_size)
                    for l in range(self.num_layers)]
        # Register the Linear modules so their parameters are tracked.
        for letter, lst in [('H', self.r_h), ('T', self.r_t)]:
            for l, p in enumerate(lst, 1):
                self.add_module('Rb_{}_{}'.format(letter, l), p)

    def forward(self, input, s):
        """Run the RHN over a sequence.

        :param input: assumed batch x time x input_size, judging by the
            chunk(input.size(1), dim=1) below — TODO confirm.
        :param s: initial hidden state, batch x hidden_size.
        :returns: (list of per-timestep states, final state).
        """
        outputs = []
        # To initialize per-sequence dropout
        self.input_do.reset_noise()
        self.state_do.reset_noise()
        # chunk() cuts batch_size x 1 x input_size chunks from input
        for input_t in map(torch.squeeze, input.chunk(input.size(1), dim=1)):
            for l in range(self.num_layers):
                # The input is processed only by the first layer
                whx = self.input_do(input_t).matmul(self.w_h) if l == 0 else 0
                wtx = self.input_do(input_t).matmul(self.w_t) if l == 0 else 0
                # The gates (and the state)
                h = torch.tanh(whx + self.r_h[l](self.state_do(s)))
                t = torch.sigmoid(wtx + self.r_t[l](self.state_do(s)))
                # The new state: c is tied to 1 - t, i.e. s' = h*t + s*(1-t)
                s = (h - s) * t + s
            # Here the output is the current s
            outputs.append(s)
        return outputs, s
        # return torch.stack(outputs, 1), s

    @classmethod
    def initialize_t(cls, module, _):
        """Initializes the transform gate biases, then disarms itself."""
        if module.t_bias is not None:
            for p in module.r_t:
                # nn.init.constant was deprecated and later removed;
                # constant_ is the in-place replacement.
                nn.init.constant_(p.bias, module.t_bias)
            module.t_bias = None
class RhnLinTCTied(RhnBase):
    """Implements Recurrent Highway Networks from Zilly et al. (2017).

    Variant with separate per-gate dropouts (H and T) and the carry gate
    tied to the transform gate (c = 1 - t). Returns stacked outputs.
    """

    def __init__(self, input_size, hidden_size, num_layers, input_dropout=0,
                 state_dropout=0, transform_bias=None):
        super(RhnLinTCTied, self).__init__(input_size, hidden_size, num_layers,
                                           input_dropout, state_dropout,
                                           transform_bias)
        # Index 0 drops the input; indices 1..num_layers drop the state.
        self.do_h = [create_dropout(input_dropout if l == 0 else state_dropout)
                     for l in range(self.num_layers + 1)]
        self.do_t = [create_dropout(input_dropout if l == 0 else state_dropout)
                     for l in range(self.num_layers + 1)]
        # Register dropout modules so they follow the module tree.
        for letter, do_list in [('H', self.do_h), ('T', self.do_t)]:
            self.add_module('Do_{}_w'.format(letter), do_list[0])
            for l, do in enumerate(do_list[1:], 1):
                self.add_module('Do_{}_{}'.format(letter, l), do)
        # Input-to-hidden weights (first layer only).
        self.w_h = nn.Parameter(torch.Tensor(input_size, hidden_size))
        self.w_t = nn.Parameter(torch.Tensor(input_size, hidden_size))
        # Recurrent transforms, one per highway layer.
        self.r_h = [nn.Linear(hidden_size, hidden_size)
                    for l in range(self.num_layers)]
        self.r_t = [nn.Linear(hidden_size, hidden_size)
                    for l in range(self.num_layers)]
        for letter, lst in [('H', self.r_h), ('T', self.r_t)]:
            for l, p in enumerate(lst, 1):
                self.add_module('Rb_{}_{}'.format(letter, l), p)

    @classmethod
    def initialize_t(cls, module, _):
        """Initializes the transform gate biases, then disarms itself."""
        if module.t_bias is not None:
            for p in module.r_t:
                # nn.init.constant was deprecated and later removed;
                # constant_ is the in-place replacement.
                nn.init.constant_(p.bias, module.t_bias)
            module.t_bias = None

    def forward(self, input, s):
        """Run the RHN; returns (batch x time x hidden outputs, final state).

        ``input`` is assumed batch x time x input_size, judging by the
        chunk(input.size(1), dim=1) below — TODO confirm.
        """
        outputs = []
        # To initialize per-sequence dropout
        for do in self.do_h + self.do_t:
            do.reset_noise()
        # chunk() cuts batch_size x 1 x input_size chunks from input
        for input_t in map(torch.squeeze, input.chunk(input.size(1), dim=1)):
            for l in range(self.num_layers):
                # The input is processed only by the first layer
                whx = self.do_h[0](input_t).matmul(self.w_h) if l == 0 else 0
                wtx = self.do_t[0](input_t).matmul(self.w_t) if l == 0 else 0
                # The gates (and the state)
                h = torch.tanh(whx + self.r_h[l](self.do_h[l + 1](s)))
                t = torch.sigmoid(wtx + self.r_t[l](self.do_t[l + 1](s)))
                # The new state: tied carry, s' = h*t + s*(1-t)
                s = (h - s) * t + s
            # Here the output is the current s
            outputs.append(s)
        return torch.stack(outputs, 1), s
class Rhn(RhnBase):
    """Implements Recurrent Highway Networks from Zilly et al. (2017).

    Full variant with independent H, T and C gates, raw weight/bias
    Parameters (no nn.Linear), and per-gate dropouts.
    """

    def __init__(self, input_size, hidden_size, num_layers, input_dropout=0,
                 state_dropout=0, transform_bias=None):
        super(Rhn, self).__init__(input_size, hidden_size, num_layers,
                                  input_dropout, state_dropout, transform_bias)
        # NOTE: the original re-assigned input_size/hidden_size/num_layers/
        # input_dropout/state_dropout here, duplicating RhnBase.__init__;
        # those redundant assignments are removed.
        # Index 0 drops the input; indices 1..num_layers drop the state.
        self.do_h = [create_dropout(input_dropout if l == 0 else state_dropout)
                     for l in range(self.num_layers + 1)]
        self.do_t = [create_dropout(input_dropout if l == 0 else state_dropout)
                     for l in range(self.num_layers + 1)]
        self.do_c = [create_dropout(input_dropout if l == 0 else state_dropout)
                     for l in range(self.num_layers + 1)]
        # Register dropout modules so they follow the module tree.
        for letter, do_list in [('H', self.do_h), ('T', self.do_t), ('C', self.do_c)]:
            self.add_module('Do_{}_w'.format(letter), do_list[0])
            for l, do in enumerate(do_list[1:], 1):
                self.add_module('Do_{}_{}'.format(letter, l), do)
        # Input-to-hidden weights (first layer only).
        self.w_h = nn.Parameter(torch.Tensor(input_size, hidden_size))
        self.w_t = nn.Parameter(torch.Tensor(input_size, hidden_size))
        self.w_c = nn.Parameter(torch.Tensor(input_size, hidden_size))
        # Recurrent weights and biases, one per highway layer and gate.
        self.r_hw = [nn.Parameter(torch.Tensor(hidden_size, hidden_size))
                     for l in range(self.num_layers)]
        self.r_tw = [nn.Parameter(torch.Tensor(hidden_size, hidden_size))
                     for l in range(self.num_layers)]
        self.r_cw = [nn.Parameter(torch.Tensor(hidden_size, hidden_size))
                     for l in range(self.num_layers)]
        self.r_hb = [nn.Parameter(torch.Tensor(hidden_size))
                     for l in range(self.num_layers)]
        self.r_tb = [nn.Parameter(torch.Tensor(hidden_size))
                     for l in range(self.num_layers)]
        self.r_cb = [nn.Parameter(torch.Tensor(hidden_size))
                     for l in range(self.num_layers)]
        # Register the raw Parameters (lists are invisible to nn.Module).
        for letter, lst in [('H', self.r_hw), ('T', self.r_tw), ('C', self.r_cw)]:
            for l, p in enumerate(lst, 1):
                self.register_parameter('R_{}_{}_weight'.format(letter, l), p)
        for letter, lst in [('H', self.r_hb), ('T', self.r_tb), ('C', self.r_cb)]:
            for l, p in enumerate(lst, 1):
                self.register_parameter('R_{}_{}_bias'.format(letter, l), p)

    @classmethod
    def initialize_t(cls, module, _):
        """Initializes the transform gate biases, then disarms itself."""
        if module.t_bias is not None:
            for p in module.r_tb:
                # nn.init.constant was deprecated and later removed;
                # constant_ is the in-place replacement.
                nn.init.constant_(p, module.t_bias)
            module.t_bias = None

    def forward(self, input, s):
        """Run the RHN; returns (batch x time x hidden outputs, final state).

        ``input`` is assumed batch x time x input_size, judging by the
        chunk(input.size(1), dim=1) below — TODO confirm.
        """
        outputs = []
        # To initialize per-sequence dropout
        for do in self.do_h + self.do_t + self.do_c:
            do.reset_noise()
        # chunk() cuts batch_size x 1 x input_size chunks from input
        for input_t in map(torch.squeeze, input.chunk(input.size(1), dim=1)):
            for l in range(self.num_layers):
                # The input is processed only by the first layer
                whx = self.do_h[0](input_t).matmul(self.w_h) if l == 0 else 0
                wtx = self.do_t[0](input_t).matmul(self.w_t) if l == 0 else 0
                wcx = self.do_c[0](input_t).matmul(self.w_c) if l == 0 else 0
                rhs = self.do_h[l + 1](s).matmul(self.r_hw[l]) + self.r_hb[l]
                rts = self.do_t[l + 1](s).matmul(self.r_tw[l]) + self.r_tb[l]
                rcs = self.do_c[l + 1](s).matmul(self.r_cw[l]) + self.r_cb[l]
                # The gates (and the state)
                h = torch.tanh(whx + rhs)
                t = torch.sigmoid(wtx + rts)
                c = torch.sigmoid(wcx + rcs)
                # The new state: independent carry gate
                s = h * t + s * c
            # Here the output is the current s
            outputs.append(s)
        return torch.stack(outputs, 1), s
class RhnLin(RhnBase):
    """Implements Recurrent Highway Networks from Zilly et al. (2017).

    Variant with independent H, T and C gates built from nn.Linear
    recurrent transforms, and per-gate dropouts.
    """

    def __init__(self, input_size, hidden_size, num_layers, input_dropout=0,
                 state_dropout=0, transform_bias=None):
        super(RhnLin, self).__init__(input_size, hidden_size, num_layers,
                                     input_dropout, state_dropout,
                                     transform_bias)
        # Index 0 drops the input; indices 1..num_layers drop the state.
        self.do_h = [create_dropout(input_dropout if l == 0 else state_dropout)
                     for l in range(self.num_layers + 1)]
        self.do_t = [create_dropout(input_dropout if l == 0 else state_dropout)
                     for l in range(self.num_layers + 1)]
        self.do_c = [create_dropout(input_dropout if l == 0 else state_dropout)
                     for l in range(self.num_layers + 1)]
        # Register dropout modules so they follow the module tree.
        for letter, do_list in [('H', self.do_h), ('T', self.do_t), ('C', self.do_c)]:
            self.add_module('Do_{}_w'.format(letter), do_list[0])
            for l, do in enumerate(do_list[1:], 1):
                self.add_module('Do_{}_{}'.format(letter, l), do)
        # Input-to-hidden weights (first layer only).
        self.w_h = nn.Parameter(torch.Tensor(input_size, hidden_size))
        self.w_t = nn.Parameter(torch.Tensor(input_size, hidden_size))
        self.w_c = nn.Parameter(torch.Tensor(input_size, hidden_size))
        # Recurrent transforms, one per highway layer and gate.
        self.r_h = [nn.Linear(hidden_size, hidden_size)
                    for l in range(self.num_layers)]
        self.r_t = [nn.Linear(hidden_size, hidden_size)
                    for l in range(self.num_layers)]
        self.r_c = [nn.Linear(hidden_size, hidden_size)
                    for l in range(self.num_layers)]
        for letter, lst in [('H', self.r_h), ('T', self.r_t), ('C', self.r_c)]:
            for l, p in enumerate(lst, 1):
                self.add_module('Rb_{}_{}'.format(letter, l), p)

    @classmethod
    def initialize_t(cls, module, _):
        """Initializes the transform gate biases, then disarms itself."""
        if module.t_bias is not None:
            for p in module.r_t:
                # nn.init.constant was deprecated and later removed;
                # constant_ is the in-place replacement.
                nn.init.constant_(p.bias, module.t_bias)
            module.t_bias = None

    def forward(self, input, s):
        """Run the RHN; returns (batch x time x hidden outputs, final state).

        ``input`` is assumed batch x time x input_size, judging by the
        chunk(input.size(1), dim=1) below — TODO confirm.
        """
        outputs = []
        # To initialize per-sequence dropout
        for do in self.do_h + self.do_t + self.do_c:
            do.reset_noise()
        # chunk() cuts batch_size x 1 x input_size chunks from input
        for input_t in map(torch.squeeze, input.chunk(input.size(1), dim=1)):
            for l in range(self.num_layers):
                # The input is processed only by the first layer
                whx = self.do_h[0](input_t).matmul(self.w_h) if l == 0 else 0
                wtx = self.do_t[0](input_t).matmul(self.w_t) if l == 0 else 0
                wcx = self.do_c[0](input_t).matmul(self.w_c) if l == 0 else 0
                # The gates (and the state)
                h = torch.tanh(whx + self.r_h[l](self.do_h[l + 1](s)))
                t = torch.sigmoid(wtx + self.r_t[l](self.do_t[l + 1](s)))
                c = torch.sigmoid(wcx + self.r_c[l](self.do_c[l + 1](s)))
                # The new state: independent carry gate
                s = h * t + s * c
            # Here the output is the current s
            outputs.append(s)
        return torch.stack(outputs, 1), s
| 44.908197
| 86
| 0.575162
| 2,004
| 13,697
| 3.715569
| 0.078842
| 0.063121
| 0.058286
| 0.036933
| 0.877384
| 0.871743
| 0.862342
| 0.856164
| 0.853881
| 0.842734
| 0
| 0.011598
| 0.307586
| 13,697
| 304
| 87
| 45.055921
| 0.773513
| 0.120318
| 0
| 0.752381
| 0
| 0
| 0.009715
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.019048
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a9e77b7c562b88797d2751ce704e9dfde52da7c8
| 74
|
py
|
Python
|
datamol/scaffold/__init__.py
|
hengwei-chan/fragmentation_and_assemble
|
35b56dd1c97cfa6731bcbfd68c3eb01bfcd7668f
|
[
"Apache-2.0"
] | 130
|
2021-04-08T11:15:27.000Z
|
2022-03-25T01:51:47.000Z
|
datamol/scaffold/__init__.py
|
hengwei-chan/fragmentation_and_assemble
|
35b56dd1c97cfa6731bcbfd68c3eb01bfcd7668f
|
[
"Apache-2.0"
] | 72
|
2021-04-08T11:46:51.000Z
|
2022-03-29T01:27:41.000Z
|
datamol/scaffold/__init__.py
|
hengwei-chan/fragmentation_and_assemble
|
35b56dd1c97cfa6731bcbfd68c3eb01bfcd7668f
|
[
"Apache-2.0"
] | 11
|
2021-04-20T10:27:38.000Z
|
2022-03-07T07:29:30.000Z
|
from ._fuzzy import trim_side_chain
from ._fuzzy import fuzzy_scaffolding
| 24.666667
| 37
| 0.864865
| 11
| 74
| 5.363636
| 0.636364
| 0.305085
| 0.508475
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 74
| 2
| 38
| 37
| 0.893939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e71648210307ad389e0a0d10d62767d2f521d5f1
| 2,776
|
py
|
Python
|
tests/test_query.py
|
billyrrr/firestore-odm
|
1a63c51a13a6025d7dd05630091619644c41cba0
|
[
"MIT"
] | 1
|
2021-05-01T11:58:52.000Z
|
2021-05-01T11:58:52.000Z
|
tests/test_query.py
|
billyrrr/firestore-odm
|
1a63c51a13a6025d7dd05630091619644c41cba0
|
[
"MIT"
] | null | null | null |
tests/test_query.py
|
billyrrr/firestore-odm
|
1a63c51a13a6025d7dd05630091619644c41cba0
|
[
"MIT"
] | null | null | null |
import pytest
from .fixtures import CTX
from .city_fixtures import setup_cities, City
from firestore_odm.cmp import v
# Expected serialized form of the three USA cities created by setup_cities.
# Shared by both query tests below (the original duplicated this table).
_EXPECTED_USA_CITIES = {
    'Washington D.C.': {
        'cityName': 'Washington D.C.',
        'country': 'USA',
        'capital': True,
        'obj_type': "Municipality",
        'doc_id': 'DC',
        'doc_ref': 'City/DC'
    },
    'San Francisco': {
        'cityName': 'San Francisco',
        'cityState': 'CA',
        'country': 'USA',
        'capital': False,
        'regions': ['west_coast', 'norcal'],
        'obj_type': "StandardCity",
        'doc_id': 'SF',
        'doc_ref': 'City/SF'
    },
    'Los Angeles': {
        'cityName': 'Los Angeles',
        'cityState': 'CA',
        'country': 'USA',
        'capital': False,
        'regions': ['west_coast', 'socal'],
        'obj_type': "StandardCity",
        'doc_id': 'LA',
        'doc_ref': 'City/LA'
    }
}


def _assert_query_matches_expected(query):
    """Run *query*, index results by cityName, compare with the table."""
    res_dict = dict()
    for obj in query:
        d = obj.to_dict()
        res_dict[d["cityName"]] = d
    for city in ('Washington D.C.', 'San Francisco', 'Los Angeles'):
        assert res_dict[city] == _EXPECTED_USA_CITIES[city]


@pytest.mark.usefixtures("setup_cities")
def test_query_with_attr_str():
    """where() with the ("==", value) tuple syntax returns all USA cities."""
    _assert_query_matches_expected(City.where(country=("==", "USA")))


@pytest.mark.usefixtures("setup_cities")
def test_query_with_cmp():
    """where() with the v.<field> comparator syntax returns all USA cities."""
    _assert_query_matches_expected(City.where(v.country == "USA"))
| 28.040404
| 74
| 0.493156
| 283
| 2,776
| 4.646643
| 0.204947
| 0.053232
| 0.073004
| 0.073004
| 0.918631
| 0.918631
| 0.918631
| 0.918631
| 0.918631
| 0.845627
| 0
| 0
| 0.340058
| 2,776
| 98
| 75
| 28.326531
| 0.717795
| 0
| 0
| 0.804878
| 0
| 0
| 0.322883
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 1
| 0.02439
| false
| 0
| 0.04878
| 0
| 0.073171
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e7a7faae6fcfca06d1450576ae9a1b6fa747e3b6
| 73
|
py
|
Python
|
django/myvenv/lib/python3.8/site-packages/krwordrank/word/__init__.py
|
mu1616/relay_06
|
9db6385c88d06cad861245f80c2b1799170be905
|
[
"MIT"
] | 4
|
2020-07-27T02:50:37.000Z
|
2021-04-06T09:48:30.000Z
|
django/myvenv/lib/python3.8/site-packages/krwordrank/word/__init__.py
|
mu1616/relay_06
|
9db6385c88d06cad861245f80c2b1799170be905
|
[
"MIT"
] | 2
|
2020-08-07T07:33:37.000Z
|
2020-08-16T14:50:56.000Z
|
django/myvenv/lib/python3.8/site-packages/krwordrank/word/__init__.py
|
mu1616/relay_06
|
9db6385c88d06cad861245f80c2b1799170be905
|
[
"MIT"
] | 11
|
2020-07-31T08:20:43.000Z
|
2020-08-21T04:08:29.000Z
|
from ._word import summarize_with_keywords
from ._word import KRWordRank
| 24.333333
| 42
| 0.863014
| 10
| 73
| 5.9
| 0.7
| 0.271186
| 0.474576
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109589
| 73
| 2
| 43
| 36.5
| 0.907692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e7b45320600a71d6d7c3ad6f81e0326bcc545405
| 295
|
py
|
Python
|
aior/components/__init__.py
|
Dephin/aior
|
f0810eb5dadecc3ca2597094a7636509ef69584d
|
[
"MIT"
] | null | null | null |
aior/components/__init__.py
|
Dephin/aior
|
f0810eb5dadecc3ca2597094a7636509ef69584d
|
[
"MIT"
] | null | null | null |
aior/components/__init__.py
|
Dephin/aior
|
f0810eb5dadecc3ca2597094a7636509ef69584d
|
[
"MIT"
] | null | null | null |
from aior.components.http_exceptions import *
from aior.components.http_handler import *
from aior.components.http_status import *
from aior.components.ws_exceptions import *
from aior.components.ws_handlers import *
from aior.components.stdin_handler import *
from aior.components.dao import *
| 36.875
| 45
| 0.833898
| 41
| 295
| 5.853659
| 0.292683
| 0.233333
| 0.525
| 0.6
| 0.691667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094915
| 295
| 7
| 46
| 42.142857
| 0.898876
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e7c1e26930af54df50d0c5f79a0f2444ca975802
| 39,333
|
py
|
Python
|
sdk/python/pulumi_azure/desktopvirtualization/scaling_plan.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/desktopvirtualization/scaling_plan.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/desktopvirtualization/scaling_plan.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ScalingPlanArgs', 'ScalingPlan']
@pulumi.input_type
class ScalingPlanArgs:
    """Input bag of constructor arguments for a ``ScalingPlan`` resource.

    Generated by the Pulumi Terraform Bridge (tfgen); each field is exposed
    both as a keyword argument on ``__init__`` and as a property pair below.
    """
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 schedules: pulumi.Input[Sequence[pulumi.Input['ScalingPlanScheduleArgs']]],
                 time_zone: pulumi.Input[str],
                 description: Optional[pulumi.Input[str]] = None,
                 exclusion_tag: Optional[pulumi.Input[str]] = None,
                 friendly_name: Optional[pulumi.Input[str]] = None,
                 host_pools: Optional[pulumi.Input[Sequence[pulumi.Input['ScalingPlanHostPoolArgs']]]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a ScalingPlan resource.
        :param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
        :param pulumi.Input[Sequence[pulumi.Input['ScalingPlanScheduleArgs']]] schedules: One or more `schedule` blocks as defined below.
        :param pulumi.Input[str] time_zone: Specifies the Time Zone which should be used by the Scaling Plan for time based events, [the possible values are defined here](https://jackstromberg.com/2017/01/list-of-time-zones-consumed-by-azure/).
        :param pulumi.Input[str] description: A description of the Scaling Plan.
        :param pulumi.Input[str] exclusion_tag: The name of the tag associated with the VMs you want to exclude from autoscaling.
        :param pulumi.Input[str] friendly_name: Friendly name of the Scaling Plan.
        :param pulumi.Input[Sequence[pulumi.Input['ScalingPlanHostPoolArgs']]] host_pools: One or more `host_pool` blocks as defined below.
        :param pulumi.Input[str] location: The Azure Region where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
        :param pulumi.Input[str] name: The name which should be used for this Virtual Desktop Scaling Plan. Changing this forces a new Virtual Desktop Scaling Plan to be created.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the Virtual Desktop Scaling Plan.
        """
        # Required arguments are always stored; optional ones only when supplied,
        # so unset properties stay absent from the underlying property bag.
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "schedules", schedules)
        pulumi.set(__self__, "time_zone", time_zone)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if exclusion_tag is not None:
            pulumi.set(__self__, "exclusion_tag", exclusion_tag)
        if friendly_name is not None:
            pulumi.set(__self__, "friendly_name", friendly_name)
        if host_pools is not None:
            pulumi.set(__self__, "host_pools", host_pools)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the Resource Group where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter
    def schedules(self) -> pulumi.Input[Sequence[pulumi.Input['ScalingPlanScheduleArgs']]]:
        """
        One or more `schedule` blocks as defined below.
        """
        return pulumi.get(self, "schedules")
    @schedules.setter
    def schedules(self, value: pulumi.Input[Sequence[pulumi.Input['ScalingPlanScheduleArgs']]]):
        pulumi.set(self, "schedules", value)
    @property
    @pulumi.getter(name="timeZone")
    def time_zone(self) -> pulumi.Input[str]:
        """
        Specifies the Time Zone which should be used by the Scaling Plan for time based events, [the possible values are defined here](https://jackstromberg.com/2017/01/list-of-time-zones-consumed-by-azure/).
        """
        return pulumi.get(self, "time_zone")
    @time_zone.setter
    def time_zone(self, value: pulumi.Input[str]):
        pulumi.set(self, "time_zone", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A description of the Scaling Plan.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="exclusionTag")
    def exclusion_tag(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the tag associated with the VMs you want to exclude from autoscaling.
        """
        return pulumi.get(self, "exclusion_tag")
    @exclusion_tag.setter
    def exclusion_tag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "exclusion_tag", value)
    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[pulumi.Input[str]]:
        """
        Friendly name of the Scaling Plan.
        """
        return pulumi.get(self, "friendly_name")
    @friendly_name.setter
    def friendly_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "friendly_name", value)
    @property
    @pulumi.getter(name="hostPools")
    def host_pools(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScalingPlanHostPoolArgs']]]]:
        """
        One or more `host_pool` blocks as defined below.
        """
        return pulumi.get(self, "host_pools")
    @host_pools.setter
    def host_pools(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScalingPlanHostPoolArgs']]]]):
        pulumi.set(self, "host_pools", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        The Azure Region where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name which should be used for this Virtual Desktop Scaling Plan. Changing this forces a new Virtual Desktop Scaling Plan to be created.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A mapping of tags which should be assigned to the Virtual Desktop Scaling Plan.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class _ScalingPlanState:
    """Input bag describing the *state* of an existing ``ScalingPlan``.

    Unlike ``ScalingPlanArgs`` every field here is optional, because state
    lookups (``ScalingPlan.get``) may filter on any subset of properties.
    Generated by the Pulumi Terraform Bridge (tfgen).
    """
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 exclusion_tag: Optional[pulumi.Input[str]] = None,
                 friendly_name: Optional[pulumi.Input[str]] = None,
                 host_pools: Optional[pulumi.Input[Sequence[pulumi.Input['ScalingPlanHostPoolArgs']]]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 schedules: Optional[pulumi.Input[Sequence[pulumi.Input['ScalingPlanScheduleArgs']]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 time_zone: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering ScalingPlan resources.
        :param pulumi.Input[str] description: A description of the Scaling Plan.
        :param pulumi.Input[str] exclusion_tag: The name of the tag associated with the VMs you want to exclude from autoscaling.
        :param pulumi.Input[str] friendly_name: Friendly name of the Scaling Plan.
        :param pulumi.Input[Sequence[pulumi.Input['ScalingPlanHostPoolArgs']]] host_pools: One or more `host_pool` blocks as defined below.
        :param pulumi.Input[str] location: The Azure Region where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
        :param pulumi.Input[str] name: The name which should be used for this Virtual Desktop Scaling Plan. Changing this forces a new Virtual Desktop Scaling Plan to be created.
        :param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
        :param pulumi.Input[Sequence[pulumi.Input['ScalingPlanScheduleArgs']]] schedules: One or more `schedule` blocks as defined below.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the Virtual Desktop Scaling Plan.
        :param pulumi.Input[str] time_zone: Specifies the Time Zone which should be used by the Scaling Plan for time based events, [the possible values are defined here](https://jackstromberg.com/2017/01/list-of-time-zones-consumed-by-azure/).
        """
        # Only populate properties the caller actually supplied; unset fields
        # must remain absent so they do not participate in the lookup/filter.
        if description is not None:
            pulumi.set(__self__, "description", description)
        if exclusion_tag is not None:
            pulumi.set(__self__, "exclusion_tag", exclusion_tag)
        if friendly_name is not None:
            pulumi.set(__self__, "friendly_name", friendly_name)
        if host_pools is not None:
            pulumi.set(__self__, "host_pools", host_pools)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if resource_group_name is not None:
            pulumi.set(__self__, "resource_group_name", resource_group_name)
        if schedules is not None:
            pulumi.set(__self__, "schedules", schedules)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if time_zone is not None:
            pulumi.set(__self__, "time_zone", time_zone)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A description of the Scaling Plan.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="exclusionTag")
    def exclusion_tag(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the tag associated with the VMs you want to exclude from autoscaling.
        """
        return pulumi.get(self, "exclusion_tag")
    @exclusion_tag.setter
    def exclusion_tag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "exclusion_tag", value)
    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[pulumi.Input[str]]:
        """
        Friendly name of the Scaling Plan.
        """
        return pulumi.get(self, "friendly_name")
    @friendly_name.setter
    def friendly_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "friendly_name", value)
    @property
    @pulumi.getter(name="hostPools")
    def host_pools(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScalingPlanHostPoolArgs']]]]:
        """
        One or more `host_pool` blocks as defined below.
        """
        return pulumi.get(self, "host_pools")
    @host_pools.setter
    def host_pools(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScalingPlanHostPoolArgs']]]]):
        pulumi.set(self, "host_pools", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        The Azure Region where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name which should be used for this Virtual Desktop Scaling Plan. Changing this forces a new Virtual Desktop Scaling Plan to be created.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Resource Group where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter
    def schedules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScalingPlanScheduleArgs']]]]:
        """
        One or more `schedule` blocks as defined below.
        """
        return pulumi.get(self, "schedules")
    @schedules.setter
    def schedules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScalingPlanScheduleArgs']]]]):
        pulumi.set(self, "schedules", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A mapping of tags which should be assigned to the Virtual Desktop Scaling Plan.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="timeZone")
    def time_zone(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the Time Zone which should be used by the Scaling Plan for time based events, [the possible values are defined here](https://jackstromberg.com/2017/01/list-of-time-zones-consumed-by-azure/).
        """
        return pulumi.get(self, "time_zone")
    @time_zone.setter
    def time_zone(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "time_zone", value)
class ScalingPlan(pulumi.CustomResource):
    """A Virtual Desktop Scaling Plan resource (azure:desktopvirtualization/scalingPlan:ScalingPlan).

    Generated by the Pulumi Terraform Bridge (tfgen). ``__init__`` accepts
    either keyword properties or a single ``ScalingPlanArgs`` bag; the two
    ``@overload`` stubs below document both call shapes, and the real
    ``__init__`` dispatches to ``_internal_init``.
    """
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 exclusion_tag: Optional[pulumi.Input[str]] = None,
                 friendly_name: Optional[pulumi.Input[str]] = None,
                 host_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScalingPlanHostPoolArgs']]]]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 schedules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScalingPlanScheduleArgs']]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 time_zone: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Manages a Virtual Desktop Scaling Plan.
        ## Disclaimers
        > **Note** Scaling Plans are currently in preview and are only supported in a limited number of regions. Both the Scaling Plan and any referenced Host Pools must be deployed in a supported region. [Autoscale (preview) for Azure Virtual Desktop host pools](https://docs.microsoft.com/en-us/azure/virtual-desktop/autoscale-scaling-plan).
        > **Note** Scaling Plans require specific permissions to be granted to the Windows Virtual Desktop application before a 'host_pool' can be configured. [Required Permissions for Scaling Plans](https://docs.microsoft.com/en-us/azure/virtual-desktop/autoscale-scaling-plan#create-a-custom-rbac-role).
        ## Example Usage
        ```python
        import pulumi
        import pulumi_azure as azure
        import pulumi_azuread as azuread
        example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
        example_role_definition = azure.authorization.RoleDefinition("exampleRoleDefinition",
            scope=example_resource_group.id,
            description="AVD AutoScale Role",
            permissions=[azure.authorization.RoleDefinitionPermissionArgs(
                actions=[
                    "Microsoft.Insights/eventtypes/values/read",
                    "Microsoft.Compute/virtualMachines/deallocate/action",
                    "Microsoft.Compute/virtualMachines/restart/action",
                    "Microsoft.Compute/virtualMachines/powerOff/action",
                    "Microsoft.Compute/virtualMachines/start/action",
                    "Microsoft.Compute/virtualMachines/read",
                    "Microsoft.DesktopVirtualization/hostpools/read",
                    "Microsoft.DesktopVirtualization/hostpools/write",
                    "Microsoft.DesktopVirtualization/hostpools/sessionhosts/read",
                    "Microsoft.DesktopVirtualization/hostpools/sessionhosts/write",
                    "Microsoft.DesktopVirtualization/hostpools/sessionhosts/usersessions/delete",
                    "Microsoft.DesktopVirtualization/hostpools/sessionhosts/usersessions/read",
                    "Microsoft.DesktopVirtualization/hostpools/sessionhosts/usersessions/sendMessage/action",
                    "Microsoft.DesktopVirtualization/hostpools/sessionhosts/usersessions/read",
                ],
                not_actions=[],
            )],
            assignable_scopes=[example_resource_group.id])
        example_service_principal = azuread.get_service_principal(display_name="Windows Virtual Desktop")
        example_assignment = azure.authorization.Assignment("exampleAssignment",
            name=random_uuid["example"]["result"],
            scope=example_resource_group.id,
            role_definition_id=example_role_definition.role_definition_resource_id,
            principal_id=example_service_principal.application_id,
            skip_service_principal_aad_check=True)
        example_host_pool = azure.desktopvirtualization.HostPool("exampleHostPool",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            type="Pooled",
            validate_environment=True,
            load_balancer_type="BreadthFirst")
        example_scaling_plan = azure.desktopvirtualization.ScalingPlan("exampleScalingPlan",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            friendly_name="Scaling Plan Example",
            description="Example Scaling Plan",
            time_zone="GMT Standard Time",
            schedules=[azure.desktopvirtualization.ScalingPlanScheduleArgs(
                name="Weekdays",
                days_of_weeks=[
                    "Monday",
                    "Tuesday",
                    "Wednesday",
                    "Thursday",
                    "Friday",
                ],
                ramp_up_start_time="05:00",
                ramp_up_load_balancing_algorithm="BreadthFirst",
                ramp_up_minimum_hosts_percent=20,
                ramp_up_capacity_threshold_percent=10,
                peak_start_time="09:00",
                peak_load_balancing_algorithm="BreadthFirst",
                ramp_down_start_time="19:00",
                ramp_down_load_balancing_algorithm="DepthFirst",
                ramp_down_minimum_hosts_percent=10,
                ramp_down_force_logoff_users=False,
                ramp_down_wait_time_minutes=45,
                ramp_down_notification_message="Please log off in the next 45 minutes...",
                ramp_down_capacity_threshold_percent=5,
                ramp_down_stop_hosts_when="ZeroSessions",
                off_peak_start_time="22:00",
                off_peak_load_balancing_algorithm="DepthFirst",
            )],
            host_pools=[azure.desktopvirtualization.ScalingPlanHostPoolArgs(
                hostpool_id=example_host_pool.id,
                scaling_plan_enabled=True,
            )])
        ```
        ## Import
        Virtual Desktop Scaling Plans can be imported using the `resource id`, e.g.
        ```sh
        $ pulumi import azure:desktopvirtualization/scalingPlan:ScalingPlan example /subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/scalingPlans/plan1
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: A description of the Scaling Plan.
        :param pulumi.Input[str] exclusion_tag: The name of the tag associated with the VMs you want to exclude from autoscaling.
        :param pulumi.Input[str] friendly_name: Friendly name of the Scaling Plan.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScalingPlanHostPoolArgs']]]] host_pools: One or more `host_pool` blocks as defined below.
        :param pulumi.Input[str] location: The Azure Region where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
        :param pulumi.Input[str] name: The name which should be used for this Virtual Desktop Scaling Plan. Changing this forces a new Virtual Desktop Scaling Plan to be created.
        :param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScalingPlanScheduleArgs']]]] schedules: One or more `schedule` blocks as defined below.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the Virtual Desktop Scaling Plan.
        :param pulumi.Input[str] time_zone: Specifies the Time Zone which should be used by the Scaling Plan for time based events, [the possible values are defined here](https://jackstromberg.com/2017/01/list-of-time-zones-consumed-by-azure/).
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ScalingPlanArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages a Virtual Desktop Scaling Plan.
        ## Disclaimers
        > **Note** Scaling Plans are currently in preview and are only supported in a limited number of regions. Both the Scaling Plan and any referenced Host Pools must be deployed in a supported region. [Autoscale (preview) for Azure Virtual Desktop host pools](https://docs.microsoft.com/en-us/azure/virtual-desktop/autoscale-scaling-plan).
        > **Note** Scaling Plans require specific permissions to be granted to the Windows Virtual Desktop application before a 'host_pool' can be configured. [Required Permissions for Scaling Plans](https://docs.microsoft.com/en-us/azure/virtual-desktop/autoscale-scaling-plan#create-a-custom-rbac-role).
        ## Example Usage
        ```python
        import pulumi
        import pulumi_azure as azure
        import pulumi_azuread as azuread
        example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
        example_role_definition = azure.authorization.RoleDefinition("exampleRoleDefinition",
            scope=example_resource_group.id,
            description="AVD AutoScale Role",
            permissions=[azure.authorization.RoleDefinitionPermissionArgs(
                actions=[
                    "Microsoft.Insights/eventtypes/values/read",
                    "Microsoft.Compute/virtualMachines/deallocate/action",
                    "Microsoft.Compute/virtualMachines/restart/action",
                    "Microsoft.Compute/virtualMachines/powerOff/action",
                    "Microsoft.Compute/virtualMachines/start/action",
                    "Microsoft.Compute/virtualMachines/read",
                    "Microsoft.DesktopVirtualization/hostpools/read",
                    "Microsoft.DesktopVirtualization/hostpools/write",
                    "Microsoft.DesktopVirtualization/hostpools/sessionhosts/read",
                    "Microsoft.DesktopVirtualization/hostpools/sessionhosts/write",
                    "Microsoft.DesktopVirtualization/hostpools/sessionhosts/usersessions/delete",
                    "Microsoft.DesktopVirtualization/hostpools/sessionhosts/usersessions/read",
                    "Microsoft.DesktopVirtualization/hostpools/sessionhosts/usersessions/sendMessage/action",
                    "Microsoft.DesktopVirtualization/hostpools/sessionhosts/usersessions/read",
                ],
                not_actions=[],
            )],
            assignable_scopes=[example_resource_group.id])
        example_service_principal = azuread.get_service_principal(display_name="Windows Virtual Desktop")
        example_assignment = azure.authorization.Assignment("exampleAssignment",
            name=random_uuid["example"]["result"],
            scope=example_resource_group.id,
            role_definition_id=example_role_definition.role_definition_resource_id,
            principal_id=example_service_principal.application_id,
            skip_service_principal_aad_check=True)
        example_host_pool = azure.desktopvirtualization.HostPool("exampleHostPool",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            type="Pooled",
            validate_environment=True,
            load_balancer_type="BreadthFirst")
        example_scaling_plan = azure.desktopvirtualization.ScalingPlan("exampleScalingPlan",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            friendly_name="Scaling Plan Example",
            description="Example Scaling Plan",
            time_zone="GMT Standard Time",
            schedules=[azure.desktopvirtualization.ScalingPlanScheduleArgs(
                name="Weekdays",
                days_of_weeks=[
                    "Monday",
                    "Tuesday",
                    "Wednesday",
                    "Thursday",
                    "Friday",
                ],
                ramp_up_start_time="05:00",
                ramp_up_load_balancing_algorithm="BreadthFirst",
                ramp_up_minimum_hosts_percent=20,
                ramp_up_capacity_threshold_percent=10,
                peak_start_time="09:00",
                peak_load_balancing_algorithm="BreadthFirst",
                ramp_down_start_time="19:00",
                ramp_down_load_balancing_algorithm="DepthFirst",
                ramp_down_minimum_hosts_percent=10,
                ramp_down_force_logoff_users=False,
                ramp_down_wait_time_minutes=45,
                ramp_down_notification_message="Please log off in the next 45 minutes...",
                ramp_down_capacity_threshold_percent=5,
                ramp_down_stop_hosts_when="ZeroSessions",
                off_peak_start_time="22:00",
                off_peak_load_balancing_algorithm="DepthFirst",
            )],
            host_pools=[azure.desktopvirtualization.ScalingPlanHostPoolArgs(
                hostpool_id=example_host_pool.id,
                scaling_plan_enabled=True,
            )])
        ```
        ## Import
        Virtual Desktop Scaling Plans can be imported using the `resource id`, e.g.
        ```sh
        $ pulumi import azure:desktopvirtualization/scalingPlan:ScalingPlan example /subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/scalingPlans/plan1
        ```
        :param str resource_name: The name of the resource.
        :param ScalingPlanArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Decide which overload the caller used: a ScalingPlanArgs bag or
        # plain keyword properties. Either way, _internal_init does the work.
        resource_args, opts = _utilities.get_resource_args_opts(ScalingPlanArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 exclusion_tag: Optional[pulumi.Input[str]] = None,
                 friendly_name: Optional[pulumi.Input[str]] = None,
                 host_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScalingPlanHostPoolArgs']]]]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 schedules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScalingPlanScheduleArgs']]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 time_zone: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Normalize/validate resource options before registration.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # No opts.id means we are creating (not adopting) the resource:
            # build the property bag here and enforce the required inputs.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ScalingPlanArgs.__new__(ScalingPlanArgs)
            __props__.__dict__["description"] = description
            __props__.__dict__["exclusion_tag"] = exclusion_tag
            __props__.__dict__["friendly_name"] = friendly_name
            __props__.__dict__["host_pools"] = host_pools
            __props__.__dict__["location"] = location
            __props__.__dict__["name"] = name
            # Required properties may only be omitted when the engine supplies
            # an existing URN (opts.urn) for the resource.
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            if schedules is None and not opts.urn:
                raise TypeError("Missing required property 'schedules'")
            __props__.__dict__["schedules"] = schedules
            __props__.__dict__["tags"] = tags
            if time_zone is None and not opts.urn:
                raise TypeError("Missing required property 'time_zone'")
            __props__.__dict__["time_zone"] = time_zone
        super(ScalingPlan, __self__).__init__(
            'azure:desktopvirtualization/scalingPlan:ScalingPlan',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            description: Optional[pulumi.Input[str]] = None,
            exclusion_tag: Optional[pulumi.Input[str]] = None,
            friendly_name: Optional[pulumi.Input[str]] = None,
            host_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScalingPlanHostPoolArgs']]]]] = None,
            location: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            resource_group_name: Optional[pulumi.Input[str]] = None,
            schedules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScalingPlanScheduleArgs']]]]] = None,
            tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            time_zone: Optional[pulumi.Input[str]] = None) -> 'ScalingPlan':
        """
        Get an existing ScalingPlan resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: A description of the Scaling Plan.
        :param pulumi.Input[str] exclusion_tag: The name of the tag associated with the VMs you want to exclude from autoscaling.
        :param pulumi.Input[str] friendly_name: Friendly name of the Scaling Plan.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScalingPlanHostPoolArgs']]]] host_pools: One or more `host_pool` blocks as defined below.
        :param pulumi.Input[str] location: The Azure Region where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
        :param pulumi.Input[str] name: The name which should be used for this Virtual Desktop Scaling Plan. Changing this forces a new Virtual Desktop Scaling Plan to be created.
        :param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScalingPlanScheduleArgs']]]] schedules: One or more `schedule` blocks as defined below.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the Virtual Desktop Scaling Plan.
        :param pulumi.Input[str] time_zone: Specifies the Time Zone which should be used by the Scaling Plan for time based events, [the possible values are defined here](https://jackstromberg.com/2017/01/list-of-time-zones-consumed-by-azure/).
        """
        # Merge the provider ID into the options so the engine performs a
        # lookup of the existing resource instead of creating a new one.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _ScalingPlanState.__new__(_ScalingPlanState)
        __props__.__dict__["description"] = description
        __props__.__dict__["exclusion_tag"] = exclusion_tag
        __props__.__dict__["friendly_name"] = friendly_name
        __props__.__dict__["host_pools"] = host_pools
        __props__.__dict__["location"] = location
        __props__.__dict__["name"] = name
        __props__.__dict__["resource_group_name"] = resource_group_name
        __props__.__dict__["schedules"] = schedules
        __props__.__dict__["tags"] = tags
        __props__.__dict__["time_zone"] = time_zone
        return ScalingPlan(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        A description of the Scaling Plan.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="exclusionTag")
    def exclusion_tag(self) -> pulumi.Output[Optional[str]]:
        """
        The name of the tag associated with the VMs you want to exclude from autoscaling.
        """
        return pulumi.get(self, "exclusion_tag")
    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> pulumi.Output[Optional[str]]:
        """
        Friendly name of the Scaling Plan.
        """
        return pulumi.get(self, "friendly_name")
    @property
    @pulumi.getter(name="hostPools")
    def host_pools(self) -> pulumi.Output[Optional[Sequence['outputs.ScalingPlanHostPool']]]:
        """
        One or more `host_pool` blocks as defined below.
        """
        return pulumi.get(self, "host_pools")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        The Azure Region where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name which should be used for this Virtual Desktop Scaling Plan. Changing this forces a new Virtual Desktop Scaling Plan to be created.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Output[str]:
        """
        The name of the Resource Group where the Virtual Desktop Scaling Plan should exist. Changing this forces a new Virtual Desktop Scaling Plan to be created.
        """
        return pulumi.get(self, "resource_group_name")
    @property
    @pulumi.getter
    def schedules(self) -> pulumi.Output[Sequence['outputs.ScalingPlanSchedule']]:
        """
        One or more `schedule` blocks as defined below.
        """
        return pulumi.get(self, "schedules")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        A mapping of tags which should be assigned to the Virtual Desktop Scaling Plan.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter(name="timeZone")
    def time_zone(self) -> pulumi.Output[str]:
        """
        Specifies the Time Zone which should be used by the Scaling Plan for time based events, [the possible values are defined here](https://jackstromberg.com/2017/01/list-of-time-zones-consumed-by-azure/).
        """
        return pulumi.get(self, "time_zone")
| 51.214844
| 343
| 0.661785
| 4,482
| 39,333
| 5.622936
| 0.07519
| 0.07551
| 0.058884
| 0.049758
| 0.927863
| 0.916396
| 0.905325
| 0.895088
| 0.889255
| 0.870883
| 0
| 0.005532
| 0.241731
| 39,333
| 767
| 344
| 51.281617
| 0.839464
| 0.489691
| 0
| 0.775568
| 1
| 0
| 0.10916
| 0.030894
| 0
| 0
| 0
| 0
| 0
| 1
| 0.161932
| false
| 0.002841
| 0.019886
| 0
| 0.278409
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
99bd776d214bf75318cac9ecc8345a43d80fbccb
| 172
|
py
|
Python
|
commands/validate.py
|
maxachis/city-scrapers-pitt
|
63150a522a512d35b64e4068e25169cf46875598
|
[
"MIT"
] | 5
|
2020-03-26T05:22:20.000Z
|
2021-04-22T12:28:56.000Z
|
commands/validate.py
|
maxachis/city-scrapers-pitt
|
63150a522a512d35b64e4068e25169cf46875598
|
[
"MIT"
] | 109
|
2020-02-09T21:42:36.000Z
|
2021-03-06T21:41:18.000Z
|
commands/validate.py
|
maxachis/city-scrapers-pitt
|
63150a522a512d35b64e4068e25169cf46875598
|
[
"MIT"
] | 15
|
2020-05-29T22:43:34.000Z
|
2021-02-20T02:59:44.000Z
|
"""Import the "validate" command from the city_scrapers_core project"""
import city_scrapers_core.commands.validate as validate
class Command(validate.Command):
    """Local alias exposing the upstream city_scrapers_core ``validate`` command."""
| 24.571429
| 71
| 0.796512
| 23
| 172
| 5.782609
| 0.565217
| 0.225564
| 0.240602
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122093
| 172
| 6
| 72
| 28.666667
| 0.880795
| 0.377907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 8
|
822fe6af386d68d335dbaa910f601172261baec0
| 42,114
|
py
|
Python
|
carbondesign/tests/test_modal_html.py
|
dozymoe/django-carbondesign
|
34aed0cfdccfa90fcb5bf2bbd347229815f1417b
|
[
"MIT"
] | null | null | null |
carbondesign/tests/test_modal_html.py
|
dozymoe/django-carbondesign
|
34aed0cfdccfa90fcb5bf2bbd347229815f1417b
|
[
"MIT"
] | null | null | null |
carbondesign/tests/test_modal_html.py
|
dozymoe/django-carbondesign
|
34aed0cfdccfa90fcb5bf2bbd347229815f1417b
|
[
"MIT"
] | null | null | null |
# pylint:disable=missing-module-docstring,missing-class-docstring,missing-function-docstring,too-many-lines
from .base import compare_template, SimpleTestCase
class ModalHtmlTest(SimpleTestCase):
    """Snapshot tests rendering Modal component templates and comparing them to expected Carbon Design HTML."""

    # Show the full diff on failure; the compared HTML blobs are large.
    maxDiff = None

    def test_danger(self):
        """Danger-variant modal: trigger button, label/heading slots, body copy, and a danger primary action."""
        template = """
{% load carbondesign %}
{% ModalTrigger variant="danger" target="uid" type="button" %}{% endModalTrigger %}
{% Modal id="uid" variant="danger" %}
{% Slot 'label' %}Optional label{% endSlot %}
{% Slot 'heading' %}Modal heading{% endSlot %}
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
{% Slot 'footer' %}
{% Button variant="secondary" type="button" data-modal-close="" %}
Secondary button
{% endButton %}
{% Button variant="danger" type="button" label="Danger" data-modal-primary-focus="" %}
Primary button
{% endButton %}
{% endSlot %}
{% endModal %}
"""
        expected = """
<button class="bx--btn bx--btn--danger" type="button" data-modal-target="#uid">
Show modal
</button>
<div data-modal id="uid" class="bx--modal bx--modal--danger" role="dialog"
aria-modal="true" tabindex="-1" aria-labelledby="label-uid" aria-describedby="heading-uid">
<div class="bx--modal-container">
<div class="bx--modal-header">
<p class="bx--modal-header__label bx--type-delta" id="label-uid">
Optional label
</p>
<p class="bx--modal-header__heading bx--type-beta" id="heading-uid">
Modal heading
</p>
<button class="bx--modal-close" type="button" data-modal-close
aria-label="close modal">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
class="bx--modal-close__icon" width="16" height="16"
viewBox="0 0 32 32" aria-hidden="true">
<path d="M24 9.4L22.6 8 16 14.6 9.4 8 8 9.4 14.6 16 8 22.6 9.4 24 16 17.4 22.6 24 24 22.6 17.4 16 24 9.4z"></path>
</svg>
</button>
</div>
<div class="bx--modal-content">
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
</div>
<div class="bx--modal-content--overflow-indicator"></div>
<div class="bx--modal-footer">
<button class="bx--btn bx--btn--secondary" type="button" data-modal-close="">
Secondary button
</button>
<button class="bx--btn bx--btn--danger" type="button" data-modal-primary-focus="" aria-label="Danger">
Primary button
</button>
</div>
</div>
<span tabindex="0"></span>
</div>
"""
        rendered = compare_template(template, expected)
        self.assertEqual(*rendered)
    def test_input(self):
        """Modal with has_form=True containing a TextInput that receives initial focus."""
        template = """
{% load carbondesign %}
{% ModalTrigger target="uid" type="button" %}{% endModalTrigger %}
{% Modal id="uid" has_form=True %}
{% Slot 'label' %}Optional label{% endSlot %}
{% Slot 'heading' %}Modal heading{% endSlot %}
{% TextInput form.text_empty label="Text Input label" placeholder="Optional placeholder text" data-modal-primary-focus="" %}
{% Slot 'footer' %}
{% Button variant="secondary" type="button" data-modal-close="" %}
Secondary button
{% endButton %}
{% Button type="button" %}
Primary button
{% endButton %}
{% endSlot %}
{% endModal %}
"""
        expected = """
<button class="bx--btn bx--btn--primary" type="button" data-modal-target="#uid">
Show modal
</button>
<div data-modal id="uid" class="bx--modal" role="dialog"
aria-modal="true" tabindex="-1" aria-labelledby="label-uid" aria-describedby="heading-uid">
<div class="bx--modal-container">
<div class="bx--modal-header">
<p class="bx--modal-header__label bx--type-delta" id="label-uid">
Optional label
</p>
<p class="bx--modal-header__heading bx--type-beta" id="heading-uid">
Modal heading
</p>
<button class="bx--modal-close" type="button" data-modal-close
aria-label="close modal">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
class="bx--modal-close__icon" width="16" height="16"
viewBox="0 0 32 32" aria-hidden="true">
<path d="M24 9.4L22.6 8 16 14.6 9.4 8 8 9.4 14.6 16 8 22.6 9.4 24 16 17.4 22.6 24 24 22.6 17.4 16 24 9.4z"></path>
</svg>
</button>
</div>
<div class="bx--modal-content bx--modal-content--with-form" >
<div class="bx--form-item bx--text-input-wrapper">
<label for="id_text_empty" class="bx--label">
Text Input label
</label>
<div class="bx--text-input__field-wrapper">
<input type="text" name="text_empty" placeholder="Optional placeholder text" data-modal-primary-focus="" class="bx--text-input" id="id_text_empty">
</div>
</div>
</div>
<div class="bx--modal-content--overflow-indicator"></div>
<div class="bx--modal-footer">
<button class="bx--btn bx--btn--secondary" type="button" data-modal-close="">
Secondary button
</button>
<button class="bx--btn bx--btn--primary" type="button">
Primary button
</button>
</div>
</div>
<span tabindex="0"></span>
</div>
"""
        rendered = compare_template(template, expected)
        self.assertEqual(*rendered)
    def test_lg(self):
        """Large-size modal (size="lg") with footer action buttons."""
        template = """
{% load carbondesign %}
{% ModalTrigger target="uid" type="button" %}{% endModalTrigger %}
{% Modal id="uid" size="lg" %}
{% Slot 'label' %}Optional label{% endSlot %}
{% Slot 'heading' %}Modal heading{% endSlot %}
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
{% Slot 'footer' %}
{% Button variant="secondary" type="button" data-modal-close="" %}
Secondary button
{% endButton %}
{% Button type="button" data-modal-primary-focus="" %}
Primary button
{% endButton %}
{% endSlot %}
{% endModal %}
"""
        expected = """
<button class="bx--btn bx--btn--primary" type="button" data-modal-target="#uid">
Show modal
</button>
<div data-modal id="uid" class="bx--modal" role="dialog"
aria-modal="true" tabindex="-1" aria-labelledby="label-uid" aria-describedby="heading-uid">
<div class="bx--modal-container bx--modal-container--lg">
<div class="bx--modal-header">
<p class="bx--modal-header__label bx--type-delta" id="label-uid">
Optional label
</p>
<p class="bx--modal-header__heading bx--type-beta" id="heading-uid">
Modal heading
</p>
<button class="bx--modal-close" type="button" data-modal-close
aria-label="close modal">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
class="bx--modal-close__icon" width="16" height="16"
viewBox="0 0 32 32" aria-hidden="true">
<path d="M24 9.4L22.6 8 16 14.6 9.4 8 8 9.4 14.6 16 8 22.6 9.4 24 16 17.4 22.6 24 24 22.6 17.4 16 24 9.4z"></path>
</svg>
</button>
</div>
<div class="bx--modal-content" >
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
</div>
<div class="bx--modal-content--overflow-indicator"></div>
<div class="bx--modal-footer">
<button class="bx--btn bx--btn--secondary" type="button" data-modal-close="">
Secondary button
</button>
<button class="bx--btn bx--btn--primary" type="button" data-modal-primary-focus="">
Primary button
</button>
</div>
</div>
<span tabindex="0"></span>
</div>
"""
        rendered = compare_template(template, expected)
        self.assertEqual(*rendered)
    def test_nofooter_lg(self):
        """Large passive modal without a footer; the close button gets primary focus."""
        template = """
{% load carbondesign %}
{% ModalTrigger target="uid" type="button" %}{% endModalTrigger %}
{% Modal id="uid" size="lg" %}
{% Slot 'label' %}Optional label{% endSlot %}
{% Slot 'heading' %}Modal heading{% endSlot %}
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
{% endModal %}
"""
        expected = """
<button class="bx--btn bx--btn--primary" type="button" data-modal-target="#uid">
Show modal
</button>
<div data-modal id="uid" class="bx--modal" role="dialog"
aria-modal="true" tabindex="-1" aria-labelledby="label-uid" aria-describedby="heading-uid">
<div class="bx--modal-container bx--modal-container--lg">
<div class="bx--modal-header">
<p class="bx--modal-header__label bx--type-delta" id="label-uid">
Optional label
</p>
<p class="bx--modal-header__heading bx--type-beta" id="heading-uid">
Modal heading
</p>
<button class="bx--modal-close" type="button" data-modal-close
aria-label="close modal" data-modal-primary-focus="">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
class="bx--modal-close__icon" width="16" height="16"
viewBox="0 0 32 32" aria-hidden="true">
<path d="M24 9.4L22.6 8 16 14.6 9.4 8 8 9.4 14.6 16 8 22.6 9.4 24 16 17.4 22.6 24 24 22.6 17.4 16 24 9.4z"></path>
</svg>
</button>
</div>
<div class="bx--modal-content">
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
</div>
<div class="bx--modal-content--overflow-indicator"></div>
</div>
<span tabindex="0"></span>
</div>
"""
        rendered = compare_template(template, expected)
        self.assertEqual(*rendered)
    def test_nofooter_sm(self):
        """Small passive modal without a footer; the close button gets primary focus."""
        template = """
{% load carbondesign %}
{% ModalTrigger target="uid" type="button" %}{% endModalTrigger %}
{% Modal id="uid" size="sm" %}
{% Slot 'label' %}Optional label{% endSlot %}
{% Slot 'heading' %}Modal heading{% endSlot %}
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
{% endModal %}
"""
        expected = """
<button class="bx--btn bx--btn--primary" type="button" data-modal-target="#uid">
Show modal
</button>
<div data-modal id="uid" class="bx--modal" role="dialog"
aria-modal="true" tabindex="-1" aria-labelledby="label-uid" aria-describedby="heading-uid">
<div class="bx--modal-container bx--modal-container--sm">
<div class="bx--modal-header">
<p class="bx--modal-header__label bx--type-delta" id="label-uid">
Optional label
</p>
<p class="bx--modal-header__heading bx--type-beta" id="heading-uid">
Modal heading
</p>
<button class="bx--modal-close" type="button" data-modal-close
aria-label="close modal" data-modal-primary-focus="">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
class="bx--modal-close__icon" width="16" height="16"
viewBox="0 0 32 32" aria-hidden="true">
<path d="M24 9.4L22.6 8 16 14.6 9.4 8 8 9.4 14.6 16 8 22.6 9.4 24 16 17.4 22.6 24 24 22.6 17.4 16 24 9.4z"></path>
</svg>
</button>
</div>
<div class="bx--modal-content" >
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
</div>
<div class="bx--modal-content--overflow-indicator"></div>
</div>
<span tabindex="0"></span>
</div>
"""
        rendered = compare_template(template, expected)
        self.assertEqual(*rendered)
    def test_nofooter_xs(self):
        """Extra-small passive modal without a footer; the close button gets primary focus."""
        template = """
{% load carbondesign %}
{% ModalTrigger target="uid" type="button" %}{% endModalTrigger %}
{% Modal id="uid" size="xs" %}
{% Slot 'label' %}Optional label{% endSlot %}
{% Slot 'heading' %}Modal heading{% endSlot %}
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
{% endModal %}
"""
        expected = """
<button class="bx--btn bx--btn--primary" type="button" data-modal-target="#uid">
Show modal
</button>
<div data-modal id="uid" class="bx--modal" role="dialog"
aria-modal="true" tabindex="-1" aria-labelledby="label-uid" aria-describedby="heading-uid">
<div class="bx--modal-container bx--modal-container--xs">
<div class="bx--modal-header">
<p class="bx--modal-header__label bx--type-delta" id="label-uid">
Optional label
</p>
<p class="bx--modal-header__heading bx--type-beta" id="heading-uid">
Modal heading
</p>
<button class="bx--modal-close" type="button" data-modal-close
aria-label="close modal" data-modal-primary-focus="">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
class="bx--modal-close__icon" width="16" height="16"
viewBox="0 0 32 32" aria-hidden="true">
<path d="M24 9.4L22.6 8 16 14.6 9.4 8 8 9.4 14.6 16 8 22.6 9.4 24 16 17.4 22.6 24 24 22.6 17.4 16 24 9.4z"></path>
</svg>
</button>
</div>
<div class="bx--modal-content" >
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
</div>
<div class="bx--modal-content--overflow-indicator"></div>
</div>
<span tabindex="0"></span>
</div>
"""
        rendered = compare_template(template, expected)
        self.assertEqual(*rendered)
    def test_nofooter(self):
        """Default-size passive modal without a footer; the close button gets primary focus."""
        template = """
{% load carbondesign %}
{% ModalTrigger target="uid" type="button" %}{% endModalTrigger %}
{% Modal id="uid" %}
{% Slot 'label' %}Optional label{% endSlot %}
{% Slot 'heading' %}Modal heading{% endSlot %}
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
{% endModal %}
"""
        expected = """
<button class="bx--btn bx--btn--primary" type="button" data-modal-target="#uid">
Show modal
</button>
<div data-modal id="uid" class="bx--modal" role="dialog"
aria-modal="true" tabindex="-1" aria-labelledby="label-uid" aria-describedby="heading-uid">
<div class="bx--modal-container">
<div class="bx--modal-header">
<p class="bx--modal-header__label bx--type-delta" id="label-uid">
Optional label
</p>
<p class="bx--modal-header__heading bx--type-beta" id="heading-uid">
Modal heading
</p>
<button class="bx--modal-close" type="button" data-modal-close
aria-label="close modal" data-modal-primary-focus="">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
class="bx--modal-close__icon" width="16" height="16"
viewBox="0 0 32 32" aria-hidden="true">
<path d="M24 9.4L22.6 8 16 14.6 9.4 8 8 9.4 14.6 16 8 22.6 9.4 24 16 17.4 22.6 24 24 22.6 17.4 16 24 9.4z"></path>
</svg>
</button>
</div>
<div class="bx--modal-content" >
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
</div>
<div class="bx--modal-content--overflow-indicator"></div>
</div>
<span tabindex="0"></span>
</div>
"""
        rendered = compare_template(template, expected)
        self.assertEqual(*rendered)
    def test_scrolling(self):
        """Modal with can_scroll=True: long body content and tabindex="0" on the scrollable content area."""
        template = """
{% load carbondesign %}
{% ModalTrigger target="uid" type="button" %}{% endModalTrigger %}
{% Modal id="uid" can_scroll=True %}
{% Slot 'label' %}Optional label{% endSlot %}
{% Slot 'heading' %}Modal heading{% endSlot %}
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
<h3>Lorem ipsum</h3>
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
{% Slot 'footer' %}
{% Button variant="secondary" type="button" data-modal-close="" %}
Secondary button
{% endButton %}
{% Button type="button" data-modal-primary-focus="" %}
Primary button
{% endButton %}
{% endSlot %}
{% endModal %}
"""
        expected = """
<button class="bx--btn bx--btn--primary" type="button" data-modal-target="#uid">
Show modal
</button>
<div data-modal id="uid" class="bx--modal " role="dialog"
aria-modal="true" tabindex="-1" aria-labelledby="label-uid" aria-describedby="heading-uid">
<div class="bx--modal-container">
<div class="bx--modal-header">
<p class="bx--modal-header__label bx--type-delta" id="label-uid">
Optional label
</p>
<p class="bx--modal-header__heading bx--type-beta" id="heading-uid">
Modal heading
</p>
<button class="bx--modal-close" type="button" data-modal-close
aria-label="close modal">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
class="bx--modal-close__icon" width="16" height="16"
viewBox="0 0 32 32" aria-hidden="true">
<path d="M24 9.4L22.6 8 16 14.6 9.4 8 8 9.4 14.6 16 8 22.6 9.4 24 16 17.4 22.6 24 24 22.6 17.4 16 24 9.4z"></path>
</svg>
</button>
</div>
<div class="bx--modal-content" tabindex="0">
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
<h3>Lorem ipsum</h3>
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
</div>
<div class="bx--modal-content--overflow-indicator"></div>
<div class="bx--modal-footer">
<button class="bx--btn bx--btn--secondary" type="button" data-modal-close="">
Secondary button
</button>
<button class="bx--btn bx--btn--primary" type="button" data-modal-primary-focus="">
Primary button
</button>
</div>
</div>
<span tabindex="0"></span>
</div>
"""
        rendered = compare_template(template, expected)
        self.assertEqual(*rendered)
    def test_sm(self):
        """Small-size modal (size="sm") with footer action buttons."""
        template = """
{% load carbondesign %}
{% ModalTrigger target="uid" type="button" %}{% endModalTrigger %}
{% Modal id="uid" size="sm" %}
{% Slot 'label' %}Optional label{% endSlot %}
{% Slot 'heading' %}Modal heading{% endSlot %}
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
{% Slot 'footer' %}
{% Button variant="secondary" type="button" data-modal-close="" %}
Secondary button
{% endButton %}
{% Button type="button" data-modal-primary-focus="" %}
Primary button
{% endButton %}
{% endSlot %}
{% endModal %}
"""
        expected = """
<button class="bx--btn bx--btn--primary" type="button" data-modal-target="#uid">
Show modal
</button>
<div data-modal id="uid" class="bx--modal" role="dialog"
aria-modal="true" tabindex="-1" aria-labelledby="label-uid" aria-describedby="heading-uid">
<div class="bx--modal-container bx--modal-container--sm">
<div class="bx--modal-header">
<p class="bx--modal-header__label bx--type-delta" id="label-uid">
Optional label
</p>
<p class="bx--modal-header__heading bx--type-beta" id="heading-uid">
Modal heading
</p>
<button class="bx--modal-close" type="button" data-modal-close
aria-label="close modal">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
class="bx--modal-close__icon" width="16" height="16"
viewBox="0 0 32 32" aria-hidden="true">
<path d="M24 9.4L22.6 8 16 14.6 9.4 8 8 9.4 14.6 16 8 22.6 9.4 24 16 17.4 22.6 24 24 22.6 17.4 16 24 9.4z"></path>
</svg>
</button>
</div>
<div class="bx--modal-content" >
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
</div>
<div class="bx--modal-content--overflow-indicator"></div>
<div class="bx--modal-footer">
<button class="bx--btn bx--btn--secondary" type="button" data-modal-close="">
Secondary button
</button>
<button class="bx--btn bx--btn--primary" type="button" data-modal-primary-focus="">
Primary button
</button>
</div>
</div>
<span tabindex="0"></span>
</div>
"""
        rendered = compare_template(template, expected)
        self.assertEqual(*rendered)
    def test_titleonly_nofooter_sm(self):
        """Small title-only passive modal: heading is the message, no body, no footer."""
        template = """
{% load carbondesign %}
{% ModalTrigger target="uid" type="button" %}{% endModalTrigger %}
{% Modal id="uid" size="sm" %}
{% Slot 'label' %}Optional label{% endSlot %}
{% Slot 'heading' %}
Passive modal title as the message. Should be direct and 3 lines or less.
{% endSlot %}
{% endModal %}
"""
        expected = """
<button class="bx--btn bx--btn--primary" type="button" data-modal-target="#uid">
Show modal
</button>
<div data-modal id="uid" class="bx--modal" role="dialog"
aria-modal="true" tabindex="-1" aria-labelledby="label-uid" aria-describedby="heading-uid">
<div class="bx--modal-container bx--modal-container--sm">
<div class="bx--modal-header">
<p class="bx--modal-header__label bx--type-delta" id="label-uid">
Optional label
</p>
<p class="bx--modal-header__heading bx--type-beta" id="heading-uid">
Passive modal title as the message. Should be direct and 3 lines or less.
</p>
<button class="bx--modal-close" type="button" data-modal-close
aria-label="close modal" data-modal-primary-focus="">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
class="bx--modal-close__icon" width="16" height="16"
viewBox="0 0 32 32" aria-hidden="true">
<path d="M24 9.4L22.6 8 16 14.6 9.4 8 8 9.4 14.6 16 8 22.6 9.4 24 16 17.4 22.6 24 24 22.6 17.4 16 24 9.4z"></path>
</svg>
</button>
</div>
<div class="bx--modal-content">
</div>
<div class="bx--modal-content--overflow-indicator"></div>
</div>
<span tabindex="0"></span>
</div>
"""
        rendered = compare_template(template, expected)
        self.assertEqual(*rendered)
    def test_titleonly_nofooter_xs(self):
        """Extra-small title-only passive modal: heading is the message, no body, no footer."""
        template = """
{% load carbondesign %}
{% ModalTrigger target="uid" type="button" %}{% endModalTrigger %}
{% Modal id="uid" size="xs" %}
{% Slot 'label' %}Optional label{% endSlot %}
{% Slot 'heading' %}
Passive modal title as the message. Should be direct and 3 lines or less.
{% endSlot %}
{% endModal %}
"""
        expected = """
<button class="bx--btn bx--btn--primary" type="button" data-modal-target="#uid">
Show modal
</button>
<div data-modal id="uid" class="bx--modal" role="dialog"
aria-modal="true" tabindex="-1" aria-labelledby="label-uid" aria-describedby="heading-uid">
<div class="bx--modal-container bx--modal-container--xs">
<div class="bx--modal-header">
<p class="bx--modal-header__label bx--type-delta" id="label-uid">
Optional label
</p>
<p class="bx--modal-header__heading bx--type-beta" id="heading-uid">
Passive modal title as the message. Should be direct and 3 lines or less.
</p>
<button class="bx--modal-close" type="button" data-modal-close
aria-label="close modal" data-modal-primary-focus="">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
class="bx--modal-close__icon" width="16" height="16"
viewBox="0 0 32 32" aria-hidden="true">
<path d="M24 9.4L22.6 8 16 14.6 9.4 8 8 9.4 14.6 16 8 22.6 9.4 24 16 17.4 22.6 24 24 22.6 17.4 16 24 9.4z"></path>
</svg>
</button>
</div>
<div class="bx--modal-content" >
</div>
<div class="bx--modal-content--overflow-indicator"></div>
</div>
<span tabindex="0"></span>
</div>
"""
        rendered = compare_template(template, expected)
        self.assertEqual(*rendered)
def test_titleonly_sm(self):
    """A sm modal with label, heading and a two-button footer renders the
    expected Carbon markup."""
    template = """
{% load carbondesign %}
{% ModalTrigger target="uid" type="button" %}{% endModalTrigger %}
{% Modal id="uid" size="sm" %}
{% Slot 'label' %}Optional label{% endSlot %}
{% Slot 'heading' %}
Passive modal title as the message. Should be direct and 3 lines or less.
{% endSlot %}
{% Slot 'footer' %}
{% Button variant="secondary" type="button" data-modal-close="" %}
Secondary button
{% endButton %}
{% Button type="button" data-modal-primary-focus="" %}
Primary button
{% endButton %}
{% endSlot %}
{% endModal %}
"""
    expected = """
<button class="bx--btn bx--btn--primary" type="button" data-modal-target="#uid">
Show modal
</button>
<div data-modal id="uid" class="bx--modal" role="dialog"
aria-modal="true" tabindex="-1" aria-labelledby="label-uid" aria-describedby="heading-uid">
<div class="bx--modal-container bx--modal-container--sm">
<div class="bx--modal-header">
<p class="bx--modal-header__label bx--type-delta" id="label-uid">
Optional label
</p>
<p class="bx--modal-header__heading bx--type-beta" id="heading-uid">
Passive modal title as the message. Should be direct and 3 lines or less.
</p>
<button class="bx--modal-close" type="button" data-modal-close
aria-label="close modal">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
class="bx--modal-close__icon" width="16" height="16"
viewBox="0 0 32 32" aria-hidden="true">
<path d="M24 9.4L22.6 8 16 14.6 9.4 8 8 9.4 14.6 16 8 22.6 9.4 24 16 17.4 22.6 24 24 22.6 17.4 16 24 9.4z"></path>
</svg>
</button>
</div>
<div class="bx--modal-content" >
</div>
<div class="bx--modal-content--overflow-indicator"></div>
<div class="bx--modal-footer">
<button class="bx--btn bx--btn--secondary" type="button" data-modal-close="">
Secondary button
</button>
<button class="bx--btn bx--btn--primary" type="button" data-modal-primary-focus="">
Primary button
</button>
</div>
</div>
<span tabindex="0"></span>
</div>
"""
    # compare_template returns a (rendered, expected) pair for assertEqual.
    self.assertEqual(*compare_template(template, expected))
def test_titleonly_xs(self):
    """An xs modal with label, heading and a two-button footer renders the
    expected Carbon markup."""
    template = """
{% load carbondesign %}
{% ModalTrigger target="uid" type="button" %}{% endModalTrigger %}
{% Modal id="uid" size="xs" %}
{% Slot 'label' %}Optional label{% endSlot %}
{% Slot 'heading' %}
Passive modal title as the message. Should be direct and 3 lines or less.
{% endSlot %}
{% Slot 'footer' %}
{% Button variant="secondary" type="button" data-modal-close="" %}
Secondary button
{% endButton %}
{% Button type="button" data-modal-primary-focus="" %}
Primary button
{% endButton %}
{% endSlot %}
{% endModal %}
"""
    expected = """
<button class="bx--btn bx--btn--primary" type="button" data-modal-target="#uid">
Show modal
</button>
<div data-modal id="uid" class="bx--modal" role="dialog"
aria-modal="true" tabindex="-1" aria-labelledby="label-uid" aria-describedby="heading-uid">
<div class="bx--modal-container bx--modal-container--xs">
<div class="bx--modal-header">
<p class="bx--modal-header__label bx--type-delta" id="label-uid">
Optional label
</p>
<p class="bx--modal-header__heading bx--type-beta" id="heading-uid">
Passive modal title as the message. Should be direct and 3 lines or less.
</p>
<button class="bx--modal-close" type="button" data-modal-close
aria-label="close modal">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
class="bx--modal-close__icon" width="16" height="16"
viewBox="0 0 32 32" aria-hidden="true">
<path d="M24 9.4L22.6 8 16 14.6 9.4 8 8 9.4 14.6 16 8 22.6 9.4 24 16 17.4 22.6 24 24 22.6 17.4 16 24 9.4z"></path>
</svg>
</button>
</div>
<div class="bx--modal-content" >
</div>
<div class="bx--modal-content--overflow-indicator"></div>
<div class="bx--modal-footer">
<button class="bx--btn bx--btn--secondary" type="button" data-modal-close="">
Secondary button
</button>
<button class="bx--btn bx--btn--primary" type="button" data-modal-primary-focus="">
Primary button
</button>
</div>
</div>
<span tabindex="0"></span>
</div>
"""
    # compare_template returns a (rendered, expected) pair for assertEqual.
    self.assertEqual(*compare_template(template, expected))
def test_xs(self):
    """A full xs modal — label, heading, body content and footer — renders
    the expected Carbon markup."""
    template = """
{% load carbondesign %}
{% ModalTrigger target="uid" type="button" %}{% endModalTrigger %}
{% Modal id="uid" size="xs" %}
{% Slot 'label' %}Optional label{% endSlot %}
{% Slot 'heading' %}Modal heading{% endSlot %}
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
{% Slot 'footer' %}
{% Button variant="secondary" type="button" data-modal-close="" %}
Secondary button
{% endButton %}
{% Button type="button" data-modal-primary-focus="" %}
Primary button
{% endButton %}
{% endSlot %}
{% endModal %}
"""
    expected = """
<button class="bx--btn bx--btn--primary" type="button" data-modal-target="#uid">
Show modal
</button>
<div data-modal id="uid" class="bx--modal" role="dialog"
aria-modal="true" tabindex="-1" aria-labelledby="label-uid" aria-describedby="heading-uid">
<div class="bx--modal-container bx--modal-container--xs">
<div class="bx--modal-header">
<p class="bx--modal-header__label bx--type-delta" id="label-uid">
Optional label
</p>
<p class="bx--modal-header__heading bx--type-beta" id="heading-uid">
Modal heading
</p>
<button class="bx--modal-close" type="button" data-modal-close
aria-label="close modal">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
class="bx--modal-close__icon" width="16" height="16"
viewBox="0 0 32 32" aria-hidden="true">
<path d="M24 9.4L22.6 8 16 14.6 9.4 8 8 9.4 14.6 16 8 22.6 9.4 24 16 17.4 22.6 24 24 22.6 17.4 16 24 9.4z"></path>
</svg>
</button>
</div>
<div class="bx--modal-content" >
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
</div>
<div class="bx--modal-content--overflow-indicator"></div>
<div class="bx--modal-footer">
<button class="bx--btn bx--btn--secondary" type="button" data-modal-close="">
Secondary button
</button>
<button class="bx--btn bx--btn--primary" type="button" data-modal-primary-focus="">
Primary button
</button>
</div>
</div>
<span tabindex="0"></span>
</div>
"""
    # compare_template returns a (rendered, expected) pair for assertEqual.
    self.assertEqual(*compare_template(template, expected))
def test_default(self):
    """A full modal without a size argument renders the default (unsized)
    Carbon container markup."""
    template = """
{% load carbondesign %}
{% ModalTrigger target="uid" type="button" %}{% endModalTrigger %}
{% Modal id="uid" %}
{% Slot 'label' %}Optional label{% endSlot %}
{% Slot 'heading' %}Modal heading{% endSlot %}
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
{% Slot 'footer' %}
{% Button variant="secondary" type="button" data-modal-close="" %}
Secondary button
{% endButton %}
{% Button type="button" data-modal-primary-focus="" %}
Primary button
{% endButton %}
{% endSlot %}
{% endModal %}
"""
    expected = """
<button class="bx--btn bx--btn--primary" type="button" data-modal-target="#uid">
Show modal
</button>
<div data-modal id="uid" class="bx--modal" role="dialog"
aria-modal="true" tabindex="-1" aria-labelledby="label-uid" aria-describedby="heading-uid">
<div class="bx--modal-container">
<div class="bx--modal-header">
<p class="bx--modal-header__label bx--type-delta" id="label-uid">
Optional label
</p>
<p class="bx--modal-header__heading bx--type-beta" id="heading-uid">
Modal heading
</p>
<button class="bx--modal-close" type="button" data-modal-close
aria-label="close modal">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
class="bx--modal-close__icon" width="16" height="16"
viewBox="0 0 32 32" aria-hidden="true">
<path d="M24 9.4L22.6 8 16 14.6 9.4 8 8 9.4 14.6 16 8 22.6 9.4 24 16 17.4 22.6 24 24 22.6 17.4 16 24 9.4z"></path>
</svg>
</button>
</div>
<div class="bx--modal-content" >
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean id
accumsan augue. Phasellus consequat augue vitae tellus tincidunt posuere.
Curabitur justo urna, consectetur vel elit iaculis, ultrices condimentum
risus. Nulla facilisi.
Etiam venenatis molestie tellus. Quisque consectetur non risus eu rutrum.</p>
</div>
<div class="bx--modal-content--overflow-indicator"></div>
<div class="bx--modal-footer">
<button class="bx--btn bx--btn--secondary" type="button" data-modal-close="">
Secondary button
</button>
<button class="bx--btn bx--btn--primary" type="button" data-modal-primary-focus="">
Primary button
</button>
</div>
</div>
<span tabindex="0"></span>
</div>
"""
    # compare_template returns a (rendered, expected) pair for assertEqual.
    self.assertEqual(*compare_template(template, expected))
| 39.994302
| 151
| 0.660802
| 5,695
| 42,114
| 4.861984
| 0.030904
| 0.045758
| 0.062407
| 0.037379
| 0.979559
| 0.978114
| 0.977681
| 0.977681
| 0.972119
| 0.972119
| 0
| 0.032468
| 0.189676
| 42,114
| 1,052
| 152
| 40.032319
| 0.778908
| 0.002493
| 0
| 0.961698
| 0
| 0.099379
| 0.934797
| 0.157426
| 0
| 0
| 0
| 0
| 0.015528
| 1
| 0.015528
| false
| 0.008282
| 0.001035
| 0
| 0.018634
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
41ad4d8a263594c618326a8ec9e354b63d83747c
| 27,451
|
py
|
Python
|
tests/test_extractor_api_srpd.py
|
isabella232/comport
|
117123862415261095a917ed7f2037c1f986b474
|
[
"BSD-3-Clause"
] | 35
|
2015-11-14T18:32:45.000Z
|
2022-01-23T15:15:05.000Z
|
tests/test_extractor_api_srpd.py
|
codeforamerica/comport
|
117123862415261095a917ed7f2037c1f986b474
|
[
"BSD-3-Clause"
] | 119
|
2015-11-20T22:45:34.000Z
|
2022-02-10T23:02:36.000Z
|
tests/test_extractor_api_srpd.py
|
isabella232/comport
|
117123862415261095a917ed7f2037c1f986b474
|
[
"BSD-3-Clause"
] | 19
|
2015-11-20T20:41:52.000Z
|
2022-01-26T04:12:34.000Z
|
# -*- coding: utf-8 -*-
import pytest
from comport.department.models import Department, Extractor
from comport.data.models import IncidentsUpdated, OfficerInvolvedShootingSRPD, UseOfForceIncidentSRPD, CitizenComplaintSRPD, PursuitSRPD
from testclient.JSON_test_client import JSONTestClient
@pytest.mark.usefixtures('db')
class TestExtractorSRPD:
def test_post_uof_data(self, testapp):
    ''' New and updated UOF data from the extractor is processed as expected.
    '''
    # Create a department and an extractor account for it.
    department = Department.create(name="SR Police Department", short_name="SRPD", load_defaults=False)
    extractor, _ = Extractor.from_department_and_password(department=department, password="password")
    # Authenticate the test client as that extractor.
    testapp.authorization = ('Basic', (extractor.username, 'password'))
    # A heartbeat POST starts an update session.
    testapp.post_json("/data/heartbeat", params={"heartbeat": "heartbeat"})
    # Send 5 fake incidents to the UOF endpoint.
    uof_count = 5
    test_client = JSONTestClient()
    uof_data = test_client.make_uof(count=uof_count, short_name=department.short_name)
    response = testapp.post_json("/data/UOF", params={'month': 0, 'year': 0, 'data': uof_data})
    # The post succeeds and all 5 incidents are stored.
    assert response.status_code == 200
    assert len(UseOfForceIncidentSRPD.query.all()) == uof_count
    for incident in uof_data:
        # Each posted opaqueId matches a stored incident...
        assert UseOfForceIncidentSRPD.query.filter_by(opaque_id=incident['opaqueId']).first() is not None
        # ...and is tracked in the IncidentsUpdated table.
        record_updated = IncidentsUpdated.query.filter_by(opaque_id=incident['opaqueId']).first()
        assert record_updated is not None
        assert record_updated.department_id == department.id
        assert record_updated.incident_type == "uof"
    # Start a fresh update session.
    testapp.post_json("/data/heartbeat", params={"heartbeat": "heartbeat"})
    # Build 5 replacement incidents reusing the original opaqueIds.
    new_data = test_client.make_uof(count=uof_count, short_name=department.short_name)
    for replacement, original in zip(new_data, uof_data):
        replacement['opaqueId'] = original['opaqueId']
    response = testapp.post_json("/data/UOF", params={'month': 0, 'year': 0, 'data': new_data})
    assert response.status_code == 200
    # Replacements overwrote the originals, so the row count is unchanged.
    assert len(UseOfForceIncidentSRPD.query.all()) == uof_count
    for incident in uof_data:
        # The original opaqueIds still resolve to stored incidents.
        assert UseOfForceIncidentSRPD.query.filter_by(opaque_id=incident['opaqueId']).first() is not None
    # Post another batch with the same opaqueIds WITHOUT a new heartbeat:
    # within one session the rows are appended rather than replaced.
    new_data = test_client.make_uof(count=uof_count, short_name=department.short_name)
    for replacement, original in zip(new_data, uof_data):
        replacement['opaqueId'] = original['opaqueId']
    response = testapp.post_json("/data/UOF", params={'month': 0, 'year': 0, 'data': new_data})
    assert response.status_code == 200
    # Both batches are now in the database.
    assert len(UseOfForceIncidentSRPD.query.all()) == uof_count * 2
def test_all_uof_records_destroyed_when_new_record_posted(self, testapp):
    ''' Posting a new record with an id that matches a set of past records destroys all of them.
    '''
    # Create a department and an extractor account for it.
    department = Department.create(name="B Police Department", short_name="SRPD", load_defaults=False)
    extractor, _ = Extractor.from_department_and_password(department=department, password="password")
    # Authenticate the test client as that extractor.
    testapp.authorization = ('Basic', (extractor.username, 'password'))
    # A heartbeat POST starts an update session.
    testapp.post_json("/data/heartbeat", params={"heartbeat": "heartbeat"})
    # Send 5 fake incidents that all share one opaqueId to the UOF endpoint.
    uof_count = 5
    test_client = JSONTestClient()
    uof_data = test_client.make_uof(count=uof_count, short_name=department.short_name)
    shared_id = uof_data[0]['opaqueId']
    for record in uof_data:
        record['opaqueId'] = shared_id
    response = testapp.post_json("/data/UOF", params={'month': 0, 'year': 0, 'data': uof_data})
    # The post succeeds and all 5 rows are stored under the shared id.
    assert response.status_code == 200
    assert len(UseOfForceIncidentSRPD.query.all()) == uof_count
    assert len(UseOfForceIncidentSRPD.query.filter_by(opaque_id=shared_id).all()) == uof_count
    # The shared opaqueId is tracked in the IncidentsUpdated table.
    record_updated = IncidentsUpdated.query.filter_by(opaque_id=shared_id).first()
    assert record_updated is not None
    assert record_updated.incident_type == "uof"
    assert record_updated.department_id == department.id
    # Start a fresh update session.
    testapp.post_json("/data/heartbeat", params={"heartbeat": "heartbeat"})
    # Post a single new incident carrying the shared opaqueId.
    new_data = test_client.make_uof(count=1, short_name=department.short_name)
    new_data[0]['opaqueId'] = shared_id
    response = testapp.post_json("/data/UOF", params={'month': 0, 'year': 0, 'data': new_data})
    assert response.status_code == 200
    # The 5 old rows were destroyed; only the new one remains.
    remaining = UseOfForceIncidentSRPD.query.all()
    assert len(remaining) == 1
    assert remaining[0].opaque_id == shared_id
    # The shared opaqueId is still tracked in the IncidentsUpdated table.
    record_updated = IncidentsUpdated.query.filter_by(opaque_id=shared_id).first()
    assert record_updated is not None
    assert record_updated.incident_type == "uof"
    assert record_updated.department_id == department.id
def test_post_complaints_data(self, testapp):
    ''' New and updated complaints data from the extractor is processed as expected.
    '''
    # Create a department and an extractor account for it.
    department = Department.create(name="SR Police Department", short_name="SRPD", load_defaults=False)
    extractor, _ = Extractor.from_department_and_password(department=department, password="password")
    # Authenticate the test client as that extractor.
    testapp.authorization = ('Basic', (extractor.username, 'password'))
    # A heartbeat POST starts an update session.
    testapp.post_json("/data/heartbeat", params={"heartbeat": "heartbeat"})
    # Send 5 fake incidents to the complaints endpoint.
    complaints_count = 5
    test_client = JSONTestClient()
    complaints_data = test_client.make_complaints(count=complaints_count, short_name=department.short_name)
    response = testapp.post_json("/data/complaints", params={'month': 0, 'year': 0, 'data': complaints_data})
    # The post succeeds and all 5 incidents are stored.
    assert response.status_code == 200
    assert len(CitizenComplaintSRPD.query.all()) == complaints_count
    for incident in complaints_data:
        # Each posted opaqueId matches a stored incident...
        assert CitizenComplaintSRPD.query.filter_by(opaque_id=incident['opaqueId']).first() is not None
        # ...and is tracked in the IncidentsUpdated table.
        record_updated = IncidentsUpdated.query.filter_by(opaque_id=incident['opaqueId']).first()
        assert record_updated is not None
        assert record_updated.incident_type == "complaints"
        assert record_updated.department_id == department.id
    # Start a fresh update session.
    testapp.post_json("/data/heartbeat", params={"heartbeat": "heartbeat"})
    # Build 5 replacement incidents reusing the original opaqueIds.
    new_data = test_client.make_complaints(count=complaints_count, short_name=department.short_name)
    for replacement, original in zip(new_data, complaints_data):
        replacement['opaqueId'] = original['opaqueId']
    response = testapp.post_json("/data/complaints", params={'month': 0, 'year': 0, 'data': new_data})
    assert response.status_code == 200
    # Replacements overwrote the originals, so the row count is unchanged.
    assert len(CitizenComplaintSRPD.query.all()) == complaints_count
    for incident in complaints_data:
        # The original opaqueIds still resolve to stored incidents.
        assert CitizenComplaintSRPD.query.filter_by(opaque_id=incident['opaqueId']).first() is not None
    # Post another batch with the same opaqueIds WITHOUT a new heartbeat:
    # within one session the rows are appended rather than replaced.
    new_data = test_client.make_complaints(count=complaints_count, short_name=department.short_name)
    for replacement, original in zip(new_data, complaints_data):
        replacement['opaqueId'] = original['opaqueId']
    response = testapp.post_json("/data/complaints", params={'month': 0, 'year': 0, 'data': new_data})
    assert response.status_code == 200
    # Both batches are now in the database.
    assert len(CitizenComplaintSRPD.query.all()) == complaints_count * 2
def test_all_complaints_records_destroyed_when_new_record_posted(self, testapp):
    ''' Posting a new record with an id that matches a set of past records destroys all of them.
    '''
    # Create a department and an extractor account for it.
    department = Department.create(name="SR Police Department", short_name="SRPD", load_defaults=False)
    extractor, _ = Extractor.from_department_and_password(department=department, password="password")
    # Authenticate the test client as that extractor.
    testapp.authorization = ('Basic', (extractor.username, 'password'))
    # A heartbeat POST starts an update session.
    testapp.post_json("/data/heartbeat", params={"heartbeat": "heartbeat"})
    # Send 5 fake incidents that all share one opaqueId to the complaints endpoint.
    complaints_count = 5
    test_client = JSONTestClient()
    complaints_data = test_client.make_complaints(count=complaints_count, short_name=department.short_name)
    shared_id = complaints_data[0]['opaqueId']
    for record in complaints_data:
        record['opaqueId'] = shared_id
    response = testapp.post_json("/data/complaints", params={'month': 0, 'year': 0, 'data': complaints_data})
    # The post succeeds and all 5 rows are stored under the shared id.
    assert response.status_code == 200
    assert len(CitizenComplaintSRPD.query.all()) == complaints_count
    assert len(CitizenComplaintSRPD.query.filter_by(opaque_id=shared_id).all()) == complaints_count
    # The shared opaqueId is tracked in the IncidentsUpdated table.
    record_updated = IncidentsUpdated.query.filter_by(opaque_id=shared_id).first()
    assert record_updated is not None
    assert record_updated.incident_type == "complaints"
    assert record_updated.department_id == department.id
    # Start a fresh update session.
    testapp.post_json("/data/heartbeat", params={"heartbeat": "heartbeat"})
    # Post a single new incident carrying the shared opaqueId.
    new_data = test_client.make_complaints(count=1, short_name=department.short_name)
    new_data[0]['opaqueId'] = shared_id
    response = testapp.post_json("/data/complaints", params={'month': 0, 'year': 0, 'data': new_data})
    assert response.status_code == 200
    # The 5 old rows were destroyed; only the new one remains.
    remaining = CitizenComplaintSRPD.query.all()
    assert len(remaining) == 1
    assert remaining[0].opaque_id == shared_id
    # The shared opaqueId is still tracked in the IncidentsUpdated table.
    record_updated = IncidentsUpdated.query.filter_by(opaque_id=shared_id).first()
    assert record_updated is not None
    assert record_updated.incident_type == "complaints"
    assert record_updated.department_id == department.id
def test_post_ois_data(self, testapp):
    ''' New and updated OIS data from the extractor is processed as expected.
    '''
    # Create a department and an extractor account for it.
    department = Department.create(name="SR Police Department", short_name="SRPD", load_defaults=False)
    extractor, _ = Extractor.from_department_and_password(department=department, password="password")
    # Authenticate the test client as that extractor.
    testapp.authorization = ('Basic', (extractor.username, 'password'))
    # A heartbeat POST starts an update session.
    testapp.post_json("/data/heartbeat", params={"heartbeat": "heartbeat"})
    # Send 5 fake incidents to the OIS endpoint.
    ois_count = 5
    test_client = JSONTestClient()
    ois_data = test_client.make_ois(count=ois_count, short_name=department.short_name)
    response = testapp.post_json("/data/OIS", params={'month': 0, 'year': 0, 'data': ois_data})
    # The post succeeds and all 5 incidents are stored.
    assert response.status_code == 200
    assert len(OfficerInvolvedShootingSRPD.query.all()) == ois_count
    for incident in ois_data:
        # Each posted opaqueId matches a stored incident...
        assert OfficerInvolvedShootingSRPD.query.filter_by(opaque_id=incident['opaqueId']).first() is not None
        # ...and is tracked in the IncidentsUpdated table.
        record_updated = IncidentsUpdated.query.filter_by(opaque_id=incident['opaqueId']).first()
        assert record_updated is not None
        assert record_updated.incident_type == "ois"
        assert record_updated.department_id == department.id
    # Start a fresh update session.
    testapp.post_json("/data/heartbeat", params={"heartbeat": "heartbeat"})
    # Build 5 replacement incidents reusing the original opaqueIds.
    new_data = test_client.make_ois(count=ois_count, short_name=department.short_name)
    for replacement, original in zip(new_data, ois_data):
        replacement['opaqueId'] = original['opaqueId']
    response = testapp.post_json("/data/OIS", params={'month': 0, 'year': 0, 'data': new_data})
    assert response.status_code == 200
    # Replacements overwrote the originals, so the row count is unchanged.
    assert len(OfficerInvolvedShootingSRPD.query.all()) == ois_count
    for incident in ois_data:
        # The original opaqueIds still resolve to stored incidents.
        assert OfficerInvolvedShootingSRPD.query.filter_by(opaque_id=incident['opaqueId']).first() is not None
    # Post another batch with the same opaqueIds WITHOUT a new heartbeat:
    # within one session the rows are appended rather than replaced.
    new_data = test_client.make_ois(count=ois_count, short_name=department.short_name)
    for replacement, original in zip(new_data, ois_data):
        replacement['opaqueId'] = original['opaqueId']
    response = testapp.post_json("/data/OIS", params={'month': 0, 'year': 0, 'data': new_data})
    assert response.status_code == 200
    # Both batches are now in the database.
    assert len(OfficerInvolvedShootingSRPD.query.all()) == ois_count * 2
def test_all_ois_records_destroyed_when_new_record_posted(self, testapp):
    ''' Posting a new record with an id that matches a set of past records destroys all of them.
    '''
    # Create a department and an extractor account for it.
    department = Department.create(name="SR Police Department", short_name="SRPD", load_defaults=False)
    extractor, _ = Extractor.from_department_and_password(department=department, password="password")
    # Authenticate the test client as that extractor.
    testapp.authorization = ('Basic', (extractor.username, 'password'))
    # A heartbeat POST starts an update session.
    testapp.post_json("/data/heartbeat", params={"heartbeat": "heartbeat"})
    # Send 5 fake incidents that all share one opaqueId to the OIS endpoint.
    ois_count = 5
    test_client = JSONTestClient()
    ois_data = test_client.make_ois(count=ois_count, short_name=department.short_name)
    shared_id = ois_data[0]['opaqueId']
    for record in ois_data:
        record['opaqueId'] = shared_id
    response = testapp.post_json("/data/OIS", params={'month': 0, 'year': 0, 'data': ois_data})
    # The post succeeds and all 5 rows are stored under the shared id.
    assert response.status_code == 200
    assert len(OfficerInvolvedShootingSRPD.query.all()) == ois_count
    assert len(OfficerInvolvedShootingSRPD.query.filter_by(opaque_id=shared_id).all()) == ois_count
    # The shared opaqueId is tracked in the IncidentsUpdated table.
    record_updated = IncidentsUpdated.query.filter_by(opaque_id=shared_id).first()
    assert record_updated is not None
    assert record_updated.incident_type == "ois"
    assert record_updated.department_id == department.id
    # Start a fresh update session.
    testapp.post_json("/data/heartbeat", params={"heartbeat": "heartbeat"})
    # Post a single new incident carrying the shared opaqueId.
    new_data = test_client.make_ois(count=1, short_name=department.short_name)
    new_data[0]['opaqueId'] = shared_id
    response = testapp.post_json("/data/OIS", params={'month': 0, 'year': 0, 'data': new_data})
    assert response.status_code == 200
    # The 5 old rows were destroyed; only the new one remains.
    remaining = OfficerInvolvedShootingSRPD.query.all()
    assert len(remaining) == 1
    assert remaining[0].opaque_id == shared_id
    # The shared opaqueId is still tracked in the IncidentsUpdated table.
    record_updated = IncidentsUpdated.query.filter_by(opaque_id=shared_id).first()
    assert record_updated is not None
    assert record_updated.incident_type == "ois"
    assert record_updated.department_id == department.id
def test_post_pursuits_data(self, testapp):
    ''' New and updated pursuits data from the extractor is processed as expected.
    '''
    # Create a department and an extractor account for it.
    department = Department.create(name="SR Police Department", short_name="SRPD", load_defaults=False)
    extractor, _ = Extractor.from_department_and_password(department=department, password="password")
    # Authenticate the test client as that extractor.
    testapp.authorization = ('Basic', (extractor.username, 'password'))
    # A heartbeat POST starts an update session.
    testapp.post_json("/data/heartbeat", params={"heartbeat": "heartbeat"})
    # Send 5 fake incidents to the pursuits endpoint.
    pursuits_count = 5
    test_client = JSONTestClient()
    pursuits_data = test_client.make_pursuits(count=pursuits_count, short_name=department.short_name)
    response = testapp.post_json("/data/pursuits", params={'month': 0, 'year': 0, 'data': pursuits_data})
    # The post succeeds and all 5 incidents are stored.
    assert response.status_code == 200
    assert len(PursuitSRPD.query.all()) == pursuits_count
    for incident in pursuits_data:
        # Each posted opaqueId matches a stored incident...
        assert PursuitSRPD.query.filter_by(opaque_id=incident['opaqueId']).first() is not None
        # ...and is tracked in the IncidentsUpdated table.
        record_updated = IncidentsUpdated.query.filter_by(opaque_id=incident['opaqueId']).first()
        assert record_updated is not None
        assert record_updated.department_id == department.id
        assert record_updated.incident_type == "pursuits"
    # Start a fresh update session.
    testapp.post_json("/data/heartbeat", params={"heartbeat": "heartbeat"})
    # Build 5 replacement incidents reusing the original opaqueIds.
    new_data = test_client.make_pursuits(count=pursuits_count, short_name=department.short_name)
    for replacement, original in zip(new_data, pursuits_data):
        replacement['opaqueId'] = original['opaqueId']
    response = testapp.post_json("/data/pursuits", params={'month': 0, 'year': 0, 'data': new_data})
    assert response.status_code == 200
    # Replacements overwrote the originals, so the row count is unchanged.
    assert len(PursuitSRPD.query.all()) == pursuits_count
    for incident in pursuits_data:
        # The original opaqueIds still resolve to stored incidents.
        assert PursuitSRPD.query.filter_by(opaque_id=incident['opaqueId']).first() is not None
    # Post another batch with the same opaqueIds WITHOUT a new heartbeat:
    # within one session the rows are appended rather than replaced.
    new_data = test_client.make_pursuits(count=pursuits_count, short_name=department.short_name)
    for replacement, original in zip(new_data, pursuits_data):
        replacement['opaqueId'] = original['opaqueId']
    response = testapp.post_json("/data/pursuits", params={'month': 0, 'year': 0, 'data': new_data})
    assert response.status_code == 200
    # Both batches are now in the database.
    assert len(PursuitSRPD.query.all()) == pursuits_count * 2
def test_all_pursuits_records_destroyed_when_new_record_posted(self, testapp):
    ''' Posting a new record with an id that matches a set of past records destroys all of them.
    '''
    # Build a department plus its extractor account and authenticate the client.
    dept = Department.create(name="B Police Department", short_name="SRPD", load_defaults=False)
    extractor, _ = Extractor.from_department_and_password(department=dept, password="password")
    testapp.authorization = ('Basic', (extractor.username, 'password'))
    # A heartbeat marks the start of an update cycle.
    testapp.post_json("/data/heartbeat", params={"heartbeat": "heartbeat"})
    # Generate 5 fake incidents that all share one opaqueId.
    num_pursuits = 5
    client = JSONTestClient()
    batch = client.make_pursuits(count=num_pursuits, short_name=dept.short_name)
    shared_id = batch[0]['opaqueId']
    for row in batch:
        row['opaqueId'] = shared_id
    response = testapp.post_json("/data/pursuits", params={'month': 0, 'year': 0, 'data': batch})
    # The post succeeded and all 5 rows landed in the database.
    assert response.status_code == 200
    assert len(PursuitSRPD.query.all()) == num_pursuits
    # Every stored row carries the shared opaqueId.
    assert len(PursuitSRPD.query.filter_by(opaque_id=shared_id).all()) == num_pursuits
    # The shared opaqueId was logged in IncidentsUpdated for this department.
    logged = IncidentsUpdated.query.filter_by(opaque_id=shared_id).first()
    assert logged is not None
    assert logged.incident_type == "pursuits"
    assert logged.department_id == dept.id
    # Begin a fresh update cycle with another heartbeat.
    testapp.post_json("/data/heartbeat", params={"heartbeat": "heartbeat"})
    # Post a single new incident that reuses the same opaqueId.
    replacement = client.make_pursuits(count=1, short_name=dept.short_name)
    replacement[0]['opaqueId'] = shared_id
    response = testapp.post_json("/data/pursuits", params={'month': 0, 'year': 0, 'data': replacement})
    assert response.status_code == 200
    # The 5 earlier rows were destroyed; only the replacement remains.
    remaining = PursuitSRPD.query.all()
    assert len(remaining) == 1
    assert remaining[0].opaque_id == shared_id
    # The opaqueId is still tracked in IncidentsUpdated after the replacement.
    logged = IncidentsUpdated.query.filter_by(opaque_id=shared_id).first()
    assert logged is not None
    assert logged.incident_type == "pursuits"
    assert logged.department_id == dept.id
| 48.585841
| 136
| 0.682635
| 3,468
| 27,451
| 5.234429
| 0.041522
| 0.023798
| 0.03768
| 0.045612
| 0.966562
| 0.956426
| 0.945243
| 0.937311
| 0.936209
| 0.936209
| 0
| 0.008389
| 0.231431
| 27,451
| 564
| 137
| 48.671986
| 0.852024
| 0.259043
| 0
| 0.820144
| 0
| 0
| 0.088351
| 0
| 0
| 0
| 0
| 0
| 0.330935
| 1
| 0.028777
| false
| 0.057554
| 0.014388
| 0
| 0.046763
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
68f790357010088dbe1f74ae361a38867701d162
| 14,765
|
py
|
Python
|
src/edge/tests/test_crispr.py
|
pflans/edge
|
d84de02bd9b334212b7405ecc4e68d3a209add99
|
[
"MIT"
] | 32
|
2017-05-15T06:03:46.000Z
|
2022-02-18T08:30:19.000Z
|
src/edge/tests/test_crispr.py
|
pflans/edge
|
d84de02bd9b334212b7405ecc4e68d3a209add99
|
[
"MIT"
] | 36
|
2017-05-11T01:29:14.000Z
|
2022-02-10T07:31:24.000Z
|
src/edge/tests/test_crispr.py
|
pflans/edge
|
d84de02bd9b334212b7405ecc4e68d3a209add99
|
[
"MIT"
] | 4
|
2017-09-19T18:10:45.000Z
|
2019-11-29T03:38:08.000Z
|
import os
import json
from django.test import TestCase
from edge.models import Genome, Operation, Fragment, Genome_Fragment
from edge.blastdb import build_all_genome_dbs, fragment_fasta_fn
from edge.crispr import find_crispr_target, crispr_dsb
from Bio.Seq import Seq
class GenomeCrisprDSBTest(TestCase):
    """Tests for CRISPR-Cas9 target finding and double-strand-break (DSB) annotation.

    Coordinates in these tests are 1-based fragment positions; reverse-strand
    hits report subject_start > subject_end. Fix applied: ``assertEquals`` /
    ``assertNotEquals`` are deprecated aliases that were removed in Python
    3.12, so the canonical ``assertEqual`` / ``assertNotEqual`` are used.
    """

    def build_genome(self, circular, *sequences):
        """Create a genome with one fragment per sequence and rebuild BLAST dbs.

        Each sequence becomes a fragment named "Bar"; any stale per-fragment
        FASTA file is removed first so the db rebuild regenerates it.
        """
        g = Genome(name="Foo")
        g.save()
        for seq in sequences:
            f = Fragment.create_with_sequence("Bar", seq, circular=circular)
            Genome_Fragment(genome=g, fragment=f, inherited=False).save()
            try:
                os.unlink(fragment_fasta_fn(f))
            except OSError:
                # file may not exist yet; that's fine
                pass
        build_all_genome_dbs(refresh=True)
        return Genome.objects.get(pk=g.id)

    def test_find_crispr_target_finds_target_on_forward_strand(self):
        s1 = "agaaggtctggtagcgatgtagtcgatct"
        s2 = "gactaggtacgtagtcgtcaggtcagtca"
        pam = "cgg"
        g = self.build_genome(False, s1 + pam + s2)
        guide = s1[-20:]
        t = find_crispr_target(g, guide, "ngg")
        self.assertEqual(len(t), 1)
        self.assertEqual(t[0].fragment_id, g.fragments.all()[0].id)
        self.assertEqual(t[0].fragment_name, g.fragments.all()[0].name)
        self.assertEqual(t[0].subject_start, s1.index(guide) + 1)
        self.assertEqual(t[0].subject_end, len(s1))
        self.assertEqual(t[0].pam, "ngg")

    def test_find_crispr_target_only_finds_perfect_match_to_guide(self):
        s1 = "agaaggtctggtagcgatgtagtcgatct"
        s2 = "gactaggtacgtagtcgtcaggtcagtca"
        pam = "cgg"
        g = self.build_genome(False, s1 + pam + s2)
        # guide with a 5-base mismatch prefix must not match
        guide = "aaaaa" + s1[-15:]
        t = find_crispr_target(g, guide, "ngg")
        self.assertEqual(len(t), 0)

    def test_find_crispr_target_finds_target_on_reverse_strand(self):
        s1 = "agaaggtctggtagcgatgtagtcgatct"
        s2 = "gactaggtacgtagtcgtcaggtcagtca"
        pam = "cgg"
        g = self.build_genome(False, str(Seq(s1 + pam + s2).reverse_complement()))
        guide = s1[-20:]
        t = find_crispr_target(g, guide, "ngg")
        self.assertEqual(len(t), 1)
        self.assertEqual(t[0].fragment_id, g.fragments.all()[0].id)
        self.assertEqual(t[0].fragment_name, g.fragments.all()[0].name)
        # reverse-strand hit: start > end in fragment coordinates
        self.assertEqual(t[0].subject_end, len(s2) + 3 + 1)
        self.assertEqual(t[0].subject_start, len(s2) + 3 + 1 + 20 - 1)
        self.assertEqual(t[0].pam, "ngg")

    def test_find_crispr_target_finds_multiple_crispr_targets(self):
        s1 = "agaaggtctggtagcgatgtagtcgatct"
        s2 = "gactaggtacgtagtcgtcaggtcagtca"
        pam = "cgg"
        g = self.build_genome(
            False, (s1 + pam + s2) + str(Seq(s1 + pam + s2).reverse_complement())
        )
        guide = s1[-20:]
        t = find_crispr_target(g, guide, "ngg")
        self.assertEqual(len(t), 2)
        self.assertEqual(t[0].fragment_id, g.fragments.all()[0].id)
        self.assertEqual(t[0].fragment_name, g.fragments.all()[0].name)
        self.assertEqual(t[0].subject_start, s1.index(guide) + 1)
        self.assertEqual(t[0].subject_end, len(s1))
        self.assertEqual(t[0].pam, "ngg")
        self.assertEqual(t[1].fragment_id, g.fragments.all()[0].id)
        self.assertEqual(t[1].fragment_name, g.fragments.all()[0].name)
        self.assertEqual(t[1].subject_end, len(s1 + pam + s2) + len(s2) + 3 + 1)
        self.assertEqual(
            t[1].subject_start, len(s1 + pam + s2) + len(s2) + 3 + 1 + 20 - 1
        )
        self.assertEqual(t[1].pam, "ngg")

    def test_find_crispr_target_does_not_find_target_without_pam(self):
        s1 = "agaaggtctggtagcgatgtagtcgatct"
        s2 = "gactaggtacgtagtcgtcaggtcagtca"
        pam = "ccc"
        g = self.build_genome(False, s1 + pam + s2)
        guide = s1[-20:]
        t = find_crispr_target(g, guide, "ngg")
        self.assertEqual(len(t), 0)

    def test_find_crispr_target_does_not_find_target_with_part_of_pam(self):
        s1 = "agaaggtctggtagcgatgtagtcgatct"
        s2 = "gactaggtacgtagtcgtcaggtcagtca"
        pam = "cgc"
        g = self.build_genome(False, s1 + pam + s2)
        guide = s1[-20:]
        t = find_crispr_target(g, guide, "ngg")
        self.assertEqual(len(t), 0)

    def test_find_crispr_target_finds_target_across_circular_boundary(self):
        s1 = "agaaggtctggtagcgatgtagtcgatct"
        s2 = "gactaggtacgtagtcgtcaggtcagtca"
        pam = "cgg"
        # rotate sequence so the guide straddles the origin of the circle
        s = s1[10:] + pam + s2 + s1[0:10]
        g = self.build_genome(True, s)
        guide = s1[-20:]
        t = find_crispr_target(g, guide, "ngg")
        self.assertEqual(len(t), 1)
        self.assertEqual(t[0].fragment_id, g.fragments.all()[0].id)
        self.assertEqual(t[0].fragment_name, g.fragments.all()[0].name)
        self.assertEqual(
            t[0].subject_start, (s1.index(guide) + 1 - 10 - 1) % len(s) + 1
        )
        self.assertEqual(t[0].subject_end, (len(s1) - 10 - 1) % len(s) + 1)
        self.assertEqual(t[0].pam, "ngg")

    def test_find_crispr_target_finds_target_with_pam_across_circular_boundary(self):
        s1 = "agaaggtctggtagcgatgtagtcgatct"
        s2 = "gactaggtacgtagtcgtcaggtcagtca"
        pam = "cgg"
        # split the PAM itself across the circular origin
        s = pam[1:] + s2 + s1 + pam[:1]
        g = self.build_genome(True, s)
        guide = s1[-20:]
        t = find_crispr_target(g, guide, "ngg")
        self.assertEqual(len(t), 1)
        self.assertEqual(t[0].fragment_id, g.fragments.all()[0].id)
        self.assertEqual(t[0].fragment_name, g.fragments.all()[0].name)
        self.assertEqual(t[0].subject_start, len(pam[1:] + s2) + s1.index(guide) + 1)
        self.assertEqual(t[0].subject_end, len(s) - 1)
        self.assertEqual(t[0].pam, "ngg")

    def test_find_crispr_target_finds_reverse_complement_across_circular_boundary(self):
        s1 = "agaaggtctggtagcgatgtagtcgatct"
        s2 = "gactaggtacgtagtcgtcaggtcagtca"
        pam = "cgg"
        s = s1[10:] + pam + s2 + s1[0:10]
        g = self.build_genome(True, str(Seq(s).reverse_complement()))
        guide = s1[-20:]
        t = find_crispr_target(g, guide, "ngg")
        self.assertEqual(len(t), 1)
        self.assertEqual(t[0].fragment_id, g.fragments.all()[0].id)
        self.assertEqual(t[0].fragment_name, g.fragments.all()[0].name)
        self.assertEqual(
            t[0].subject_start, (len(s) - (s1.index(guide) + 1 - 10)) % len(s) + 1
        )
        self.assertEqual(t[0].subject_end, (len(s) - (len(s1) - 10)) % len(s) + 1)
        self.assertEqual(t[0].pam, "ngg")

    def test_find_crispr_target_finds_reverse_complement_with_pam_across_circular_boundary(
        self,
    ):
        s1 = "agaaggtctggtagcgatgtagtcgatct"
        s2 = "gactaggtacgtagtcgtcaggtcagtca"
        pam = "cgg"
        s = pam[1:] + s2 + s1 + pam[:1]
        g = self.build_genome(True, str(Seq(s).reverse_complement()))
        guide = s1[-20:]
        t = find_crispr_target(g, guide, "ngg")
        self.assertEqual(len(t), 1)
        self.assertEqual(t[0].fragment_id, g.fragments.all()[0].id)
        self.assertEqual(t[0].fragment_name, g.fragments.all()[0].name)
        self.assertEqual(t[0].subject_end, 2)
        self.assertEqual(t[0].subject_start, 2 + len(guide) - 1)
        self.assertEqual(t[0].pam, "ngg")

    def test_crispr_dsb_finds_and_annotates_target_on_forward_strand(self):
        s1 = "agaaggtctggtagcgatgtagtcgatct"
        s2 = "gactaggtacgtagtcgtcaggtcagtca"
        pam = "cgg"
        g = self.build_genome(False, s1 + pam + s2)
        guide = s1[-20:]
        a = g.fragments.all()[0].indexed_fragment().annotations()
        self.assertEqual(len(a), 0)
        c = crispr_dsb(g, guide, "ngg")
        # crispr_dsb returns a new child genome, not the parent
        self.assertNotEqual(c.id, g.id)
        self.assertEqual(c.parent.id, g.id)
        a = c.fragments.all()[0].indexed_fragment().annotations()
        self.assertEqual(len(a), 1)
        self.assertEqual(a[0].base_first, s1.index(guide) + 1)
        self.assertEqual(a[0].base_last, len(s1))
        self.assertEqual(a[0].feature_base_first, 1)
        self.assertEqual(a[0].feature_base_last, len(guide))
        self.assertEqual(a[0].feature.strand, 1)
        self.assertEqual(a[0].feature.name, "CRISPR-Cas9 (pam ngg) target")
        self.assertEqual(a[0].feature.operation.type, Operation.CRISPR_DSB[0])
        self.assertEqual(a[0].feature.operation.genome, c)
        # annotation is visible in parent genome, since it's not on new base pairs
        a = g.fragments.all()[0].indexed_fragment().annotations()
        self.assertEqual(len(a), 1)
        self.assertEqual(a[0].feature.operation.genome, c)

    def test_crispr_dsb_finds_and_annotates_target_on_reverse_strand(self):
        s1 = "agaaggtctggtagcgatgtagtcgatct"
        s2 = "gactaggtacgtagtcgtcaggtcagtca"
        pam = "cgg"
        g = self.build_genome(False, str(Seq(s1 + pam + s2).reverse_complement()))
        guide = s1[-20:]
        c = crispr_dsb(g, guide, "ngg")
        self.assertNotEqual(c.id, g.id)
        self.assertEqual(c.parent.id, g.id)
        a = c.fragments.all()[0].indexed_fragment().annotations()
        self.assertEqual(len(a), 1)
        self.assertEqual(a[0].base_first, len(s2) + 3 + 1)
        self.assertEqual(a[0].base_last, len(s2) + 3 + 1 + 20 - 1)
        self.assertEqual(a[0].feature_base_first, 1)
        self.assertEqual(a[0].feature_base_last, len(guide))
        self.assertEqual(a[0].feature.strand, -1)
        self.assertEqual(a[0].feature.name, "CRISPR-Cas9 (pam ngg) target")
        self.assertEqual(a[0].feature.operation.type, Operation.CRISPR_DSB[0])
        self.assertEqual(a[0].feature.operation.genome, c)

    def test_crispr_dsb_finds_and_annotates_target_across_circular_boundary(self):
        s1 = "agaaggtctggtagcgatgtagtcgatct"
        s2 = "gactaggtacgtagtcgtcaggtcagtca"
        pam = "cgg"
        s = s1[-15:] + pam + s2 + s1[0 : len(s1) - 15]
        g = self.build_genome(True, s)
        guide = s1[-20:]
        c = crispr_dsb(g, guide, "ngg")
        self.assertNotEqual(c.id, g.id)
        self.assertEqual(c.parent.id, g.id)
        a = c.fragments.all()[0].indexed_fragment().annotations()
        # guide wraps the origin, so the feature is annotated in two pieces
        self.assertEqual(len(a), 2)
        self.assertEqual(a[0].base_first, 1)
        self.assertEqual(a[0].base_last, 15)
        self.assertEqual(a[0].feature_base_first, 6)
        self.assertEqual(a[0].feature_base_last, 20)
        self.assertEqual(a[0].feature.strand, 1)
        self.assertEqual(a[1].base_first, len(s) - 5 + 1)
        self.assertEqual(a[1].base_last, len(s))
        self.assertEqual(a[1].feature_base_first, 1)
        self.assertEqual(a[1].feature_base_last, 5)
        self.assertEqual(a[1].feature.strand, 1)

    def test_crispr_dsb_finds_and_annotates_reverse_complement_across_circular_boundary(
        self,
    ):
        s1 = "agaaggtctggtagcgatgtagtcgatct"
        s2 = "gactaggtacgtagtcgtcaggtcagtca"
        pam = "cgg"
        s = s1[-15:] + pam + s2 + s1[0 : len(s1) - 15]
        g = self.build_genome(True, str(Seq(s).reverse_complement()))
        guide = s1[-20:]
        c = crispr_dsb(g, guide, "ngg")
        self.assertNotEqual(c.id, g.id)
        self.assertEqual(c.parent.id, g.id)
        a = c.fragments.all()[0].indexed_fragment().annotations()
        self.assertEqual(len(a), 2)
        self.assertEqual(a[0].base_first, 1)
        self.assertEqual(a[0].base_last, 5)
        self.assertEqual(a[0].feature_base_first, 1)
        self.assertEqual(a[0].feature_base_last, 5)
        self.assertEqual(a[0].feature.strand, -1)
        self.assertEqual(a[1].base_first, len(s) - 15 + 1)
        self.assertEqual(a[1].base_last, len(s))
        self.assertEqual(a[1].feature_base_first, 6)
        self.assertEqual(a[1].feature_base_last, 20)
        self.assertEqual(a[1].feature.strand, -1)

    def test_crispr_dsb_creates_operations(self):
        s1 = "agaaggtctggtagcgatgtagtcgatct"
        s2 = "gactaggtacgtagtcgtcaggtcagtca"
        pam = "cgg"
        g = self.build_genome(False, s1 + pam + s2)
        guide = s1[-20:]
        c = crispr_dsb(g, guide, "ngg")
        # the operation is recorded on the child, not the parent
        self.assertEqual(g.operation_set.count(), 0)
        self.assertEqual(c.operation_set.count(), 1)
        self.assertEqual(c.operation_set.all()[0].type, Operation.CRISPR_DSB[0])

    def test_crispr_dsb_creates_new_fragment(self):
        s1 = "agaaggtctggtagcgatgtagtcgatct"
        s2 = "gactaggtacgtagtcgtcaggtcagtca"
        pam = "cgg"
        g = self.build_genome(False, s1 + pam + s2)
        guide = s1[-20:]
        c = crispr_dsb(g, guide, "ngg")
        self.assertEqual(c.fragments.all()[0].parent, g.fragments.all()[0])

    def test_crispr_dsb_api_works(self):
        s1 = "agaaggtctggtagcgatgtagtcgatct"
        s2 = "gactaggtacgtagtcgtcaggtcagtca"
        pam = "cgg"
        g = self.build_genome(False, s1 + pam + s2)
        guide = s1[-20:]
        data = dict(
            genome_name="FooBar", notes="blah", guide=guide, pam="ngg", create=True
        )
        res = self.client.post(
            "/edge/genomes/" + str(g.id) + "/crispr/dsb/",
            data=json.dumps(data),
            content_type="application/json",
        )
        self.assertEqual(res.status_code, 201)
        self.assertEqual(Operation.objects.count(), 1)
        self.assertEqual(g.children.count(), 1)
        c = g.children.all()[0]
        a = c.fragments.all()[0].indexed_fragment().annotations()
        self.assertEqual(len(a), 1)
        self.assertEqual(a[0].feature.operation.genome, c)
        self.assertEqual(c.operation_set.all()[0].type, Operation.CRISPR_DSB[0])
        self.assertEqual(
            c.operation_set.all()[0].params, json.dumps(dict(guide=guide, pam="ngg"))
        )

    def test_multiple_api_calls_return_same_child(self):
        s1 = "agaaggtctggtagcgatgtagtcgatct"
        s2 = "gactaggtacgtagtcgtcaggtcagtca"
        pam = "cgg"
        g = self.build_genome(False, s1 + pam + s2)
        guide = s1[-20:]
        data = dict(
            genome_name="FooBar", notes="blah", guide=guide, pam="ngg", create=True
        )
        res = self.client.post(
            "/edge/genomes/" + str(g.id) + "/crispr/dsb/",
            data=json.dumps(data),
            content_type="application/json",
        )
        self.assertEqual(res.status_code, 201)
        r = json.loads(res.content)
        c1 = r["id"]
        res = self.client.post(
            "/edge/genomes/" + str(g.id) + "/crispr/dsb/",
            data=json.dumps(data),
            content_type="application/json",
        )
        # returns 200 not 201
        self.assertEqual(res.status_code, 200)
        r = json.loads(res.content)
        c2 = r["id"]
        self.assertEqual(c1, c2)
| 42.065527
| 91
| 0.618083
| 1,924
| 14,765
| 4.5842
| 0.081601
| 0.201361
| 0.077098
| 0.071429
| 0.888776
| 0.869841
| 0.860431
| 0.814966
| 0.800454
| 0.7822
| 0
| 0.036271
| 0.240027
| 14,765
| 350
| 92
| 42.185714
| 0.749755
| 0.006231
| 0
| 0.685897
| 0
| 0
| 0.09516
| 0.071166
| 0
| 0
| 0
| 0
| 0.36859
| 1
| 0.060897
| false
| 0.003205
| 0.022436
| 0
| 0.089744
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ec03bd161822f59fb15461d3fc97fd4e9b0e7932
| 15,197
|
py
|
Python
|
fnat_testset/testcase/lrg2/static_address.py
|
lizhouw-netscout/fnat
|
684958773379a9205857f1932de443ed0c4334a0
|
[
"Apache-2.0"
] | null | null | null |
fnat_testset/testcase/lrg2/static_address.py
|
lizhouw-netscout/fnat
|
684958773379a9205857f1932de443ed0c4334a0
|
[
"Apache-2.0"
] | null | null | null |
fnat_testset/testcase/lrg2/static_address.py
|
lizhouw-netscout/fnat
|
684958773379a9205857f1932de443ed0c4334a0
|
[
"Apache-2.0"
] | null | null | null |
from fnat_dev import FnatDevice
from lrg2_app import app_lrg2
import time
def setUp():
    # Suite-level setup hook: only logs that the Static_ip_address suite is starting.
    print('These are Static_ip_address testset')
def tearDown():
    # Suite-level teardown hook: only logs that the Static_ip_address suite finished.
    print('These are Static_ip_address testset')
def testmethod_1():
    '''<case_id>63</case_id>'''
    # Fix: Python-2 print statements replaced with single-argument print()
    # calls, which behave identically on Python 2 and parse on Python 3.
    print("Method testmethod_1 in class Static_ip_address_test suite")
    print("Method set the full static IP address and subnet, which is not duplicated in Static_ip_address_test suite")
    lrg2 = app_lrg2.app_lrg2()
    lrg2.launch_from_icon()
    # ip, netmask, gateway, dns1, dns2 (gateway address presumably reachable — TODO confirm against lab network)
    lrg2.set_static_address("192.168.10.63", "255.255.255.0", "192.168.10.1", "192.168.0.0", "192.168.10.1")
    lrg2.wait_for_object_v(lrg2.autotest_icon_static_card(), 35, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_static_card(), "green", 6)
    lrg2.wait_for_object_v(lrg2.autotest_icon_dns_card(), 35, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_dns_card(), "red", 6)
    lrg2.wait_for_object_description(lrg2.autotest_icon_gateway_card(), "green", 3)
    lrg2.wait_for_object_v(lrg2.autotest_text_cloud(), 15, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_gateway_card(), "green", 15)
def testmethod_2():
    '''<case_id>61</case_id>'''
    # Fix: Python-2 print statements replaced with single-argument print() calls.
    print("Method testmethod_2 in class Static_ip_address_test suite")
    print("Method set the static IP address and subnet, which is not duplicated in Static_ip_address_test suite")
    lrg2 = app_lrg2.app_lrg2()
    lrg2.launch_from_icon()
    # no gateway/DNS configured, so DNS lookup is expected to fail
    lrg2.set_static_address("192.168.10.63", "255.255.255.0", "", "", "")
    lrg2.wait_for_object_v(lrg2.autotest_icon_static_card(), 35, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_static_card(), "green", 6)
    static_value = lrg2.autotest_text_static_address().text
    assert str(static_value) == "192.168.10.63"
    lrg2.wait_for_object_v(lrg2.autotest_text_cloud(), 40, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_cloud_card(), "red", 15)
    cloud_value = lrg2.autotest_text_cloud().text
    assert str(cloud_value) == "DNS Lookup Failed"
def testmethod_3():
    '''<case_id>65</case_id>'''
    # Fix: Python-2 print statements replaced with single-argument print() calls.
    print("Method testmethod_3 in class Static_ip_address_test suite")
    print("Method set the full static IP address and subnet, which is duplicated in Static_ip_address_test suite")
    lrg2 = app_lrg2.app_lrg2()
    lrg2.launch_from_icon()
    # IP deliberately collides with the gateway address to trigger duplicate detection
    lrg2.set_static_address("192.168.10.1", "255.255.255.0", "192.168.10.1", "192.168.10.1", "192.168.10.2")
    lrg2.wait_for_object_v(lrg2.autotest_icon_static_card(), 35, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_static_card(), "red", 6)
    static_value = lrg2.autotest_text_static_address().text
    assert str(static_value) == "Duplicate IP Address"
    lrg2.wait_for_object_v(lrg2.autotest_text_cloud(), 40, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_cloud_card(), "red", 15)
    cloud_value = lrg2.autotest_text_cloud().text
    assert str(cloud_value) == "Duplicate IP Address"
def testmethod_4():
    '''<case_id>64</case_id>'''
    # Fix: Python-2 print statements replaced with single-argument print() calls.
    print("Method testmethod_4 in class Static_ip_address_test suite")
    print("Method set the static IP address and subnet, which is duplicated in Static_ip_address_test suite")
    lrg2 = app_lrg2.app_lrg2()
    lrg2.launch_from_icon()
    # duplicated IP with no gateway/DNS configured
    lrg2.set_static_address("192.168.10.1", "255.255.255.0", "", "", "")
    lrg2.wait_for_object_v(lrg2.autotest_icon_static_card(), 35, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_static_card(), "red", 6)
    static_value = lrg2.autotest_text_static_address().text
    assert str(static_value) == "Duplicate IP Address"
    lrg2.wait_for_object_v(lrg2.autotest_text_cloud(), 40, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_cloud_card(), "red", 15)
    cloud_value = lrg2.autotest_text_cloud().text
    assert str(cloud_value) == "Duplicate IP Address"
def testmethod_5():
    '''<case_id>66</case_id>'''
    # Fix: Python-2 print statements replaced with single-argument print() calls.
    print("Method testmethod_5 in class Static_ip_address_test suite")
    print("Method set the default gateway, which is in the subnet in Static_ip_address_test suite")
    lrg2 = app_lrg2.app_lrg2()
    lrg2.launch_from_icon()
    # gateway and DNS1 both inside the configured /24 subnet
    lrg2.set_static_address("192.168.10.63", "255.255.255.0", "192.168.10.1", "192.168.10.1", "")
    lrg2.wait_for_object_v(lrg2.autotest_icon_static_card(), 35, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_static_card(), "green", 6)
    static_value = lrg2.autotest_text_static_address().text
    assert str(static_value) == "192.168.10.63"
    lrg2.wait_for_object_v(lrg2.autotest_icon_dns_card(), 35, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_dns_card(), "green", 6)
    dns_value = lrg2.autotest_text_dns().text
    assert dns_value.rstrip() == "192.168.10.1"
    lrg2.wait_for_object_description(lrg2.autotest_icon_gateway_card(), "green", 6)
    gate_value = lrg2.autotest_text_gateway().text
    assert str(gate_value) == "192.168.10.1"
    lrg2.wait_for_object_v(lrg2.autotest_text_cloud(), 15, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_cloud_card(), "green", 15)
    cloud_value = lrg2.autotest_text_cloud().text
    assert str(cloud_value) == "Link-Live.com"
def testmethod_6():
    '''<case_id>67</case_id>'''
    # Fix: Python-2 print statements replaced with single-argument print() calls.
    print("Method testmethod_6 in class Static_ip_address_test suite")
    print("Method set the default gateway, which is out of the subnet in Static_ip_address_test suite")
    lrg2 = app_lrg2.app_lrg2()
    lrg2.launch_from_icon()
    # gateway 192.168.2.100 is outside the 192.168.10.0/24 subnet -> gateway card red
    lrg2.set_static_address("192.168.10.63", "255.255.255.0", "192.168.2.100", "192.168.10.1", "")
    lrg2.wait_for_object_v(lrg2.autotest_icon_static_card(), 35, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_static_card(), "green", 6)
    static_value = lrg2.autotest_text_static_address().text
    assert str(static_value) == "192.168.10.63"
    lrg2.wait_for_object_v(lrg2.autotest_icon_dns_card(), 35, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_dns_card(), "green", 6)
    dns_value = lrg2.autotest_text_dns().text
    assert dns_value.rstrip() == "192.168.10.1"
    lrg2.wait_for_object_description(lrg2.autotest_icon_gateway_card(), "red", 6)
    # expand gateway detail to read the ping times, then collapse again
    lrg2.autotest_text_gateway().click()
    gate_value = lrg2.autotest_text_gateway_pingtime().text
    assert str(gate_value) == " --, --, --"
    lrg2.autotest_text_gateway().click()
    lrg2.wait_for_object_v(lrg2.autotest_text_cloud(), 15, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_cloud_card(), "red", 15)
    cloud_value = lrg2.autotest_text_cloud().text
    assert str(cloud_value) == "Can not connect to server."
def testmethod_7():
    '''<case_id>68</case_id>'''
    # Fix: Python-2 print statements replaced with single-argument print() calls.
    print("Method testmethod_7 in class Static_ip_address_test suite")
    print("Method set DNS1,not set DNS2 in Static_ip_address_test suite")
    lrg2 = app_lrg2.app_lrg2()
    lrg2.launch_from_icon()
    # only DNS1 is configured; DNS2 left empty
    lrg2.set_static_address("192.168.10.63", "255.255.255.0", "192.168.10.1", "192.168.10.1", "")
    lrg2.wait_for_object_v(lrg2.autotest_icon_static_card(), 35, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_static_card(), "green", 6)
    static_value = lrg2.autotest_text_static_address().text
    assert str(static_value) == "192.168.10.63"
    lrg2.wait_for_object_v(lrg2.autotest_icon_dns_card(), 35, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_dns_card(), "green", 6)
    dns_value = lrg2.autotest_text_dns().text
    assert dns_value.strip() == "192.168.10.1"
    lrg2.wait_for_object_description(lrg2.autotest_icon_gateway_card(), "green", 6)
    gate_value = lrg2.autotest_text_gateway().text
    assert str(gate_value) == "192.168.10.1"
    lrg2.wait_for_object_v(lrg2.autotest_text_cloud(), 15, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_cloud_card(), "green", 15)
    cloud_value = lrg2.autotest_text_cloud().text
    assert str(cloud_value) == "Link-Live.com"
def testmethod_8():
    '''<case_id>69</case_id>'''
    # Fix: Python-2 print statements replaced with single-argument print() calls.
    print("Method testmethod_8 in class Static_ip_address_test suite")
    print("Method set DNS2,not set DNS1 in Static_ip_address_test suite")
    lrg2 = app_lrg2.app_lrg2()
    lrg2.launch_from_icon()
    # only DNS2 is configured; DNS1 left empty
    lrg2.set_static_address("192.168.10.63", "255.255.255.0", "192.168.10.1", "", "192.168.10.1")
    lrg2.wait_for_object_v(lrg2.autotest_icon_static_card(), 35, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_static_card(), "green", 6)
    static_value = lrg2.autotest_text_static_address().text
    assert str(static_value) == "192.168.10.63"
    lrg2.wait_for_object_v(lrg2.autotest_icon_dns_card(), 35, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_dns_card(), "green", 6)
    # expand DNS detail and verify the entry is labelled DNS2, then collapse
    lrg2.autotest_text_dns().click()
    dns_value = lrg2.autotest_detail_dns_title().text
    assert dns_value == "DNS2"
    dns_value = lrg2.autotest_text_dns().text
    assert dns_value.strip() == "192.168.10.1"
    lrg2.autotest_text_dns().click()
    lrg2.wait_for_object_description(lrg2.autotest_icon_gateway_card(), "green", 6)
    gate_value = lrg2.autotest_text_gateway().text
    assert str(gate_value) == "192.168.10.1"
    lrg2.wait_for_object_v(lrg2.autotest_text_cloud(), 15, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_cloud_card(), "green", 15)
    cloud_value = lrg2.autotest_text_cloud().text
    assert str(cloud_value) == "Link-Live.com"
def testmethod_9():
    '''<case_id>70</case_id>'''
    # Fix: Python-2 print statements replaced with single-argument print() calls.
    print("Method testmethod_9 in class Static_ip_address_test suite")
    print("Method set DNS2,set DNS1 in Static_ip_address_test suite")
    lrg2 = app_lrg2.app_lrg2()
    lrg2.launch_from_icon()
    # DNS1 (192.168.1.1) is unreachable, DNS2 (192.168.10.1) works -> DNS card red
    lrg2.set_static_address("192.168.10.63", "255.255.255.0", "192.168.10.1", "192.168.1.1", "192.168.10.1")
    lrg2.wait_for_object_v(lrg2.autotest_icon_static_card(), 35, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_static_card(), "green", 6)
    static_value = lrg2.autotest_text_static_address().text
    assert str(static_value) == "192.168.10.63"
    lrg2.wait_for_object_v(lrg2.autotest_icon_dns_card(), 35, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_dns_card(), "red", 6)
    # expand DNS detail: DNS1 shows no ping responses, then collapse
    lrg2.autotest_text_dns().click()
    dns_value = lrg2.autotest_detail_dns_title().text
    assert dns_value == "DNS1"
    dns_value = lrg2.autotest_detail_dns_pingtime().text
    assert dns_value == " --, --, --"
    lrg2.autotest_text_dns().click()
    lrg2.wait_for_object_description(lrg2.autotest_icon_gateway_card(), "green", 6)
    gate_value = lrg2.autotest_text_gateway().text
    assert str(gate_value) == "192.168.10.1"
    lrg2.wait_for_object_v(lrg2.autotest_text_cloud(), 15, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_cloud_card(), "green", 15)
    cloud_value = lrg2.autotest_text_cloud().text
    assert str(cloud_value) == "Link-Live.com"
def testmethod_10():
    '''<case_id>71</case_id>'''
    # Fix: Python-2 print statements replaced with single-argument print() calls.
    print("Method testmethod_10 in class Static_ip_address_test suite")
    print("Method DNS server is in the different subnet, while GW is set in Static_ip_address_test suite")
    lrg2 = app_lrg2.app_lrg2()
    lrg2.launch_from_icon()
    # DNS1 is in a different subnet but a gateway is configured
    lrg2.set_static_address("192.168.10.63", "255.255.255.0", "192.168.10.1", "192.168.1.1", "192.168.10.1")
    lrg2.wait_for_object_v(lrg2.autotest_icon_static_card(), 35, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_static_card(), "green", 6)
    static_value = lrg2.autotest_text_static_address().text
    assert str(static_value) == "192.168.10.63"
    lrg2.wait_for_object_v(lrg2.autotest_icon_dns_card(), 35, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_dns_card(), "green", 6)
    lrg2.autotest_text_dns().click()
    dns_value_1 = lrg2.autotest_detail_dns_title()[0].text
    assert dns_value_1 == "DNS1"
    # give the ping results a moment to populate before reading them
    time.sleep(3)
    dns_ping_1 = lrg2.autotest_detail_dns_pingtime()[0].text
    assert dns_ping_1 == " --, --, --"
    lrg2.autotest_text_dns().click()
    lrg2.wait_for_object_description(lrg2.autotest_icon_gateway_card(), "green", 6)
    gate_value = lrg2.autotest_text_gateway().text
    assert str(gate_value) == "192.168.10.1"
    lrg2.wait_for_object_v(lrg2.autotest_text_cloud(), 15, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_cloud_card(), "green", 15)
    cloud_value = lrg2.autotest_text_cloud().text
    assert str(cloud_value) == "Link-Live.com"
def testmethod_11():
    '''<case_id>72</case_id>'''
    # Fix: Python-2 print statements replaced with single-argument print() calls.
    print("Method testmethod_11 in class Static_ip_address_test suite")
    print("Method DNS server is in the different subnet, while GW is not set in Static_ip_address_test suite")
    lrg2 = app_lrg2.app_lrg2()
    lrg2.launch_from_icon()
    # NOTE(review): comment says GW is not set, but a gateway argument is passed
    # here — presumably inherited from a copy of testmethod_10; confirm intent.
    lrg2.set_static_address("192.168.10.63", "255.255.255.0", "192.168.10.1", "192.168.1.1", "192.168.10.1")
    lrg2.wait_for_object_v(lrg2.autotest_icon_static_card(), 35, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_static_card(), "green", 6)
    static_value = lrg2.autotest_text_static_address().text
    assert str(static_value) == "192.168.10.63"
    lrg2.wait_for_object_v(lrg2.autotest_icon_dns_card(), 35, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_dns_card(), "red", 6)
    lrg2.autotest_text_dns().click()
    dns_value_1 = lrg2.autotest_detail_dns_title()[0].text
    assert dns_value_1 == "DNS1"
    # give the ping results a moment to populate before reading them
    time.sleep(3)
    dns_value_1 = lrg2.autotest_detail_dns_pingtime()[0].text
    assert dns_value_1 != "ARP Failed"
    lrg2.autotest_text_dns().click()
    lrg2.wait_for_object_v(lrg2.autotest_text_cloud(), 15, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_cloud_card(), "red", 15)
    cloud_value = lrg2.autotest_text_cloud().text
    assert str(cloud_value) == "DNS Lookup Failed"
def testmethod_12():
    '''<case_id>84</case_id>'''
    # Fix: Python-2 print statements replaced with single-argument print() calls.
    print("Method testmethod_12 in class Static_ip_address_test suite")
    print("Method confirm LLT can identify the address in the subnet based on netmask is set in Static_ip_address_test suite")
    lrg2 = app_lrg2.app_lrg2()
    lrg2.launch_from_icon()
    lrg2.set_static_address("192.168.10.63", "255.255.255.0", "192.168.10.1", "192.168.1.1", "192.168.10.1")
    lrg2.wait_for_object_v(lrg2.autotest_icon_static_card(), 35, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_static_card(), "green", 6)
    static_value = lrg2.autotest_text_static_address().text
    assert str(static_value) == "192.168.10.63"
    lrg2.wait_for_object_v(lrg2.autotest_icon_dns_card(), 35, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_dns_card(), "green", 6)
    # expand DNS detail, confirm the first entry is DNS1, then collapse
    lrg2.autotest_text_dns().click()
    dns_value_1 = lrg2.autotest_detail_dns_title()[0].text
    assert dns_value_1 == "DNS1"
    lrg2.autotest_text_dns().click()
    lrg2.wait_for_object_description(lrg2.autotest_icon_gateway_card(), "green", 6)
    gate_value = lrg2.autotest_text_gateway().text
    assert str(gate_value) == "192.168.10.1"
    lrg2.wait_for_object_v(lrg2.autotest_text_cloud(), 15, True)
    lrg2.wait_for_object_description(lrg2.autotest_icon_cloud_card(), "green", 15)
    cloud_value = lrg2.autotest_text_cloud().text
    assert str(cloud_value) == "Link-Live.com"
| 38.376263
| 125
| 0.727907
| 2,369
| 15,197
| 4.311102
| 0.050654
| 0.149222
| 0.079702
| 0.123176
| 0.962107
| 0.92627
| 0.913835
| 0.912562
| 0.905121
| 0.899148
| 0
| 0.085302
| 0.142199
| 15,197
| 395
| 126
| 38.473418
| 0.698144
| 0
| 0
| 0.75502
| 0
| 0
| 0.21065
| 0.035501
| 0
| 0
| 0
| 0
| 0.164659
| 0
| null | null | 0
| 0.012048
| null | null | 0.104418
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ec3abe09d2474992ff87046d9858c36dbc11e109
| 17,408
|
py
|
Python
|
sdk/python/pulumi_openstack/compute/keypair.py
|
pulumi/pulumi-openstack
|
945eed22a82784e9f0b3aa56168b2397c2f503e8
|
[
"ECL-2.0",
"Apache-2.0"
] | 34
|
2018-09-12T12:37:51.000Z
|
2022-02-04T19:32:13.000Z
|
sdk/python/pulumi_openstack/compute/keypair.py
|
pulumi/pulumi-openstack
|
945eed22a82784e9f0b3aa56168b2397c2f503e8
|
[
"ECL-2.0",
"Apache-2.0"
] | 72
|
2018-08-15T13:04:57.000Z
|
2022-03-31T15:39:49.000Z
|
sdk/python/pulumi_openstack/compute/keypair.py
|
pulumi/pulumi-openstack
|
945eed22a82784e9f0b3aa56168b2397c2f503e8
|
[
"ECL-2.0",
"Apache-2.0"
] | 7
|
2019-03-14T08:28:49.000Z
|
2021-12-29T04:23:55.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['KeypairArgs', 'Keypair']
@pulumi.input_type
class KeypairArgs:
    # Generated input-argument bag for the Keypair resource. Every field is
    # optional; a field is only written into the Pulumi property store when
    # the caller supplies a non-None value.
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[str]] = None,
                 public_key: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 value_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None):
        """
        The set of arguments for constructing a Keypair resource.
        :param pulumi.Input[str] name: A unique name for the keypair. Changing this creates a new
               keypair.
        :param pulumi.Input[str] public_key: A pregenerated OpenSSH-formatted public key.
               Changing this creates a new keypair. If a public key is not specified, then
               a public/private key pair will be automatically generated. If a pair is
               created, then destroying this resource means you will lose access to that
               keypair forever.
        :param pulumi.Input[str] region: The region in which to obtain the V2 Compute client.
               Keypairs are associated with accounts, but a Compute client is needed to
               create one. If omitted, the `region` argument of the provider is used.
               Changing this creates a new keypair.
        :param pulumi.Input[Mapping[str, Any]] value_specs: Map of additional options.
        """
        # Only set properties that were explicitly provided, so unset fields
        # stay absent from the resource's input map.
        if name is not None:
            pulumi.set(__self__, "name", name)
        if public_key is not None:
            pulumi.set(__self__, "public_key", public_key)
        if region is not None:
            pulumi.set(__self__, "region", region)
        if value_specs is not None:
            pulumi.set(__self__, "value_specs", value_specs)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        A unique name for the keypair. Changing this creates a new
        keypair.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="publicKey")  # camelCase name used on the wire
    def public_key(self) -> Optional[pulumi.Input[str]]:
        """
        A pregenerated OpenSSH-formatted public key.
        Changing this creates a new keypair. If a public key is not specified, then
        a public/private key pair will be automatically generated. If a pair is
        created, then destroying this resource means you will lose access to that
        keypair forever.
        """
        return pulumi.get(self, "public_key")
    @public_key.setter
    def public_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "public_key", value)
    @property
    @pulumi.getter
    def region(self) -> Optional[pulumi.Input[str]]:
        """
        The region in which to obtain the V2 Compute client.
        Keypairs are associated with accounts, but a Compute client is needed to
        create one. If omitted, the `region` argument of the provider is used.
        Changing this creates a new keypair.
        """
        return pulumi.get(self, "region")
    @region.setter
    def region(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "region", value)
    @property
    @pulumi.getter(name="valueSpecs")  # camelCase name used on the wire
    def value_specs(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Map of additional options.
        """
        return pulumi.get(self, "value_specs")
    @value_specs.setter
    def value_specs(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "value_specs", value)
@pulumi.input_type
class _KeypairState:
    # Generated state bag used by Keypair.get() to look up / filter existing
    # resources. Superset of KeypairArgs: adds the provider-computed
    # `fingerprint` and `private_key` outputs.
    def __init__(__self__, *,
                 fingerprint: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 private_key: Optional[pulumi.Input[str]] = None,
                 public_key: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 value_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None):
        """
        Input properties used for looking up and filtering Keypair resources.
        :param pulumi.Input[str] fingerprint: The fingerprint of the public key.
        :param pulumi.Input[str] name: A unique name for the keypair. Changing this creates a new
               keypair.
        :param pulumi.Input[str] private_key: The generated private key when no public key is specified.
        :param pulumi.Input[str] public_key: A pregenerated OpenSSH-formatted public key.
               Changing this creates a new keypair. If a public key is not specified, then
               a public/private key pair will be automatically generated. If a pair is
               created, then destroying this resource means you will lose access to that
               keypair forever.
        :param pulumi.Input[str] region: The region in which to obtain the V2 Compute client.
               Keypairs are associated with accounts, but a Compute client is needed to
               create one. If omitted, the `region` argument of the provider is used.
               Changing this creates a new keypair.
        :param pulumi.Input[Mapping[str, Any]] value_specs: Map of additional options.
        """
        # Only set properties that were explicitly provided.
        if fingerprint is not None:
            pulumi.set(__self__, "fingerprint", fingerprint)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if private_key is not None:
            pulumi.set(__self__, "private_key", private_key)
        if public_key is not None:
            pulumi.set(__self__, "public_key", public_key)
        if region is not None:
            pulumi.set(__self__, "region", region)
        if value_specs is not None:
            pulumi.set(__self__, "value_specs", value_specs)
    @property
    @pulumi.getter
    def fingerprint(self) -> Optional[pulumi.Input[str]]:
        """
        The fingerprint of the public key.
        """
        return pulumi.get(self, "fingerprint")
    @fingerprint.setter
    def fingerprint(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "fingerprint", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        A unique name for the keypair. Changing this creates a new
        keypair.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="privateKey")  # camelCase name used on the wire
    def private_key(self) -> Optional[pulumi.Input[str]]:
        """
        The generated private key when no public key is specified.
        """
        return pulumi.get(self, "private_key")
    @private_key.setter
    def private_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_key", value)
    @property
    @pulumi.getter(name="publicKey")  # camelCase name used on the wire
    def public_key(self) -> Optional[pulumi.Input[str]]:
        """
        A pregenerated OpenSSH-formatted public key.
        Changing this creates a new keypair. If a public key is not specified, then
        a public/private key pair will be automatically generated. If a pair is
        created, then destroying this resource means you will lose access to that
        keypair forever.
        """
        return pulumi.get(self, "public_key")
    @public_key.setter
    def public_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "public_key", value)
    @property
    @pulumi.getter
    def region(self) -> Optional[pulumi.Input[str]]:
        """
        The region in which to obtain the V2 Compute client.
        Keypairs are associated with accounts, but a Compute client is needed to
        create one. If omitted, the `region` argument of the provider is used.
        Changing this creates a new keypair.
        """
        return pulumi.get(self, "region")
    @region.setter
    def region(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "region", value)
    @property
    @pulumi.getter(name="valueSpecs")  # camelCase name used on the wire
    def value_specs(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Map of additional options.
        """
        return pulumi.get(self, "value_specs")
    @value_specs.setter
    def value_specs(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "value_specs", value)
class Keypair(pulumi.CustomResource):
    # Generated resource class for openstack:compute/keypair:Keypair.
    # The two @overload __init__ signatures exist only for type checkers; the
    # real __init__ dispatches to _internal_init either from a KeypairArgs
    # bag or from keyword arguments.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 public_key: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 value_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 __props__=None):
        """
        ## Import
        Keypairs can be imported using the `name`, e.g.
        ```sh
        $ pulumi import openstack:compute/keypair:Keypair my-keypair test-keypair
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] name: A unique name for the keypair. Changing this creates a new
               keypair.
        :param pulumi.Input[str] public_key: A pregenerated OpenSSH-formatted public key.
               Changing this creates a new keypair. If a public key is not specified, then
               a public/private key pair will be automatically generated. If a pair is
               created, then destroying this resource means you will lose access to that
               keypair forever.
        :param pulumi.Input[str] region: The region in which to obtain the V2 Compute client.
               Keypairs are associated with accounts, but a Compute client is needed to
               create one. If omitted, the `region` argument of the provider is used.
               Changing this creates a new keypair.
        :param pulumi.Input[Mapping[str, Any]] value_specs: Map of additional options.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: Optional[KeypairArgs] = None,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        ## Import
        Keypairs can be imported using the `name`, e.g.
        ```sh
        $ pulumi import openstack:compute/keypair:Keypair my-keypair test-keypair
        ```
        :param str resource_name: The name of the resource.
        :param KeypairArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Decide which overload the caller used, then forward to
        # _internal_init with flat keyword arguments either way.
        resource_args, opts = _utilities.get_resource_args_opts(KeypairArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       public_key: Optional[pulumi.Input[str]] = None,
                       region: Optional[pulumi.Input[str]] = None,
                       value_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                       __props__=None):
        # Normalize resource options and pin the plugin version.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: build the property bag from the
            # keyword arguments. __props__ is reserved for the get() path.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = KeypairArgs.__new__(KeypairArgs)
            __props__.__dict__["name"] = name
            __props__.__dict__["public_key"] = public_key
            __props__.__dict__["region"] = region
            __props__.__dict__["value_specs"] = value_specs
            # Output-only properties start as None and are filled in by the
            # provider.
            __props__.__dict__["fingerprint"] = None
            __props__.__dict__["private_key"] = None
        super(Keypair, __self__).__init__(
            'openstack:compute/keypair:Keypair',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            fingerprint: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            private_key: Optional[pulumi.Input[str]] = None,
            public_key: Optional[pulumi.Input[str]] = None,
            region: Optional[pulumi.Input[str]] = None,
            value_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None) -> 'Keypair':
        """
        Get an existing Keypair resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] fingerprint: The fingerprint of the public key.
        :param pulumi.Input[str] name: A unique name for the keypair. Changing this creates a new
               keypair.
        :param pulumi.Input[str] private_key: The generated private key when no public key is specified.
        :param pulumi.Input[str] public_key: A pregenerated OpenSSH-formatted public key.
               Changing this creates a new keypair. If a public key is not specified, then
               a public/private key pair will be automatically generated. If a pair is
               created, then destroying this resource means you will lose access to that
               keypair forever.
        :param pulumi.Input[str] region: The region in which to obtain the V2 Compute client.
               Keypairs are associated with accounts, but a Compute client is needed to
               create one. If omitted, the `region` argument of the provider is used.
               Changing this creates a new keypair.
        :param pulumi.Input[Mapping[str, Any]] value_specs: Map of additional options.
        """
        # Setting opts.id routes __init__ down the "existing resource" path.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _KeypairState.__new__(_KeypairState)
        __props__.__dict__["fingerprint"] = fingerprint
        __props__.__dict__["name"] = name
        __props__.__dict__["private_key"] = private_key
        __props__.__dict__["public_key"] = public_key
        __props__.__dict__["region"] = region
        __props__.__dict__["value_specs"] = value_specs
        return Keypair(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def fingerprint(self) -> pulumi.Output[str]:
        """
        The fingerprint of the public key.
        """
        return pulumi.get(self, "fingerprint")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        A unique name for the keypair. Changing this creates a new
        keypair.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="privateKey")  # camelCase name used on the wire
    def private_key(self) -> pulumi.Output[str]:
        """
        The generated private key when no public key is specified.
        """
        return pulumi.get(self, "private_key")
    @property
    @pulumi.getter(name="publicKey")  # camelCase name used on the wire
    def public_key(self) -> pulumi.Output[str]:
        """
        A pregenerated OpenSSH-formatted public key.
        Changing this creates a new keypair. If a public key is not specified, then
        a public/private key pair will be automatically generated. If a pair is
        created, then destroying this resource means you will lose access to that
        keypair forever.
        """
        return pulumi.get(self, "public_key")
    @property
    @pulumi.getter
    def region(self) -> pulumi.Output[str]:
        """
        The region in which to obtain the V2 Compute client.
        Keypairs are associated with accounts, but a Compute client is needed to
        create one. If omitted, the `region` argument of the provider is used.
        Changing this creates a new keypair.
        """
        return pulumi.get(self, "region")
    @property
    @pulumi.getter(name="valueSpecs")  # camelCase name used on the wire
    def value_specs(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
        """
        Map of additional options.
        """
        return pulumi.get(self, "value_specs")
| 42.150121
| 134
| 0.630055
| 2,123
| 17,408
| 4.996703
| 0.080546
| 0.070513
| 0.069947
| 0.072587
| 0.838424
| 0.815422
| 0.790913
| 0.781109
| 0.776301
| 0.755373
| 0
| 0.000637
| 0.278722
| 17,408
| 412
| 135
| 42.252427
| 0.844218
| 0.396484
| 0
| 0.700483
| 1
| 0
| 0.075795
| 0.003558
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15942
| false
| 0.004831
| 0.024155
| 0
| 0.280193
| 0.062802
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ec42461ec8cb2542453e4675001f770b9463abf9
| 6,011
|
py
|
Python
|
modules/ESP32/marker.py
|
ccccmagicboy/MicroPython_fw
|
d2049bc19e3d5010f5d6d0d17aa13a8693914fbd
|
[
"MIT"
] | 23
|
2020-01-22T00:40:20.000Z
|
2021-08-03T20:42:07.000Z
|
modules/ESP32/marker.py
|
ccccmagicboy/MicroPython_fw
|
d2049bc19e3d5010f5d6d0d17aa13a8693914fbd
|
[
"MIT"
] | 10
|
2020-02-18T09:57:04.000Z
|
2020-03-04T11:39:17.000Z
|
modules/ESP32/marker.py
|
ccccmagicboy/MicroPython_fw
|
d2049bc19e3d5010f5d6d0d17aa13a8693914fbd
|
[
"MIT"
] | 5
|
2020-02-20T09:35:45.000Z
|
2022-01-04T16:23:13.000Z
|
def glyphs():
    """Return the number of glyph records contained in this font."""
    glyph_count = 97
    return glyph_count
_font =\
b'\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a'\
b'\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a'\
b'\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a'\
b'\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a'\
b'\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a'\
b'\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a'\
b'\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x11\x4b\x59\x51\x4b\x4e'\
b'\x4c\x4c\x4e\x4b\x51\x4b\x53\x4c\x56\x4e\x58\x51\x59\x53\x59'\
b'\x56\x58\x58\x56\x59\x53\x59\x51\x58\x4e\x56\x4c\x53\x4b\x51'\
b'\x4b\x05\x4c\x58\x4c\x4c\x4c\x58\x58\x58\x58\x4c\x4c\x4c\x04'\
b'\x4b\x59\x52\x4a\x4b\x56\x59\x56\x52\x4a\x05\x4c\x58\x52\x48'\
b'\x4c\x52\x52\x5c\x58\x52\x52\x48\x0b\x4a\x5a\x52\x49\x50\x4f'\
b'\x4a\x4f\x4f\x53\x4d\x59\x52\x55\x57\x59\x55\x53\x5a\x4f\x54'\
b'\x4f\x52\x49\x0d\x4c\x58\x50\x4c\x50\x50\x4c\x50\x4c\x54\x50'\
b'\x54\x50\x58\x54\x58\x54\x54\x58\x54\x58\x50\x54\x50\x54\x4c'\
b'\x50\x4c\x05\x4b\x59\x52\x4b\x52\x59\x20\x52\x4b\x52\x59\x52'\
b'\x05\x4d\x57\x4d\x4d\x57\x57\x20\x52\x57\x4d\x4d\x57\x08\x4d'\
b'\x57\x52\x4c\x52\x58\x20\x52\x4d\x4f\x57\x55\x20\x52\x57\x4f'\
b'\x4d\x55\x22\x4e\x56\x51\x4e\x4f\x4f\x4e\x51\x4e\x53\x4f\x55'\
b'\x51\x56\x53\x56\x55\x55\x56\x53\x56\x51\x55\x4f\x53\x4e\x51'\
b'\x4e\x20\x52\x4f\x51\x4f\x53\x20\x52\x50\x50\x50\x54\x20\x52'\
b'\x51\x4f\x51\x55\x20\x52\x52\x4f\x52\x55\x20\x52\x53\x4f\x53'\
b'\x55\x20\x52\x54\x50\x54\x54\x20\x52\x55\x51\x55\x53\x1a\x4e'\
b'\x56\x4e\x4e\x4e\x56\x56\x56\x56\x4e\x4e\x4e\x20\x52\x4f\x4f'\
b'\x4f\x55\x20\x52\x50\x4f\x50\x55\x20\x52\x51\x4f\x51\x55\x20'\
b'\x52\x52\x4f\x52\x55\x20\x52\x53\x4f\x53\x55\x20\x52\x54\x4f'\
b'\x54\x55\x20\x52\x55\x4f\x55\x55\x10\x4d\x57\x52\x4c\x4d\x55'\
b'\x57\x55\x52\x4c\x20\x52\x52\x4f\x4f\x54\x20\x52\x52\x4f\x55'\
b'\x54\x20\x52\x52\x52\x51\x54\x20\x52\x52\x52\x53\x54\x10\x4c'\
b'\x55\x4c\x52\x55\x57\x55\x4d\x4c\x52\x20\x52\x4f\x52\x54\x55'\
b'\x20\x52\x4f\x52\x54\x4f\x20\x52\x52\x52\x54\x53\x20\x52\x52'\
b'\x52\x54\x51\x10\x4d\x57\x52\x58\x57\x4f\x4d\x4f\x52\x58\x20'\
b'\x52\x52\x55\x55\x50\x20\x52\x52\x55\x4f\x50\x20\x52\x52\x52'\
b'\x53\x50\x20\x52\x52\x52\x51\x50\x10\x4f\x58\x58\x52\x4f\x4d'\
b'\x4f\x57\x58\x52\x20\x52\x55\x52\x50\x4f\x20\x52\x55\x52\x50'\
b'\x55\x20\x52\x52\x52\x50\x51\x20\x52\x52\x52\x50\x53\x08\x44'\
b'\x60\x44\x52\x60\x52\x20\x52\x44\x52\x52\x62\x20\x52\x60\x52'\
b'\x52\x62\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00'\
b'\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00'\
b'\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00'\
b'\x4a\x5a\x00\x4a\x5a\x11\x4b\x59\x51\x4b\x4e\x4c\x4c\x4e\x4b'\
b'\x51\x4b\x53\x4c\x56\x4e\x58\x51\x59\x53\x59\x56\x58\x58\x56'\
b'\x59\x53\x59\x51\x58\x4e\x56\x4c\x53\x4b\x51\x4b\x05\x4c\x58'\
b'\x4c\x4c\x4c\x58\x58\x58\x58\x4c\x4c\x4c\x04\x4b\x59\x52\x4a'\
b'\x4b\x56\x59\x56\x52\x4a\x05\x4c\x58\x52\x48\x4c\x52\x52\x5c'\
b'\x58\x52\x52\x48\x0b\x4a\x5a\x52\x49\x50\x4f\x4a\x4f\x4f\x53'\
b'\x4d\x59\x52\x55\x57\x59\x55\x53\x5a\x4f\x54\x4f\x52\x49\x0d'\
b'\x4c\x58\x50\x4c\x50\x50\x4c\x50\x4c\x54\x50\x54\x50\x58\x54'\
b'\x58\x54\x54\x58\x54\x58\x50\x54\x50\x54\x4c\x50\x4c\x05\x4b'\
b'\x59\x52\x4b\x52\x59\x20\x52\x4b\x52\x59\x52\x05\x4d\x57\x4d'\
b'\x4d\x57\x57\x20\x52\x57\x4d\x4d\x57\x08\x4d\x57\x52\x4c\x52'\
b'\x58\x20\x52\x4d\x4f\x57\x55\x20\x52\x57\x4f\x4d\x55\x22\x4e'\
b'\x56\x51\x4e\x4f\x4f\x4e\x51\x4e\x53\x4f\x55\x51\x56\x53\x56'\
b'\x55\x55\x56\x53\x56\x51\x55\x4f\x53\x4e\x51\x4e\x20\x52\x4f'\
b'\x51\x4f\x53\x20\x52\x50\x50\x50\x54\x20\x52\x51\x4f\x51\x55'\
b'\x20\x52\x52\x4f\x52\x55\x20\x52\x53\x4f\x53\x55\x20\x52\x54'\
b'\x50\x54\x54\x20\x52\x55\x51\x55\x53\x1a\x4e\x56\x4e\x4e\x4e'\
b'\x56\x56\x56\x56\x4e\x4e\x4e\x20\x52\x4f\x4f\x4f\x55\x20\x52'\
b'\x50\x4f\x50\x55\x20\x52\x51\x4f\x51\x55\x20\x52\x52\x4f\x52'\
b'\x55\x20\x52\x53\x4f\x53\x55\x20\x52\x54\x4f\x54\x55\x20\x52'\
b'\x55\x4f\x55\x55\x10\x4d\x57\x52\x4c\x4d\x55\x57\x55\x52\x4c'\
b'\x20\x52\x52\x4f\x4f\x54\x20\x52\x52\x4f\x55\x54\x20\x52\x52'\
b'\x52\x51\x54\x20\x52\x52\x52\x53\x54\x10\x4c\x55\x4c\x52\x55'\
b'\x57\x55\x4d\x4c\x52\x20\x52\x4f\x52\x54\x55\x20\x52\x4f\x52'\
b'\x54\x4f\x20\x52\x52\x52\x54\x53\x20\x52\x52\x52\x54\x51\x10'\
b'\x4d\x57\x52\x58\x57\x4f\x4d\x4f\x52\x58\x20\x52\x52\x55\x55'\
b'\x50\x20\x52\x52\x55\x4f\x50\x20\x52\x52\x52\x53\x50\x20\x52'\
b'\x52\x52\x51\x50\x10\x4f\x58\x58\x52\x4f\x4d\x4f\x57\x58\x52'\
b'\x20\x52\x55\x52\x50\x4f\x20\x52\x55\x52\x50\x55\x20\x52\x52'\
b'\x52\x50\x51\x20\x52\x52\x52\x50\x53\x08\x44\x60\x44\x52\x60'\
b'\x52\x20\x52\x44\x52\x52\x62\x20\x52\x60\x52\x52\x62\x00\x4a'\
b'\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a'\
b'\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a'\
b'\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a'\
b'\x5a'
_index =\
b'\x00\x00\x03\x00\x06\x00\x09\x00\x0c\x00\x0f\x00\x12\x00\x15'\
b'\x00\x18\x00\x1b\x00\x1e\x00\x21\x00\x24\x00\x27\x00\x2a\x00'\
b'\x2d\x00\x30\x00\x33\x00\x36\x00\x39\x00\x3c\x00\x3f\x00\x42'\
b'\x00\x45\x00\x48\x00\x4b\x00\x4e\x00\x51\x00\x54\x00\x57\x00'\
b'\x5a\x00\x5d\x00\x60\x00\x63\x00\x88\x00\x95\x00\xa0\x00\xad'\
b'\x00\xc6\x00\xe3\x00\xf0\x00\xfd\x00\x10\x01\x57\x01\x8e\x01'\
b'\xb1\x01\xd4\x01\xf7\x01\x1a\x02\x2d\x02\x30\x02\x33\x02\x36'\
b'\x02\x39\x02\x3c\x02\x3f\x02\x42\x02\x45\x02\x48\x02\x4b\x02'\
b'\x4e\x02\x51\x02\x54\x02\x57\x02\x5a\x02\x5d\x02\x82\x02\x8f'\
b'\x02\x9a\x02\xa7\x02\xc0\x02\xdd\x02\xea\x02\xf7\x02\x0a\x03'\
b'\x51\x03\x88\x03\xab\x03\xce\x03\xf1\x03\x14\x04\x27\x04\x2a'\
b'\x04\x2d\x04\x30\x04\x33\x04\x36\x04\x39\x04\x3c\x04\x3f\x04'\
b'\x42\x04\x45\x04\x48\x04\x4b\x04\x4e\x04\x51\x04\x54\x04'
# Zero-copy view over the packed glyph data; get_ch() slices it so callers
# never copy the underlying bytes.
_mvfont = memoryview(_font)
def _chr_addr(ordch):
    """Return the byte offset of *ordch*'s glyph record inside ``_font``.

    The ``_index`` table stores one little-endian 16-bit offset per
    character, starting at ASCII 32 (space).
    """
    idx = (ordch - 32) << 1  # two index bytes per character
    return int.from_bytes(_index[idx:idx + 2], 'little')
def get_ch(ordch):
    """Return a memoryview of the glyph record for character code *ordch*.

    Codes outside the printable ASCII range (32..127) fall back to '?'.
    """
    if not 32 <= ordch <= 127:
        ordch = ord('?')
    offset = _chr_addr(ordch)
    count = _font[offset]
    # Record spans (count + 2) * 2 - 1 bytes from its start; the first byte
    # is the vertex count. NOTE(review): layout per the font generator —
    # confirm against the tool that produced _font.
    return _mvfont[offset:offset + (count + 2) * 2 - 1]
| 55.657407
| 65
| 0.70371
| 1,444
| 6,011
| 2.92036
| 0.077562
| 0.099597
| 0.123785
| 0.142281
| 0.652597
| 0.634337
| 0.631492
| 0.631492
| 0.631492
| 0.6258
| 0
| 0.377478
| 0.026452
| 6,011
| 107
| 66
| 56.17757
| 0.343131
| 0
| 0
| 0.11
| 0
| 0.87
| 0.869862
| 0.868031
| 0
| 1
| 0
| 0
| 0
| 1
| 0.03
| false
| 0
| 0
| 0.01
| 0.06
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6b99ab243dbdc7a69bfedc4fb8316ef8d9a78d47
| 579
|
py
|
Python
|
Exercicios-mundo-3/desafio109/moeda.py
|
talitadeoa/Exercicios-Python
|
6ffac5b403ef4636d8b7b37aba7998dade8a88b8
|
[
"MIT"
] | null | null | null |
Exercicios-mundo-3/desafio109/moeda.py
|
talitadeoa/Exercicios-Python
|
6ffac5b403ef4636d8b7b37aba7998dade8a88b8
|
[
"MIT"
] | null | null | null |
Exercicios-mundo-3/desafio109/moeda.py
|
talitadeoa/Exercicios-Python
|
6ffac5b403ef4636d8b7b37aba7998dade8a88b8
|
[
"MIT"
] | null | null | null |
def aumentar(n, p, formatar=False):
    """Return *n* increased by *p* percent.

    When *formatar* is True the result is returned as a currency string
    via moeda(); otherwise the raw number is returned.
    """
    resultado = n + (n * p / 100)
    if not formatar:
        return resultado
    return moeda(resultado)
def diminuir(n, p, formatar=False):
    """Return *n* decreased by *p* percent.

    When *formatar* is True the result is returned as a currency string
    via moeda(); otherwise the raw number is returned.
    """
    resultado = n - (n * p / 100)
    if not formatar:
        return resultado
    return moeda(resultado)
def dobro(n, formatar=False):
    """Return twice *n*, optionally formatted as currency via moeda()."""
    resultado = 2 * n
    if not formatar:
        return resultado
    return moeda(resultado)
def metade(n, formatar=False):
    """Return half of *n*, optionally formatted as currency via moeda()."""
    resultado = n / 2
    if not formatar:
        return resultado
    return moeda(resultado)
def moeda(n=0, moeda='R$'):
    """Format *n* as a currency string, e.g. ``'R$10,00'``.

    Uses a comma as the decimal separator (Brazilian convention).
    NOTE: the *moeda* parameter deliberately shadows the function name;
    the function does not recurse, so this is harmless.
    """
    texto = f'{moeda}{n:>2.2f}'
    return texto.replace('.', ',')
| 18.677419
| 48
| 0.497409
| 83
| 579
| 3.46988
| 0.253012
| 0.027778
| 0.194444
| 0.208333
| 0.770833
| 0.770833
| 0.770833
| 0.770833
| 0.770833
| 0.770833
| 0
| 0.02973
| 0.360967
| 579
| 31
| 48
| 18.677419
| 0.748649
| 0
| 0
| 0.615385
| 0
| 0
| 0.034542
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.192308
| false
| 0
| 0
| 0.038462
| 0.538462
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
6bc35ece46343c2ebbafdb1fa987096a59fe0110
| 5,664
|
py
|
Python
|
tests/analyzer/test_measure_mapping.py
|
CMSgov/qpp-claims-to-quality-public
|
1e2da9494faf9e316a17cbe899284db9e61d0902
|
[
"CC0-1.0"
] | 13
|
2018-09-28T14:02:59.000Z
|
2021-12-07T21:31:54.000Z
|
tests/analyzer/test_measure_mapping.py
|
CMSgov/qpp-claims-to-quality-public
|
1e2da9494faf9e316a17cbe899284db9e61d0902
|
[
"CC0-1.0"
] | 1
|
2018-10-01T17:49:05.000Z
|
2018-10-09T01:10:56.000Z
|
tests/analyzer/test_measure_mapping.py
|
CMSgov/qpp-claims-to-quality-public
|
1e2da9494faf9e316a17cbe899284db9e61d0902
|
[
"CC0-1.0"
] | 1
|
2021-02-08T18:32:16.000Z
|
2021-02-08T18:32:16.000Z
|
"""Tests for measure_mapping file."""
from claims_to_quality.analyzer import measure_mapping
from claims_to_quality.analyzer.calculation import intersecting_diagnosis_measure
from claims_to_quality.analyzer.calculation.patient_process_measure import PatientProcessMeasure
from claims_to_quality.analyzer.datasource import measure_reader
from claims_to_quality.config import config
import pytest
def test_get_measure_calculator():
    """get_measure_calculator returns a PatientProcessMeasure for measure 047."""
    calculator = measure_mapping.get_measure_calculator(measure_number='047')
    assert isinstance(calculator, PatientProcessMeasure)
def test_get_measure_calculator_not_implemented():
    """Verify get_measure_calculator raises error if the measure isn't implemented."""
    # Unknown measure numbers surface as a KeyError from the calculator map.
    with pytest.raises(KeyError):
        measure_mapping.get_measure_calculator(measure_number='not_a_measure')
class TestReadAllMeasuresFromSingleSource2017():
    """Test suite for tests requiring reading all 2017 measures from the single source."""
    @classmethod
    def setup_class(cls):
        """Load all measures for further tests."""
        # TODO: Test these functions with their default values from config.
        year = 2017
        cls.measures = measure_mapping.get_all_measure_ids(year=year)
        cls.single_source = measure_reader.load_single_source(
            json_path=config.get('assets.qpp_single_source_json')[year]
        )
        cls.measure_calculators = measure_mapping.get_measure_calculators(
            measures=cls.measures, year=year
        ).values()
        # Unpack the definition objects once so the individual tests below
        # can iterate them directly.
        cls.measure_definitions = [
            calculator.measure_definition for calculator in cls.measure_calculators
        ]
    def test_all_eligibility_options_include_at_least_one_procedure_code(self):
        """
        All eligibility options must require at least one procedure or encounter code.
        The logic used to filter claims returned from the IDR requires this to be the case.
        """
        for measure_definition in self.measure_definitions:
            for eligibility_option in measure_definition.eligibility_options:
                assert(len(eligibility_option.procedure_codes) > 0)
    def test_all_intersecting_diagnosis_measures_have_the_same_diagnosis_codes(self):
        """These measures should have the same diagnosis codes across eligibility options."""
        intersecting_diagnosis_measures = [
            calculator
            for calculator in self.measure_calculators
            if isinstance(calculator, intersecting_diagnosis_measure.IntersectingDiagnosisMeasure)
        ]
        # Every option must match the first option's diagnosis-code set.
        for measure in intersecting_diagnosis_measures:
            for option in measure.eligibility_options:
                assert (
                    option.diagnosis_codes_set == measure.eligibility_options[0].diagnosis_codes_set
                )
    def test_all_measures_can_be_calculated(self):
        """Test that all measures have measure calculator objects."""
        # FIXME: Update this to read directly from single source instead of hard-coding.
        # 74 is the expected 2017 measure count.
        assert len(self.measures) == 74
        assert len(self.measure_calculators) == 74
    @classmethod
    def teardown_class(cls):
        """Reload default config for the other tests."""
        config.reload_config()
class TestReadAllMeasuresFromSingleSource2018():
    """Test suite for tests requiring reading all 2018 measures from the single source."""
    @classmethod
    def setup_class(cls):
        """Load all measures for further tests."""
        # Use 2018 as measures year in config.
        year = 2018
        cls.measures = measure_mapping.get_all_measure_ids(year=year)
        cls.single_source = measure_reader.load_single_source(
            json_path=config.get('assets.qpp_single_source_json')[year]
        )
        cls.measure_calculators = measure_mapping.get_measure_calculators(
            measures=cls.measures, year=year
        ).values()
        # Unpack the definition objects once so the individual tests below
        # can iterate them directly.
        cls.measure_definitions = [
            calculator.measure_definition for calculator in cls.measure_calculators
        ]
    def test_all_eligibility_options_include_at_least_one_procedure_code(self):
        """
        All eligibility options must require at least one procedure or encounter code.
        The logic used to filter claims returned from the IDR requires this to be the case.
        """
        for measure_definition in self.measure_definitions:
            for eligibility_option in measure_definition.eligibility_options:
                assert(len(eligibility_option.procedure_codes) > 0)
    def test_all_intersecting_diagnosis_measures_have_the_same_diagnosis_codes(self):
        """These measures should have the same diagnosis codes across eligibility options."""
        intersecting_diagnosis_measures = [
            calculator
            for calculator in self.measure_calculators
            if isinstance(calculator, intersecting_diagnosis_measure.IntersectingDiagnosisMeasure)
        ]
        # Every option must match the first option's diagnosis-code set.
        for measure in intersecting_diagnosis_measures:
            for option in measure.eligibility_options:
                assert (
                    option.diagnosis_codes_set == measure.eligibility_options[0].diagnosis_codes_set
                )
    def test_all_measures_can_be_calculated(self):
        """Test that all measures have measure calculator objects."""
        # FIXME: Update this to read directly from single source instead of hard-coding.
        # 72 is the expected 2018 measure count.
        assert len(self.measures) == 72
        assert len(self.measure_calculators) == 72
    @classmethod
    def teardown_class(cls):
        """Reload default config for the other tests."""
        config.reload_config()
| 43.236641
| 100
| 0.71363
| 652
| 5,664
| 5.937117
| 0.193252
| 0.0558
| 0.031
| 0.024541
| 0.84216
| 0.798243
| 0.77861
| 0.735727
| 0.735727
| 0.735727
| 0
| 0.009768
| 0.222811
| 5,664
| 130
| 101
| 43.569231
| 0.869605
| 0.235523
| 0
| 0.658537
| 0
| 0
| 0.017623
| 0.013813
| 0
| 0
| 0
| 0.023077
| 0.109756
| 1
| 0.146341
| false
| 0
| 0.073171
| 0
| 0.243902
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6bc3e461a2c620a6edc793fca36d4ce521d00f0a
| 12,093
|
py
|
Python
|
examples/tests/test_examples.py
|
grantmjg/beluga
|
2b06a1ae3de30f5fb98d78188f40e33cd8b155f1
|
[
"MIT"
] | null | null | null |
examples/tests/test_examples.py
|
grantmjg/beluga
|
2b06a1ae3de30f5fb98d78188f40e33cd8b155f1
|
[
"MIT"
] | null | null | null |
examples/tests/test_examples.py
|
grantmjg/beluga
|
2b06a1ae3de30f5fb98d78188f40e33cd8b155f1
|
[
"MIT"
] | null | null | null |
# Shared absolute tolerance for comparing solver output against the
# reference values asserted in the tests below.
tol = 1e-3
def test_brachistochrone_shooting():
    """Solve the brachistochrone OCP with the Shooting solver and check the
    'icrm' and 'traditional' methods against hard-coded reference values."""
    from math import pi
    import beluga
    from beluga.ivpsol import Trajectory
    from beluga.bvpsol import Solution
    ocp = beluga.OCP('brachisto')
    # Define independent variables
    ocp.independent('t', 's')
    # Define equations of motion
    ocp.state('x', 'v*cos(theta)', 'm') \
       .state('y', 'v*sin(theta)', 'm') \
       .state('v', 'g*sin(theta)', 'm/s')
    # Define controls
    ocp.control('theta', 'rad')
    # Define constants
    ocp.constant('g', -9.81, 'm/s^2')
    # Define costs
    ocp.path_cost('1', '1')
    # Define constraints
    ocp.constraints() \
        .initial('x-x_0', 'm') \
        .initial('y-y_0', 'm') \
        .initial('v-v_0', 'm/s') \
        .terminal('x-x_f', 'm') \
        .terminal('y-y_f', 'm')
    ocp.scale(m='y', s='y/v', kg=1, rad=1)
    shooting_solver = beluga.bvp_algorithm('Shooting')
    guess_maker = beluga.guess_generator('auto', start=[0, 0, 0], direction='forward', costate_guess=-0.1, control_guess = [-pi/2], use_control_guess=True)
    # Continuation: walk the terminal point out to (10, -10) in 21 steps.
    continuation_steps = beluga.init_continuation()
    continuation_steps.add_step('bisection') \
                      .num_cases(21) \
                      .terminal('x', 10) \
                      .terminal('y', -10)
    sol = beluga.solve(ocp, method='icrm', bvp_algorithm=shooting_solver, steps=continuation_steps,
                       guess_generator=guess_maker)
    assert isinstance(sol, Trajectory)
    assert isinstance(sol, Solution)
    assert sol.t.shape[0] == sol.y.shape[0]
    assert sol.t.shape[0] == sol.u.shape[0]
    # The 'icrm' run carries 7 solution columns (vs 6 for 'traditional' below).
    assert sol.y.shape[1] == 7
    assert sol.u.shape[1] == 1
    y0 = sol.y[0]
    yf = sol.y[-1]
    # NOTE(review): indices 3-5 appear to be costate values — confirm
    # against beluga's state ordering.
    assert abs(y0[0] - 0) < tol
    assert abs(y0[1] - 0) < tol
    assert abs(y0[2] - 0) < tol
    assert abs(y0[3] + 0.0667) < tol
    assert abs(y0[4] - 0.0255) < tol
    assert abs(y0[5] + 0.1019) < tol
    assert abs(sol.t[-1] - 1.8433) < tol
    assert abs(yf[0] - 10) < tol
    assert abs(yf[1] + 10) < tol
    assert abs(yf[2] - 14.0071) < tol
    assert abs(yf[3] + 0.0667) < tol
    assert abs(yf[4] - 0.0255) < tol
    assert abs(yf[5] - 0) < tol
    assert abs(y0[3] - yf[3]) < tol
    assert abs(y0[4] - yf[4]) < tol
    # Re-solve the same problem with the 'traditional' method and check the
    # same reference values.
    sol = beluga.solve(ocp, method='traditional', bvp_algorithm=shooting_solver, steps=continuation_steps, guess_generator=guess_maker)
    y0 = sol.y[0]
    yf = sol.y[-1]
    assert sol.t.shape[0] == sol.y.shape[0]
    assert sol.t.shape[0] == sol.u.shape[0]
    assert sol.y.shape[1] == 6
    assert sol.u.shape[1] == 1
    assert abs(y0[0] - 0) < tol
    assert abs(y0[1] - 0) < tol
    assert abs(y0[2] - 0) < tol
    assert abs(y0[3] + 0.0667) < tol
    assert abs(y0[4] - 0.0255) < tol
    assert abs(y0[5] + 0.1019) < tol
    assert abs(sol.t[-1] - 1.8433) < tol
    assert abs(yf[0] - 10) < tol
    assert abs(yf[1] + 10) < tol
    assert abs(yf[2] - 14.0071) < tol
    assert abs(yf[3] + 0.0667) < tol
    assert abs(yf[4] - 0.0255) < tol
    assert abs(yf[5] - 0) < tol
    assert abs(y0[3] - yf[3]) < tol
    assert abs(y0[4] - yf[4]) < tol
def test_brachistochrone_collocation():
    """Solve the brachistochrone OCP with the Collocation BVP solver.

    Runs the 'traditional' method first, then 'icrm', and checks the
    trajectory endpoints against reference values using the module-level
    tolerance ``tol``.
    """
    from math import pi
    import beluga
    from beluga.ivpsol import Trajectory
    from beluga.bvpsol import Solution
    ocp = beluga.OCP('brachisto')
    # Define independent variables
    ocp.independent('t', 's')
    # Define equations of motion
    ocp.state('x', 'v*cos(theta)', 'm') \
        .state('y', 'v*sin(theta)', 'm') \
        .state('v', 'g*sin(theta)', 'm/s')
    # Define controls
    ocp.control('theta', 'rad')
    # Define constants
    ocp.constant('g', -9.81, 'm/s^2')
    # Define costs (minimum time: unit path cost)
    ocp.path_cost('1', '1')
    # Define constraints
    ocp.constraints() \
        .initial('x-x_0', 'm') \
        .initial('y-y_0', 'm') \
        .initial('v-v_0', 'm/s') \
        .terminal('x-x_f', 'm') \
        .terminal('y-y_f', 'm')
    ocp.scale(m='y', s='y/v', kg=1, rad=1)
    # NOTE(review): name says "shooting" but this test deliberately uses the
    # Collocation algorithm.
    shooting_solver = beluga.bvp_algorithm('Collocation')
    guess_maker = beluga.guess_generator('auto', start=[0, 0, 0], direction='forward', costate_guess=-0.1, control_guess=[-pi/2], use_control_guess=True)
    continuation_steps = beluga.init_continuation()
    # Continue the terminal point from the origin out to (x, y) = (10, -10).
    continuation_steps.add_step('bisection') \
        .num_cases(21) \
        .terminal('x', 10) \
        .terminal('y', -10)
    sol = beluga.solve(ocp, method='traditional', bvp_algorithm=shooting_solver, steps=continuation_steps, guess_generator=guess_maker)
    assert isinstance(sol, Trajectory)
    assert isinstance(sol, Solution)
    assert sol.t.shape[0] == sol.y.shape[0]
    assert sol.t.shape[0] == sol.u.shape[0]
    # 'traditional' solutions carry 6 state columns here.
    assert sol.y.shape[1] == 6
    assert sol.u.shape[1] == 1
    y0 = sol.y[0]
    yf = sol.y[-1]
    # Endpoint reference values for the (10, -10) target.
    assert abs(y0[0] - 0) < tol
    assert abs(y0[1] - 0) < tol
    assert abs(y0[2] - 0) < tol
    assert abs(y0[3] + 0.0667) < tol
    assert abs(y0[4] - 0.0255) < tol
    assert abs(y0[5] + 0.1019) < tol
    assert abs(sol.t[-1] - 1.8433) < tol
    assert abs(yf[0] - 10) < tol
    assert abs(yf[1] + 10) < tol
    assert abs(yf[2] - 14.0071) < tol
    assert abs(yf[3] + 0.0667) < tol
    assert abs(yf[4] - 0.0255) < tol
    assert abs(yf[5] - 0) < tol
    # Columns 3 and 4 should be constant along the trajectory.
    assert abs(y0[3] - yf[3]) < tol
    assert abs(y0[4] - yf[4]) < tol
    # Re-solve with 'icrm': the solution carries an extra column (7 vs 6).
    sol = beluga.solve(ocp, method='icrm', bvp_algorithm=shooting_solver, steps=continuation_steps, guess_generator=guess_maker)
    y0 = sol.y[0]
    yf = sol.y[-1]
    assert sol.t.shape[0] == sol.y.shape[0]
    assert sol.t.shape[0] == sol.u.shape[0]
    assert sol.y.shape[1] == 7
    assert sol.u.shape[1] == 1
    assert abs(y0[0] - 0) < tol
    assert abs(y0[1] - 0) < tol
    assert abs(y0[2] - 0) < tol
    assert abs(y0[3] + 0.0667) < tol
    assert abs(y0[4] - 0.0255) < tol
    assert abs(y0[5] + 0.1019) < tol
    assert abs(sol.t[-1] - 1.8433) < tol
    assert abs(yf[0] - 10) < tol
    assert abs(yf[1] + 10) < tol
    assert abs(yf[2] - 14.0071) < tol
    assert abs(yf[3] + 0.0667) < tol
    assert abs(yf[4] - 0.0255) < tol
    assert abs(yf[5] - 0) < tol
    assert abs(y0[3] - yf[3]) < tol
    assert abs(y0[4] - yf[4]) < tol
def test_brachistochrone_custom_functions():
    """Solve the brachistochrone OCP using user-supplied trig callbacks.

    Identical problem to the other brachistochrone tests, except the EOM
    reference ``custom_cos``/``custom_sin`` registered via
    ``ocp.custom_function`` instead of the built-in names. Endpoints are
    checked against reference values using the module-level ``tol``.
    """
    from math import pi, cos, sin
    import beluga
    from beluga.ivpsol import Trajectory
    from beluga.bvpsol import Solution
    ocp = beluga.OCP('brachisto')
    # Define independent variables
    ocp.independent('t', 's')
    # Define equations of motion (note the custom_* function names)
    ocp.state('x', 'v*custom_cos(theta)', 'm') \
        .state('y', 'v*custom_sin(theta)', 'm') \
        .state('v', 'g*custom_sin(theta)', 'm/s')
    # Define custom functions: thin wrappers over math.cos/math.sin so the
    # numeric result must match the built-in-trig variants of this test.
    def custom_cos(theta):
        return cos(theta)
    def custom_sin(theta):
        return sin(theta)
    ocp.custom_function('custom_cos', custom_cos)
    ocp.custom_function('custom_sin', custom_sin)
    # Define controls
    ocp.control('theta', 'rad')
    # Define constants
    ocp.constant('g', -9.81, 'm/s^2')
    # Define costs (minimum time: unit path cost)
    ocp.path_cost('1', '1')
    # Define constraints
    ocp.constraints() \
        .initial('x-x_0', 'm') \
        .initial('y-y_0', 'm') \
        .initial('v-v_0', 'm/s') \
        .terminal('x-x_f', 'm') \
        .terminal('y-y_f', 'm')
    ocp.scale(m='y', s='y/v', kg=1, rad=1)
    shooting_solver = beluga.bvp_algorithm('Shooting')
    guess_maker = beluga.guess_generator('auto', start=[0, 0, 0], direction='forward', costate_guess=-0.1, control_guess=[-pi/2], use_control_guess=True)
    continuation_steps = beluga.init_continuation()
    # Continue the terminal point from the origin out to (x, y) = (10, -10).
    continuation_steps.add_step('bisection') \
        .num_cases(21) \
        .terminal('x', 10) \
        .terminal('y', -10)
    sol = beluga.solve(ocp, method='icrm', bvp_algorithm=shooting_solver, steps=continuation_steps, guess_generator=guess_maker)
    assert isinstance(sol, Trajectory)
    assert isinstance(sol, Solution)
    assert sol.t.shape[0] == sol.y.shape[0]
    assert sol.t.shape[0] == sol.u.shape[0]
    # 'icrm' solutions carry 7 state columns here.
    assert sol.y.shape[1] == 7
    assert sol.u.shape[1] == 1
    y0 = sol.y[0]
    yf = sol.y[-1]
    # Endpoint reference values for the (10, -10) target.
    assert abs(y0[0] - 0) < tol
    assert abs(y0[1] - 0) < tol
    assert abs(y0[2] - 0) < tol
    assert abs(y0[3] + 0.0667) < tol
    assert abs(y0[4] - 0.0255) < tol
    assert abs(y0[5] + 0.1019) < tol
    assert abs(sol.t[-1] - 1.8433) < tol
    assert abs(yf[0] - 10) < tol
    assert abs(yf[1] + 10) < tol
    assert abs(yf[2] - 14.0071) < tol
    assert abs(yf[3] + 0.0667) < tol
    assert abs(yf[4] - 0.0255) < tol
    assert abs(yf[5] - 0) < tol
    # Columns 3 and 4 should be constant along the trajectory.
    assert abs(y0[3] - yf[3]) < tol
    assert abs(y0[4] - yf[4]) < tol
def test_planarhypersonic():
    """Solve a planar hypersonic entry OCP (maximize terminal v^2).

    Builds the problem with quantities for atmosphere/aero, continues the
    terminal conditions and atmospheric density in three bisection steps,
    then checks the endpoints against stored reference values (relative
    error where the reference is nonzero, absolute error otherwise).
    """
    from math import pi
    import beluga
    ocp = beluga.OCP('planarHypersonic')
    # Define independent variables
    ocp.independent('t', 's')
    # Define equations of motion
    ocp.state('h', 'v*sin(gam)', 'm') \
        .state('theta', 'v*cos(gam)/r', 'rad') \
        .state('v', '-D/mass - mu*sin(gam)/r**2', 'm/s') \
        .state('gam', 'L/(mass*v) + (v/r - mu/(v*r^2))*cos(gam)', 'rad')
    # Define quantities used in the problem (exponential atmosphere,
    # polynomial aero coefficients in angle of attack 'alfa')
    ocp.quantity('rho', 'rho0*exp(-h/H)')
    ocp.quantity('Cl', '(1.5658*alfa + -0.0000)')
    ocp.quantity('Cd', '(1.6537*alfa^2 + 0.0612)')
    ocp.quantity('D', '0.5*rho*v^2*Cd*Aref')
    ocp.quantity('L', '0.5*rho*v^2*Cl*Aref')
    ocp.quantity('r', 're+h')
    # Define controls
    ocp.control('alfa', 'rad')
    # Define constants
    ocp.constant('mu', 3.986e5 * 1e9, 'm^3/s^2')  # Gravitational parameter, m^3/s^2
    # Density starts at 1e-4 of sea level; the last continuation step raises
    # it to the real 1.2 kg/m^3.
    ocp.constant('rho0', 0.0001 * 1.2, 'kg/m^3')  # Sea-level atmospheric density, kg/m^3
    ocp.constant('H', 7500, 'm')  # Scale height for atmosphere of Earth, m
    ocp.constant('mass', 750 / 2.2046226, 'kg')  # Mass of vehicle, kg
    ocp.constant('re', 6378000, 'm')  # Radius of planet, m
    ocp.constant('Aref', pi * (24 * .0254 / 2) ** 2, 'm^2')  # Reference area of vehicle, m^2
    # Define costs: minimize -v^2, i.e. maximize terminal speed squared
    ocp.terminal_cost('-v^2', 'm^2/s^2')
    # Define constraints
    ocp.constraints() \
        .initial('h-h_0', 'm') \
        .initial('theta-theta_0', 'rad') \
        .initial('v-v_0', 'm/s') \
        .terminal('h-h_f', 'm') \
        .terminal('theta-theta_f', 'rad')
    ocp.scale(m='h', s='h/v', kg='mass', rad=1)
    bvp_solver = beluga.bvp_algorithm('Shooting')
    guess_maker = beluga.guess_generator('auto', start=[80000, 0, 4000, -90 * pi / 180], direction='forward', costate_guess=-0.1)
    continuation_steps = beluga.init_continuation()
    # Step 1: bring terminal altitude to 0 and downrange angle near zero.
    continuation_steps.add_step('bisection') \
        .num_cases(11) \
        .terminal('h', 0) \
        .terminal('theta', 0.01 * pi / 180)
    # Step 2: extend downrange angle to 5 degrees.
    continuation_steps.add_step('bisection') \
        .num_cases(11) \
        .terminal('theta', 5.0 * pi / 180)
    # Step 3: ramp the density up to the true sea-level value.
    continuation_steps.add_step('bisection') \
        .num_cases(11) \
        .const('rho0', 1.2)
    sol = beluga.solve(ocp, method='traditional', bvp_algorithm=bvp_solver, steps=continuation_steps, guess_generator=guess_maker)
    y0 = sol.y[0]
    yf = sol.y[-1]
    # Reference endpoint values for all 8 solution columns and final time.
    y0e = [80000, 0, 4000, 0.0195, -16.8243, 1212433.8085, -2836.0620, 0]
    yfe = [0, 0.0873, 2691.4733, -0.9383, 546.4540, 1212433.8085, -5382.9467, 0.1840]
    tfe = 144.5677
    assert sol.t.shape[0] == sol.y.shape[0]
    assert sol.t.shape[0] == sol.u.shape[0]
    assert sol.y.shape[1] == 8
    assert sol.u.shape[1] == 1
    # Relative error where the reference is nonzero, absolute otherwise.
    assert abs((y0[0] - y0e[0]) / y0e[0]) < tol
    assert abs((y0[1] - y0e[1])) < tol
    assert abs((y0[2] - y0e[2]) / y0e[2]) < tol
    assert abs((y0[3] - y0e[3]) / y0e[3]) < tol
    assert abs((y0[4] - y0e[4]) / y0e[4]) < tol
    assert abs((y0[5] - y0e[5]) / y0e[5]) < tol
    assert abs((y0[6] - y0e[6]) / y0e[6]) < tol
    assert abs((y0[7] - y0e[7])) < tol
    assert abs((sol.t[-1] - tfe) / tfe) < tol
    assert abs((yf[0] - yfe[0])) < tol
    assert abs((yf[1] - yfe[1]) / yfe[1]) < tol
    assert abs((yf[2] - yfe[2]) / yfe[2]) < tol
    assert abs((yf[3] - yfe[3]) / yfe[3]) < tol
    assert abs((yf[4] - yfe[4]) / yfe[4]) < tol
    assert abs((yf[5] - yfe[5]) / yfe[5]) < tol
    assert abs((yf[6] - yfe[6]) / yfe[6]) < tol
    assert abs((yf[7] - yfe[7]) / yfe[7]) < tol
| 31.248062
| 155
| 0.57463
| 1,908
| 12,093
| 3.574948
| 0.095912
| 0.12139
| 0.151297
| 0.086204
| 0.808239
| 0.774227
| 0.753848
| 0.750916
| 0.744172
| 0.741534
| 0
| 0.084064
| 0.235674
| 12,093
| 386
| 156
| 31.329016
| 0.6539
| 0.060117
| 0
| 0.745455
| 0
| 0.003636
| 0.080745
| 0
| 0
| 0
| 0
| 0
| 0.443636
| 1
| 0.021818
| false
| 0
| 0.050909
| 0.007273
| 0.08
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d4242f62c7592082857aa1423dd2e99d9f4ae7fc
| 9,240
|
py
|
Python
|
ccws/test/test_okex.py
|
applezjm/testsub
|
051348bb852d8e3cefe764a6315f53da66cd413e
|
[
"MIT"
] | null | null | null |
ccws/test/test_okex.py
|
applezjm/testsub
|
051348bb852d8e3cefe764a6315f53da66cd413e
|
[
"MIT"
] | null | null | null |
ccws/test/test_okex.py
|
applezjm/testsub
|
051348bb852d8e3cefe764a6315f53da66cd413e
|
[
"MIT"
] | null | null | null |
from interruptingcow import timeout
from ccws.okex import Okex
from ccws.test.test_base import Test
from ccws.configs import HOME_PATH
class TestOkex(Test, Okex):
    """Integration tests that replay recorded OKEx market-data feeds.

    Each test loads a gzipped capture into Redis, runs the processing
    pipeline for a bounded time, and diffs the produced CSV against a
    stored reference file. The previous version repeated the same ~25-line
    body ten times; the flow now lives in one helper.
    """

    def __init__(self, *args, **kwargs):
        Okex.__init__(self)
        Test.__init__(self, *args, **kwargs)

    def _replay_and_compare(self, symbol, channel, origin, seconds):
        """Run one replay case end to end.

        Args:
            symbol: market pair, e.g. 'BTC/USDT'.
            channel: data channel, 'order' or 'trade'.
            origin: dict with 'FileName' (input capture), 'Date', and
                'Output' (reference CSV to compare against).
            seconds: time budget per processing phase; the workers do not
                terminate on their own, so each phase is stopped with a
                timeout that raises RuntimeWarning.
        """
        self.initialization(symbol, channel, origin['Date'])
        input_key = self.Config['RedisCollectKey']
        self.write_into_redis(input_key, self.RedisConnection, origin['FileName'])
        for phase in (self.process_data, self.write_data_csv):
            try:
                with timeout(seconds, exception=RuntimeWarning):
                    phase()
            except RuntimeWarning:
                pass  # expected: the phase is cut off by the timeout
        reference = origin['Output']
        produced = '%s/%s/%s' % (HOME_PATH, origin['Date'], self.Config['FileName'])
        self.compare_two_csv(reference, produced)
        self.delete_tmp_file(produced)

    def test_BTC_USDT_order(self):
        self._replay_and_compare('BTC/USDT', 'order', {
            'FileName': 'BTC_USDT-okex_order.gz',
            'Date': '2018/06/23',
            'Output': 'BTC_USDT-okex.book.csv.gz',
        }, 30)

    def test_BTC_USDT_ticker(self):
        self._replay_and_compare('BTC/USDT', 'trade', {
            'FileName': 'BTC_USDT-okex_ticker.gz',
            'Date': '2018/06/23',
            'Output': 'BTC_USDT-okex.trade.csv.gz',
        }, 30)

    def test_BCH_USDT_order(self):
        self._replay_and_compare('BCH/USDT', 'order', {
            'FileName': 'BCH_USDT-okex_order.gz',
            'Date': '2018/06/26',
            'Output': 'BCH_USDT-okex.book.csv.gz',
        }, 60)

    def test_BCH_USDT_ticker(self):
        self._replay_and_compare('BCH/USDT', 'trade', {
            'FileName': 'BCH_USDT-okex_ticker.gz',
            'Date': '2018/06/26',
            'Output': 'BCH_USDT-okex.trade.csv.gz',
        }, 30)

    def test_ETH_USDT_order(self):
        self._replay_and_compare('ETH/USDT', 'order', {
            'FileName': 'ETH_USDT-okex_order.gz',
            'Date': '2018/06/26',
            'Output': 'ETH_USDT-okex.book.csv.gz',
        }, 60)

    def test_ETH_USDT_ticker(self):
        self._replay_and_compare('ETH/USDT', 'trade', {
            'FileName': 'ETH_USDT-okex_ticker.gz',
            'Date': '2018/06/26',
            'Output': 'ETH_USDT-okex.trade.csv.gz',
        }, 30)

    def test_XRP_USDT_order(self):
        self._replay_and_compare('XRP/USDT', 'order', {
            'FileName': 'XRP_USDT-okex_order.gz',
            'Date': '2018/08/09',
            'Output': 'XRP_USDT-okex.book.csv.gz',
        }, 10)

    def test_XRP_USDT_ticker(self):
        self._replay_and_compare('XRP/USDT', 'trade', {
            'FileName': 'XRP_USDT-okex_ticker.gz',
            'Date': '2018/08/09',
            'Output': 'XRP_USDT-okex.trade.csv.gz',
        }, 5)

    def test_EOS_USDT_order(self):
        self._replay_and_compare('EOS/USDT', 'order', {
            'FileName': 'EOS_USDT-okex_order.gz',
            'Date': '2018/08/09',
            'Output': 'EOS_USDT-okex.book.csv.gz',
        }, 10)

    def test_EOS_USDT_ticker(self):
        self._replay_and_compare('EOS/USDT', 'trade', {
            'FileName': 'EOS_USDT-okex_ticker.gz',
            'Date': '2018/08/09',
            'Output': 'EOS_USDT-okex.trade.csv.gz',
        }, 5)
| 31.752577
| 82
| 0.564286
| 999
| 9,240
| 5.025025
| 0.069069
| 0.055777
| 0.047809
| 0.045817
| 0.959761
| 0.947211
| 0.899402
| 0.839243
| 0.839243
| 0.755976
| 0
| 0.025933
| 0.307251
| 9,240
| 290
| 83
| 31.862069
| 0.758319
| 0
| 0
| 0.756303
| 0
| 0
| 0.15368
| 0.051948
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046218
| false
| 0.084034
| 0.016807
| 0
| 0.067227
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
d43298491d2b701dfce0ad75dd31ff8177aecfde
| 285
|
py
|
Python
|
venue/scrapers/exceptions.py
|
Volentix/venue-server
|
9d6b3580516ab321f98c48ce1151671086145841
|
[
"MIT"
] | 7
|
2018-08-01T16:30:01.000Z
|
2018-12-10T05:12:27.000Z
|
venue/scrapers/exceptions.py
|
Volentix/venue-server
|
9d6b3580516ab321f98c48ce1151671086145841
|
[
"MIT"
] | 103
|
2018-08-02T15:23:02.000Z
|
2018-12-13T03:48:15.000Z
|
venue/scrapers/exceptions.py
|
Volentix/venue-server
|
9d6b3580516ab321f98c48ce1151671086145841
|
[
"MIT"
] | null | null | null |
class ProfileDoesNotExist(Exception):
    """Raised when a forum profile being scraped cannot be found.

    Attributes:
        message: Human-readable description of the failure.
        info: Optional dict with extra context about the failure.
    """

    def __init__(self, message, info=None):
        # Call Exception.__init__ so str(e) and e.args carry the message
        # (the original override left them empty).
        super().__init__(message)
        self.message = message
        # A fresh dict per instance avoids the shared mutable-default
        # pitfall of the original `info={}` signature.
        self.info = {} if info is None else info
class ScraperError(Exception):
    """Raised for general scraper failures.

    Attributes:
        message: Human-readable description of the failure.
        info: Optional dict with extra context about the failure.
    """

    def __init__(self, message, info=None):
        # Call Exception.__init__ so str(e) and e.args carry the message
        # (the original override left them empty).
        super().__init__(message)
        self.message = message
        # A fresh dict per instance avoids the shared mutable-default
        # pitfall of the original `info={}` signature.
        self.info = {} if info is None else info
| 21.923077
| 41
| 0.6
| 28
| 285
| 5.821429
| 0.321429
| 0.269939
| 0.196319
| 0.245399
| 0.748466
| 0.748466
| 0.748466
| 0.748466
| 0.748466
| 0.748466
| 0
| 0
| 0.294737
| 285
| 12
| 42
| 23.75
| 0.810945
| 0
| 0
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
d43cebeffb46a8bfcec73af27b4f404262804c7f
| 7,887
|
py
|
Python
|
mechdb_core/migrations/0001_initial.py
|
hadmiru/mdb
|
133f56b6a6ff1600bfe93e227d922f4a22e58832
|
[
"Apache-2.0"
] | null | null | null |
mechdb_core/migrations/0001_initial.py
|
hadmiru/mdb
|
133f56b6a6ff1600bfe93e227d922f4a22e58832
|
[
"Apache-2.0"
] | null | null | null |
mechdb_core/migrations/0001_initial.py
|
hadmiru/mdb
|
133f56b6a6ff1600bfe93e227d922f4a22e58832
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.0.5 on 2018-05-26 19:41
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for mechdb_core.

    Creates the Action, Action_type, Container, Equipment,
    Equipment_sizename, Manufacturer, Movement_action, Spare_part, and
    Supply_provider models, then wires up the cross-model foreign keys
    with AddField operations. Applied migrations are behavior-frozen:
    do not edit field definitions here.
    """

    initial = True

    dependencies = [
        # User FKs reference the swappable auth user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Action',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('action_start_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('action_end_date', models.DateTimeField(blank=True, default=django.utils.timezone.now, null=True)),
                ('scheduled', models.BooleanField(default=False)),
                ('description', models.TextField(blank=True, null=True)),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Action_type',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=10)),
            ],
        ),
        migrations.CreateModel(
            name='Container',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('name', models.CharField(max_length=200)),
                ('descripton', models.TextField(blank=True, null=True)),
                # NOTE(review): plain IntegerField rather than a self-FK;
                # presumably a container-in-container reference — confirm.
                ('in_container_id', models.IntegerField(blank=True, null=True)),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Equipment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('sizename', models.CharField(max_length=200)),
                ('serial_number', models.CharField(blank=True, max_length=50, null=True)),
                ('registration_number', models.CharField(blank=True, max_length=50, null=True)),
                ('in_container', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='mechdb_core.Container')),
            ],
        ),
        migrations.CreateModel(
            name='Equipment_sizename',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Manufacturer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('name', models.CharField(max_length=200)),
                ('descripton', models.TextField(blank=True, null=True)),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Movement_action',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('action_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('description', models.TextField(blank=True, null=True)),
                ('quantity', models.FloatField(default=0)),
                ('new_container', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='mechdb_core.Container')),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('used_in_action', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='mechdb_core.Action')),
            ],
        ),
        migrations.CreateModel(
            name='Spare_part',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('name', models.CharField(max_length=200)),
                ('quantity', models.FloatField(default=0)),
                ('unit_name', models.CharField(max_length=10)),
                ('in_container', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='mechdb_core.Container')),
                ('manufacturer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='mechdb_core.Manufacturer')),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Supply_provider',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('name', models.CharField(max_length=200)),
                ('descripton', models.TextField(blank=True, null=True)),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Cross-model FK/M2M fields added after all target models exist.
        migrations.AddField(
            model_name='spare_part',
            name='supply_provider',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='mechdb_core.Supply_provider'),
        ),
        migrations.AddField(
            model_name='spare_part',
            name='used_in_equipment',
            field=models.ManyToManyField(to='mechdb_core.Equipment_sizename'),
        ),
        migrations.AddField(
            model_name='movement_action',
            name='used_spare_part',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mechdb_core.Spare_part'),
        ),
        migrations.AddField(
            model_name='equipment',
            name='manufacturer',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='mechdb_core.Manufacturer'),
        ),
        migrations.AddField(
            model_name='equipment',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='equipment',
            name='supply_provider',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='mechdb_core.Supply_provider'),
        ),
        migrations.AddField(
            model_name='action',
            name='type',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='mechdb_core.Action_type'),
        ),
        migrations.AddField(
            model_name='action',
            name='used_in_equipment',
            field=models.ManyToManyField(to='mechdb_core.Equipment_sizename'),
        ),
    ]
| 51.888158
| 152
| 0.614048
| 826
| 7,887
| 5.688862
| 0.118644
| 0.03405
| 0.056608
| 0.088955
| 0.83571
| 0.798255
| 0.770164
| 0.754841
| 0.732709
| 0.721856
| 0
| 0.006766
| 0.250412
| 7,887
| 151
| 153
| 52.231788
| 0.788058
| 0.005706
| 0
| 0.708333
| 1
| 0
| 0.124745
| 0.034439
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.027778
| 0
| 0.055556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d48795a3e619309e0e7885166f714e28cdc95568
| 16,340
|
py
|
Python
|
tests/integration/test_users.py
|
langrenn-sprint/user-service
|
c7b517f5ba600772f673e153b1086fee5278b14a
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_users.py
|
langrenn-sprint/user-service
|
c7b517f5ba600772f673e153b1086fee5278b14a
|
[
"Apache-2.0"
] | 2
|
2021-06-30T13:06:34.000Z
|
2022-02-01T11:18:15.000Z
|
tests/integration/test_users.py
|
langrenn-sprint/user-service
|
c7b517f5ba600772f673e153b1086fee5278b14a
|
[
"Apache-2.0"
] | null | null | null |
"""Integration test cases for the users route."""
import os
from typing import Any
from aiohttp import hdrs
from aiohttp.test_utils import TestClient as _TestClient
import jwt
import pytest
from pytest_mock import MockFixture
ID = "290e70d5-0933-4af0-bb53-1d705ba7eb95"
@pytest.fixture
def token() -> str:
    """Create a valid admin token for request authorization headers."""
    claims = {"username": os.getenv("ADMIN_USERNAME"), "role": "admin"}
    return jwt.encode(claims, os.getenv("JWT_SECRET"), "HS256")  # type: ignore
@pytest.fixture
def token_nonprivileged_user() -> str:
    """Create a valid token for a user without the admin role."""
    claims = {"username": "nonprivileged", "role": "nonprivileged"}
    return jwt.encode(claims, os.getenv("JWT_SECRET"), "HS256")  # type: ignore
async def mock_user(db: Any, username: str) -> dict:
    """Return a canned admin user record; both lookup arguments are ignored."""
    return dict(id=ID, username="admin", password="password", role="admin")  # noqa: S106
async def mock_user_insufficent_role(db: Any, username: str) -> dict:
    """Return a canned user record whose role lacks admin rights; arguments are ignored."""
    return dict(  # noqa: S106
        id=ID, username="nonprivileged", password="password", role="event-admin"
    )
async def mock_user_object(db: Any, username: str) -> dict:
    """Return a canned ordinary user record; both lookup arguments are ignored."""
    return dict(  # noqa: S106
        id=ID, username="some.user@example.com", password="secret", role="test"
    )
async def mock_authorize(db: Any, token: Any, roles: Any) -> None:
    """No-op stand-in for AuthorizationService.authorize: always grants access."""
    return None
@pytest.mark.integration
async def test_create_user(
    client: _TestClient, mocker: MockFixture, token: MockFixture
) -> None:
    """Should return Created, location header."""
    # Stub id generation, persistence, and auth so only the route logic runs.
    mocker.patch(
        "user_service.services.users_service.create_id",
        return_value=ID,
    )
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.create_user",
        return_value=ID,
    )
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.get_user_by_username",
        side_effect=mock_user,
    )
    mocker.patch(
        "user_service.services.AuthorizationService.authorize",
        side_effect=mock_authorize,
    )
    request_body = {
        "username": "user@example.com",
        "password": "secret",
        "role": "test",
    }
    headers = {
        hdrs.CONTENT_TYPE: "application/json",
        hdrs.AUTHORIZATION: f"Bearer {token}",
    }
    resp = await client.post("/users", headers=headers, json=request_body)
    assert resp.status == 201
    # Location header must point at the newly generated id.
    assert f"/users/{ID}" in resp.headers[hdrs.LOCATION]
@pytest.mark.integration
async def test_get_user_by_id(
    client: _TestClient,
    mocker: MockFixture,
    token: MockFixture,
) -> None:
    """Should return OK, and a body containing one user."""
    # Stub the adapter lookups and auth so only the route logic runs.
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.get_user_by_id",
        side_effect=mock_user_object,
    )
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.get_user_by_username",
        side_effect=mock_user,
    )
    mocker.patch(
        "user_service.services.AuthorizationService.authorize",
        side_effect=mock_authorize,
    )
    headers = {
        hdrs.AUTHORIZATION: f"Bearer {token}",
    }
    resp = await client.get(f"/users/{ID}", headers=headers)
    assert resp.status == 200
    assert "application/json" in resp.headers[hdrs.CONTENT_TYPE]
    user = await resp.json()
    assert type(user) is dict
    assert user["id"] == ID
@pytest.mark.integration
async def test_update_user_by_id(
    client: _TestClient, mocker: MockFixture, token: MockFixture
) -> None:
    """Should return No Content."""
    # Stub lookups, persistence, and auth so only the route logic runs.
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.get_user_by_id",
        side_effect=mock_user_object,
    )
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.update_user",
        return_value=ID,
    )
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.get_user_by_username",
        side_effect=mock_user,
    )
    mocker.patch(
        "user_service.services.AuthorizationService.authorize",
        side_effect=mock_authorize,
    )
    headers = {
        hdrs.CONTENT_TYPE: "application/json",
        hdrs.AUTHORIZATION: f"Bearer {token}",
    }
    # Full replacement document including the id being updated.
    request_body = {
        "id": ID,
        "username": "updated.user@example.com",
        "password": "secret",
        "role": "test",
    }
    resp = await client.put(f"/users/{ID}", headers=headers, json=request_body)
    assert resp.status == 204
@pytest.mark.integration
async def test_list_users(
    client: _TestClient, mocker: MockFixture, token: MockFixture
) -> None:
    """Should return OK and a valid json body."""
    # Stub the adapter so the route returns one known user.
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.get_all_users",
        return_value=[{"id": ID, "username": "Oslo Skagen Sprint"}],
    )
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.get_user_by_username",
        side_effect=mock_user,
    )
    mocker.patch(
        "user_service.services.AuthorizationService.authorize",
        side_effect=mock_authorize,
    )
    headers = {
        hdrs.AUTHORIZATION: f"Bearer {token}",
    }
    resp = await client.get("/users", headers=headers)
    assert resp.status == 200
    assert "application/json" in resp.headers[hdrs.CONTENT_TYPE]
    users = await resp.json()
    assert type(users) is list
    assert len(users) > 0
@pytest.mark.integration
async def test_delete_user_by_id(
    client: _TestClient, mocker: MockFixture, token: MockFixture
) -> None:
    """Should return No Content."""
    # Stub lookups, deletion, and auth so only the route logic runs.
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.get_user_by_id",
        side_effect=mock_user_object,
    )
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.delete_user",
        return_value=ID,
    )
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.get_user_by_username",
        side_effect=mock_user,
    )
    mocker.patch(
        "user_service.services.AuthorizationService.authorize",
        side_effect=mock_authorize,
    )
    headers = {
        hdrs.AUTHORIZATION: f"Bearer {token}",
    }
    resp = await client.delete(f"/users/{ID}", headers=headers)
    assert resp.status == 204
# Bad cases
@pytest.mark.integration
async def test_create_user_invalid_input(
    client: _TestClient, mocker: MockFixture, token: MockFixture
) -> None:
    """Should return 422 Unprocessable Entity."""
    # Stub id generation, persistence, and auth so validation is what fails.
    mocker.patch(
        "user_service.services.users_service.create_id",
        return_value=ID,
    )
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.create_user",
        return_value=ID,
    )
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.get_user_by_username",
        side_effect=mock_user,
    )
    mocker.patch(
        "user_service.services.AuthorizationService.authorize",
        side_effect=mock_authorize,
    )
    # Invalid: the required "role" property is missing.
    request_body_lacks_role = {
        "username": "user@example.com",
        "password": "secret",
    }
    headers = {
        hdrs.CONTENT_TYPE: "application/json",
        hdrs.AUTHORIZATION: f"Bearer {token}",
    }
    resp = await client.post("/users", headers=headers, json=request_body_lacks_role)
    assert resp.status == 422
@pytest.mark.integration
async def test_create_user_with_id(
    client: _TestClient, mocker: MockFixture, token: MockFixture
) -> None:
    """Should return 422 Unprocessable Entity."""
    # Stub id generation, persistence, and auth so validation is what fails.
    mocker.patch(
        "user_service.services.users_service.create_id",
        return_value=ID,
    )
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.create_user",
        return_value=ID,
    )
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.get_user_by_username",
        side_effect=mock_user,
    )
    mocker.patch(
        "user_service.services.AuthorizationService.authorize",
        side_effect=mock_authorize,
    )
    # Invalid: clients must not supply an "id" — the service generates it.
    request_body_with_id = {
        "id": ID,
        "username": "user@example.com",
        "password": "secret",
        "role": "test_role",
    }
    headers = {
        hdrs.CONTENT_TYPE: "application/json",
        hdrs.AUTHORIZATION: f"Bearer {token}",
    }
    resp = await client.post("/users", headers=headers, json=request_body_with_id)
    assert resp.status == 422
@pytest.mark.integration
async def test_create_user_with_username_admin(
    client: _TestClient, mocker: MockFixture, token: MockFixture
) -> None:
    """Should return 422 Unprocessable Entity."""
    for target, mock_kwargs in (
        ("user_service.services.users_service.create_id", {"return_value": ID}),
        (
            "user_service.adapters.users_adapter.UsersAdapter.create_user",
            {"return_value": ID},
        ),
        (
            "user_service.adapters.users_adapter.UsersAdapter.get_user_by_username",
            {"side_effect": mock_user},
        ),
        (
            "user_service.services.AuthorizationService.authorize",
            {"side_effect": mock_authorize},
        ),
    ):
        mocker.patch(target, **mock_kwargs)

    # The reserved username "admin" must not be creatable.
    payload = {
        "username": "admin",
        "password": "secret",
        "role": "test_role",
    }
    resp = await client.post(
        "/users",
        headers={
            hdrs.CONTENT_TYPE: "application/json",
            hdrs.AUTHORIZATION: f"Bearer {token}",
        },
        json=payload,
    )
    assert resp.status == 422
@pytest.mark.integration
async def test_create_user_returns_none(
    client: _TestClient, mocker: MockFixture, token: MockFixture
) -> None:
    """Should return 400 Bad Request.

    The adapter's create_user is mocked to return None, so the service must
    surface the failed insert as a 400 even though the request body is valid.
    """
    mocker.patch(
        "user_service.services.users_service.create_id",
        return_value=ID,
    )
    # Simulate a failed insert: the adapter yields no new id.
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.create_user",
        return_value=None,
    )
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.get_user_by_username",
        side_effect=mock_user,
    )
    mocker.patch(
        "user_service.services.AuthorizationService.authorize",
        side_effect=mock_authorize,
    )
    # Fix: this body is complete (it *does* contain "role"); the previous
    # name `request_body_lacks_role` was a copy-paste leftover from the
    # invalid-input test and was misleading.
    request_body = {
        "username": "user@example.com",
        "role": "test_role",
        "password": "secret",
    }
    headers = {
        hdrs.CONTENT_TYPE: "application/json",
        hdrs.AUTHORIZATION: f"Bearer {token}",
    }
    resp = await client.post("/users", headers=headers, json=request_body)
    assert resp.status == 400
@pytest.mark.integration
async def test_update_user_invalid_input(
    client: _TestClient, mocker: MockFixture, token: MockFixture
) -> None:
    """Should return 422 Unprocessable Entity."""
    mocker.patch(
        "user_service.services.AuthorizationService.authorize",
        side_effect=mock_authorize,
    )
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.get_user_by_username",
        side_effect=mock_user,
    )
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.get_user_by_id",
        side_effect=mock_user_object,
    )
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.update_user",
        return_value=ID,
    )

    # Body is missing the mandatory "role" property.
    incomplete_body = {
        "id": ID,
        "username": "updated.user@example.com",
        "password": "secret",
    }
    resp = await client.put(
        f"/users/{ID}",
        headers={
            hdrs.CONTENT_TYPE: "application/json",
            hdrs.AUTHORIZATION: f"Bearer {token}",
        },
        json=incomplete_body,
    )
    assert resp.status == 422
@pytest.mark.integration
async def test_update_user_set_username_to_admin(
    client: _TestClient, mocker: MockFixture, token: MockFixture
) -> None:
    """Should return 422 Unprocessable Entity."""
    for target, mock_kwargs in (
        (
            "user_service.adapters.users_adapter.UsersAdapter.get_user_by_id",
            {"side_effect": mock_user_object},
        ),
        (
            "user_service.adapters.users_adapter.UsersAdapter.update_user",
            {"return_value": ID},
        ),
        (
            "user_service.adapters.users_adapter.UsersAdapter.get_user_by_username",
            {"side_effect": mock_user},
        ),
        (
            "user_service.services.AuthorizationService.authorize",
            {"side_effect": mock_authorize},
        ),
    ):
        mocker.patch(target, **mock_kwargs)

    # Renaming an existing user to the reserved "admin" must be rejected.
    payload = {
        "id": ID,
        "username": "admin",
        "password": "secret",
        "role": "test_role",
    }
    resp = await client.put(
        f"/users/{ID}",
        headers={
            hdrs.CONTENT_TYPE: "application/json",
            hdrs.AUTHORIZATION: f"Bearer {token}",
        },
        json=payload,
    )
    assert resp.status == 422
@pytest.mark.integration
async def test_update_user_change_ID(
    client: _TestClient, mocker: MockFixture, token: MockFixture
) -> None:
    """Should return 422 Unprocessable Entity."""
    mocker.patch(
        "user_service.services.AuthorizationService.authorize",
        side_effect=mock_authorize,
    )
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.get_user_by_username",
        side_effect=mock_user,
    )
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.get_user_by_id",
        side_effect=mock_user_object,
    )
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.update_user",
        return_value=ID,
    )

    # Body id differs from the id in the URL path -> must be rejected.
    mismatched_body = {
        "id": "DifferentId",
        "username": "some.user@example.com",
        "password": "secret",
        "role": "test_role",
    }
    resp = await client.put(
        f"/users/{ID}",
        headers={
            hdrs.CONTENT_TYPE: "application/json",
            hdrs.AUTHORIZATION: f"Bearer {token}",
        },
        json=mismatched_body,
    )
    assert resp.status == 422
# NOT FOUND CASES:
@pytest.mark.integration
async def test_get_user_not_found(
    client: _TestClient, mocker: MockFixture, token: MockFixture
) -> None:
    """Should return 404 Not found."""
    # Local name instead of shadowing the module-level ID constant.
    unknown_id = "does-not-exist"
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.get_user_by_id",
        return_value=None,
    )
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.get_user_by_username",
        side_effect=mock_user,
    )

    resp = await client.get(
        f"/users/{unknown_id}", headers={hdrs.AUTHORIZATION: f"Bearer {token}"}
    )
    assert resp.status == 404
@pytest.mark.integration
async def test_update_user_not_found(
    client: _TestClient, mocker: MockFixture, token: MockFixture
) -> None:
    """Should return 404 Not found.

    Both lookup and update are mocked to return None so the handler takes
    its not-found path regardless of the (otherwise valid) request body.
    """
    ID = "does-not-exist"  # shadows the module-level ID for this test only
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.get_user_by_id",
        return_value=None,
    )
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.update_user",
        return_value=None,
    )
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.get_user_by_username",
        side_effect=mock_user,
    )
    headers = {
        hdrs.CONTENT_TYPE: "application/json",
        hdrs.AUTHORIZATION: f"Bearer {token}",
    }
    request_body = {
        "id": ID,
        "username": "updated.user@example.com",
        "password": "secret",
        "role": "test",
    }
    # Fix: removed a second, redundant `ID = "does-not-exist"` assignment that
    # previously sat between building the body and issuing the request.
    resp = await client.put(f"/users/{ID}", headers=headers, json=request_body)
    assert resp.status == 404
@pytest.mark.integration
async def test_delete_user_not_found(
    client: _TestClient, mocker: MockFixture, token: MockFixture
) -> None:
    """Should return 404 Not found."""
    # Local name instead of shadowing the module-level ID constant.
    unknown_id = "does-not-exist"
    for target in (
        "user_service.adapters.users_adapter.UsersAdapter.get_user_by_id",
        "user_service.adapters.users_adapter.UsersAdapter.delete_user",
    ):
        mocker.patch(target, return_value=None)
    mocker.patch(
        "user_service.adapters.users_adapter.UsersAdapter.get_user_by_username",
        side_effect=mock_user,
    )

    resp = await client.delete(
        f"/users/{unknown_id}", headers={hdrs.AUTHORIZATION: f"Bearer {token}"}
    )
    assert resp.status == 404
| 28.027444
| 85
| 0.656732
| 1,843
| 16,340
| 5.593597
| 0.074878
| 0.05762
| 0.078572
| 0.115239
| 0.918906
| 0.908721
| 0.905519
| 0.903579
| 0.861383
| 0.831991
| 0
| 0.008835
| 0.224174
| 16,340
| 582
| 86
| 28.075601
| 0.80437
| 0.010649
| 0
| 0.709474
| 0
| 0
| 0.301642
| 0.219547
| 0
| 0
| 0
| 0
| 0.046316
| 1
| 0.004211
| false
| 0.029474
| 0.014737
| 0
| 0.029474
| 0.002105
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d489075319dbd3c1f5f5ea4b1f97d2b9ec624b4e
| 251
|
py
|
Python
|
tfne/encodings/codeepneat/__init__.py
|
githealthy18/Tensorflow-Neuroevolution
|
15f6e906e8000c64f5c9a60907f53fe835f0b28c
|
[
"Apache-2.0"
] | 121
|
2019-06-27T17:30:52.000Z
|
2022-03-24T07:32:42.000Z
|
tfne/encodings/codeepneat/__init__.py
|
githealthy18/Tensorflow-Neuroevolution
|
15f6e906e8000c64f5c9a60907f53fe835f0b28c
|
[
"Apache-2.0"
] | 10
|
2021-01-03T19:52:13.000Z
|
2022-02-10T00:15:26.000Z
|
tfne/encodings/codeepneat/__init__.py
|
githealthy18/Tensorflow-Neuroevolution
|
15f6e906e8000c64f5c9a60907f53fe835f0b28c
|
[
"Apache-2.0"
] | 31
|
2019-07-31T10:45:53.000Z
|
2022-03-21T08:31:09.000Z
|
# Import CoDeepNEAT module package
from tfne.encodings.codeepneat import modules
# Import modules
from tfne.encodings.codeepneat.codeepneat_genome import CoDeepNEATGenome
from tfne.encodings.codeepneat.codeepneat_blueprint import CoDeepNEATBlueprint
| 35.857143
| 78
| 0.876494
| 28
| 251
| 7.785714
| 0.428571
| 0.110092
| 0.233945
| 0.37156
| 0.33945
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083665
| 251
| 6
| 79
| 41.833333
| 0.947826
| 0.187251
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.333333
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
2e6b967a9a5f103494a9e2c9e27b85cb559cf16a
| 53,197
|
py
|
Python
|
pyboto3/polly.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 91
|
2016-12-31T11:38:37.000Z
|
2021-09-16T19:33:23.000Z
|
pyboto3/polly.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 7
|
2017-01-02T18:54:23.000Z
|
2020-08-11T13:54:02.000Z
|
pyboto3/polly.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 26
|
2016-12-31T13:11:00.000Z
|
2022-03-03T21:01:12.000Z
|
'''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def can_paginate(operation_name=None):
    """
    Check if an operation can be paginated.
    :type operation_name: string
    :param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo').
    """
    # Documentation-only stub: body intentionally empty (always returns None).
    pass
def delete_lexicon(Name=None):
    """
    Deletes the specified pronunciation lexicon stored in an AWS Region. A lexicon which has been deleted is not available for speech synthesis, nor is it possible to retrieve it using either the GetLexicon or ListLexicon APIs.
    For more information, see Managing Lexicons .
    See also: AWS API Documentation
    Exceptions
    Examples
    Deletes a specified pronunciation lexicon stored in an AWS Region.
    Expected Output:
    :example: response = client.delete_lexicon(
        Name='string'
    )
    :type Name: string
    :param Name: [REQUIRED]\nThe name of the lexicon to delete. Must be an existing lexicon in the region.\n
    :rtype: dict
    ReturnsResponse Syntax{}
    Response Structure
    (dict) --
    Exceptions
    Polly.Client.exceptions.LexiconNotFoundException
    Polly.Client.exceptions.ServiceFailureException
    Examples
    Deletes a specified pronunciation lexicon stored in an AWS Region.
    response = client.delete_lexicon(
        Name='example',
    )
    print(response)
    Expected Output:
    {
        'ResponseMetadata': {
            '...': '...',
        },
    }
    :return: {}
    :returns:
    Polly.Client.exceptions.LexiconNotFoundException
    Polly.Client.exceptions.ServiceFailureException
    """
    # Documentation-only stub: body intentionally empty (always returns None).
    pass
def describe_voices(Engine=None, LanguageCode=None, IncludeAdditionalLanguageCodes=None, NextToken=None):
    """
    Returns the list of voices that are available for use when requesting speech synthesis. Each voice speaks a specified language, is either male or female, and is identified by an ID, which is the ASCII version of the voice name.
    When synthesizing speech ( SynthesizeSpeech ), you provide the voice ID for the voice you want from the list of voices returned by DescribeVoices .
    For example, you want your news reader application to read news in a specific language, but giving a user the option to choose the voice. Using the DescribeVoices operation you can provide the user with a list of available voices to select from.
    You can optionally specify a language code to filter the available voices. For example, if you specify en-US , the operation returns a list of all available US English voices.
    This operation requires permissions to perform the polly:DescribeVoices action.
    See also: AWS API Documentation
    Exceptions
    Examples
    Returns the list of voices that are available for use when requesting speech synthesis. Displayed languages are those within the specified language code. If no language code is specified, voices for all available languages are displayed.
    Expected Output:
    :example: response = client.describe_voices(
        Engine='standard'|'neural',
        LanguageCode='arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR',
        IncludeAdditionalLanguageCodes=True|False,
        NextToken='string'
    )
    :type Engine: string
    :param Engine: Specifies the engine (standard or neural ) used by Amazon Polly when processing input text for speech synthesis.
    :type LanguageCode: string
    :param LanguageCode: The language identification tag (ISO 639 code for the language name-ISO 3166 country code) for filtering the list of voices returned. If you don\'t specify this optional parameter, all available voices are returned.
    :type IncludeAdditionalLanguageCodes: boolean
    :param IncludeAdditionalLanguageCodes: Boolean value indicating whether to return any bilingual voices that use the specified language as an additional language. For instance, if you request all languages that use US English (es-US), and there is an Italian voice that speaks both Italian (it-IT) and US English, that voice will be included if you specify yes but not if you specify no .
    :type NextToken: string
    :param NextToken: An opaque pagination token returned from the previous DescribeVoices operation. If present, this indicates where to continue the listing.
    :rtype: dict
    ReturnsResponse Syntax
    {
        'Voices': [
            {
                'Gender': 'Female'|'Male',
                'Id': 'Aditi'|'Amy'|'Astrid'|'Bianca'|'Brian'|'Camila'|'Carla'|'Carmen'|'Celine'|'Chantal'|'Conchita'|'Cristiano'|'Dora'|'Emma'|'Enrique'|'Ewa'|'Filiz'|'Geraint'|'Giorgio'|'Gwyneth'|'Hans'|'Ines'|'Ivy'|'Jacek'|'Jan'|'Joanna'|'Joey'|'Justin'|'Karl'|'Kendra'|'Kimberly'|'Lea'|'Liv'|'Lotte'|'Lucia'|'Lupe'|'Mads'|'Maja'|'Marlene'|'Mathieu'|'Matthew'|'Maxim'|'Mia'|'Miguel'|'Mizuki'|'Naja'|'Nicole'|'Penelope'|'Raveena'|'Ricardo'|'Ruben'|'Russell'|'Salli'|'Seoyeon'|'Takumi'|'Tatyana'|'Vicki'|'Vitoria'|'Zeina'|'Zhiyu',
                'LanguageCode': 'arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR',
                'LanguageName': 'string',
                'Name': 'string',
                'AdditionalLanguageCodes': [
                    'arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR',
                ],
                'SupportedEngines': [
                    'standard'|'neural',
                ]
            },
        ],
        'NextToken': 'string'
    }
    Response Structure
    (dict) --
    Voices (list) --
    A list of voices with their properties.
    (dict) --
    Description of the voice.
    Gender (string) --
    Gender of the voice.
    Id (string) --
    Amazon Polly assigned voice ID. This is the ID that you specify when calling the SynthesizeSpeech operation.
    LanguageCode (string) --
    Language code of the voice.
    LanguageName (string) --
    Human readable name of the language in English.
    Name (string) --
    Name of the voice (for example, Salli, Kendra, etc.). This provides a human readable voice name that you might display in your application.
    AdditionalLanguageCodes (list) --
    Additional codes for languages available for the specified voice in addition to its default language.
    For example, the default language for Aditi is Indian English (en-IN) because it was first used for that language. Since Aditi is bilingual and fluent in both Indian English and Hindi, this parameter would show the code hi-IN .
    (string) --
    SupportedEngines (list) --
    Specifies which engines (standard or neural ) that are supported by a given voice.
    (string) --
    NextToken (string) --
    The pagination token to use in the next request to continue the listing of voices. NextToken is returned only if the response is truncated.
    Exceptions
    Polly.Client.exceptions.InvalidNextTokenException
    Polly.Client.exceptions.ServiceFailureException
    Examples
    Returns the list of voices that are available for use when requesting speech synthesis. Displayed languages are those within the specified language code. If no language code is specified, voices for all available languages are displayed.
    response = client.describe_voices(
        LanguageCode='en-GB',
    )
    print(response)
    Expected Output:
    {
        'Voices': [
            {
                'Gender': 'Female',
                'Id': 'Emma',
                'LanguageCode': 'en-GB',
                'LanguageName': 'British English',
                'Name': 'Emma',
            },
            {
                'Gender': 'Male',
                'Id': 'Brian',
                'LanguageCode': 'en-GB',
                'LanguageName': 'British English',
                'Name': 'Brian',
            },
            {
                'Gender': 'Female',
                'Id': 'Amy',
                'LanguageCode': 'en-GB',
                'LanguageName': 'British English',
                'Name': 'Amy',
            },
        ],
        'ResponseMetadata': {
            '...': '...',
        },
    }
    :return: {
        'Voices': [
            {
                'Gender': 'Female'|'Male',
                'Id': 'Aditi'|'Amy'|'Astrid'|'Bianca'|'Brian'|'Camila'|'Carla'|'Carmen'|'Celine'|'Chantal'|'Conchita'|'Cristiano'|'Dora'|'Emma'|'Enrique'|'Ewa'|'Filiz'|'Geraint'|'Giorgio'|'Gwyneth'|'Hans'|'Ines'|'Ivy'|'Jacek'|'Jan'|'Joanna'|'Joey'|'Justin'|'Karl'|'Kendra'|'Kimberly'|'Lea'|'Liv'|'Lotte'|'Lucia'|'Lupe'|'Mads'|'Maja'|'Marlene'|'Mathieu'|'Matthew'|'Maxim'|'Mia'|'Miguel'|'Mizuki'|'Naja'|'Nicole'|'Penelope'|'Raveena'|'Ricardo'|'Ruben'|'Russell'|'Salli'|'Seoyeon'|'Takumi'|'Tatyana'|'Vicki'|'Vitoria'|'Zeina'|'Zhiyu',
                'LanguageCode': 'arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR',
                'LanguageName': 'string',
                'Name': 'string',
                'AdditionalLanguageCodes': [
                    'arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR',
                ],
                'SupportedEngines': [
                    'standard'|'neural',
                ]
            },
        ],
        'NextToken': 'string'
    }
    :returns:
    (string) --
    """
    # Documentation-only stub: body intentionally empty (always returns None).
    pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
    """
    Generate a presigned url given a client, its method, and arguments
    :type ClientMethod: string
    :param ClientMethod: The client method to presign for
    :type Params: dict
    :param Params: The parameters normally passed to\nClientMethod.
    :type ExpiresIn: int
    :param ExpiresIn: The number of seconds the presigned url is valid\nfor. By default it expires in an hour (3600 seconds)
    :type HttpMethod: string
    :param HttpMethod: The http method to use on the generated url. By\ndefault, the http method is whatever is used in the method\'s model.
    """
    # Documentation-only stub: body intentionally empty (always returns None).
    pass
def get_lexicon(Name=None):
    """
    Returns the content of the specified pronunciation lexicon stored in an AWS Region. For more information, see Managing Lexicons .
    See also: AWS API Documentation
    Exceptions
    Examples
    Returns the content of the specified pronunciation lexicon stored in an AWS Region.
    Expected Output:
    :example: response = client.get_lexicon(
        Name='string'
    )
    :type Name: string
    :param Name: [REQUIRED]\nName of the lexicon.\n
    :rtype: dict
    ReturnsResponse Syntax{
        'Lexicon': {
            'Content': 'string',
            'Name': 'string'
        },
        'LexiconAttributes': {
            'Alphabet': 'string',
            'LanguageCode': 'arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR',
            'LastModified': datetime(2015, 1, 1),
            'LexiconArn': 'string',
            'LexemesCount': 123,
            'Size': 123
        }
    }
    Response Structure
    (dict) --
    Lexicon (dict) --Lexicon object that provides name and the string content of the lexicon.
    Content (string) --Lexicon content in string format. The content of a lexicon must be in PLS format.
    Name (string) --Name of the lexicon.
    LexiconAttributes (dict) --Metadata of the lexicon, including phonetic alphabetic used, language code, lexicon ARN, number of lexemes defined in the lexicon, and size of lexicon in bytes.
    Alphabet (string) --Phonetic alphabet used in the lexicon. Valid values are ipa and x-sampa .
    LanguageCode (string) --Language code that the lexicon applies to. A lexicon with a language code such as "en" would be applied to all English languages (en-GB, en-US, en-AUS, en-WLS, and so on.
    LastModified (datetime) --Date lexicon was last modified (a timestamp value).
    LexiconArn (string) --Amazon Resource Name (ARN) of the lexicon.
    LexemesCount (integer) --Number of lexemes in the lexicon.
    Size (integer) --Total size of the lexicon, in characters.
    Exceptions
    Polly.Client.exceptions.LexiconNotFoundException
    Polly.Client.exceptions.ServiceFailureException
    Examples
    Returns the content of the specified pronunciation lexicon stored in an AWS Region.
    response = client.get_lexicon(
        Name='',
    )
    print(response)
    Expected Output:
    {
        'Lexicon': {
            'Content': '<?xml version="1.0" encoding="UTF-8"?>\\r\
    <lexicon version="1.0" \\r\
    xmlns="http://www.w3.org/2005/01/pronunciation-lexicon"\\r\
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" \\r\
    xsi:schemaLocation="http://www.w3.org/2005/01/pronunciation-lexicon \\r\
    http://www.w3.org/TR/2007/CR-pronunciation-lexicon-20071212/pls.xsd"\\r\
    alphabet="ipa" \\r\
    xml:lang="en-US">\\r\
    <lexeme>\\r\
    <grapheme>W3C</grapheme>\\r\
    <alias>World Wide Web Consortium</alias>\\r\
    </lexeme>\\r\
    </lexicon>',
            'Name': 'example',
        },
        'LexiconAttributes': {
            'Alphabet': 'ipa',
            'LanguageCode': 'en-US',
            'LastModified': 1478542980.117,
            'LexemesCount': 1,
            'LexiconArn': 'arn:aws:polly:us-east-1:123456789012:lexicon/example',
            'Size': 503,
        },
        'ResponseMetadata': {
            '...': '...',
        },
    }
    :return: {
        'Lexicon': {
            'Content': 'string',
            'Name': 'string'
        },
        'LexiconAttributes': {
            'Alphabet': 'string',
            'LanguageCode': 'arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR',
            'LastModified': datetime(2015, 1, 1),
            'LexiconArn': 'string',
            'LexemesCount': 123,
            'Size': 123
        }
    }
    """
    # Documentation-only stub: body intentionally empty (always returns None).
    pass
def get_paginator(operation_name=None):
    """
    Create a paginator for an operation.
    :type operation_name: string
    :param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo').
    :rtype: L{botocore.paginate.Paginator}
    ReturnsA paginator object.
    """
    # Documentation-only stub: body intentionally empty (always returns None).
    pass
def get_speech_synthesis_task(TaskId=None):
    """
    Retrieves a specific SpeechSynthesisTask object based on its TaskID. This object contains information about the given speech synthesis task, including the status of the task, and a link to the S3 bucket containing the output of the task.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.get_speech_synthesis_task(
        TaskId='string'
    )
    :type TaskId: string
    :param TaskId: [REQUIRED]\nThe Amazon Polly generated identifier for a speech synthesis task.\n
    :rtype: dict
    ReturnsResponse Syntax{
        'SynthesisTask': {
            'Engine': 'standard'|'neural',
            'TaskId': 'string',
            'TaskStatus': 'scheduled'|'inProgress'|'completed'|'failed',
            'TaskStatusReason': 'string',
            'OutputUri': 'string',
            'CreationTime': datetime(2015, 1, 1),
            'RequestCharacters': 123,
            'SnsTopicArn': 'string',
            'LexiconNames': [
                'string',
            ],
            'OutputFormat': 'json'|'mp3'|'ogg_vorbis'|'pcm',
            'SampleRate': 'string',
            'SpeechMarkTypes': [
                'sentence'|'ssml'|'viseme'|'word',
            ],
            'TextType': 'ssml'|'text',
            'VoiceId': 'Aditi'|'Amy'|'Astrid'|'Bianca'|'Brian'|'Camila'|'Carla'|'Carmen'|'Celine'|'Chantal'|'Conchita'|'Cristiano'|'Dora'|'Emma'|'Enrique'|'Ewa'|'Filiz'|'Geraint'|'Giorgio'|'Gwyneth'|'Hans'|'Ines'|'Ivy'|'Jacek'|'Jan'|'Joanna'|'Joey'|'Justin'|'Karl'|'Kendra'|'Kimberly'|'Lea'|'Liv'|'Lotte'|'Lucia'|'Lupe'|'Mads'|'Maja'|'Marlene'|'Mathieu'|'Matthew'|'Maxim'|'Mia'|'Miguel'|'Mizuki'|'Naja'|'Nicole'|'Penelope'|'Raveena'|'Ricardo'|'Ruben'|'Russell'|'Salli'|'Seoyeon'|'Takumi'|'Tatyana'|'Vicki'|'Vitoria'|'Zeina'|'Zhiyu',
            'LanguageCode': 'arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR'
        }
    }
    Response Structure
    (dict) --
    SynthesisTask (dict) --SynthesisTask object that provides information from the requested task, including output format, creation time, task status, and so on.
    Engine (string) --Specifies the engine (standard or neural ) for Amazon Polly to use when processing input text for speech synthesis. Using a voice that is not supported for the engine selected will result in an error.
    TaskId (string) --The Amazon Polly generated identifier for a speech synthesis task.
    TaskStatus (string) --Current status of the individual speech synthesis task.
    TaskStatusReason (string) --Reason for the current status of a specific speech synthesis task, including errors if the task has failed.
    OutputUri (string) --Pathway for the output speech file.
    CreationTime (datetime) --Timestamp for the time the synthesis task was started.
    RequestCharacters (integer) --Number of billable characters synthesized.
    SnsTopicArn (string) --ARN for the SNS topic optionally used for providing status notification for a speech synthesis task.
    LexiconNames (list) --List of one or more pronunciation lexicon names you want the service to apply during synthesis. Lexicons are applied only if the language of the lexicon is the same as the language of the voice.
    (string) --
    OutputFormat (string) --The format in which the returned output will be encoded. For audio stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this will be json.
    SampleRate (string) --The audio frequency specified in Hz.
    The valid values for mp3 and ogg_vorbis are "8000", "16000", "22050", and "24000". The default value for standard voices is "22050". The default value for neural voices is "24000".
    Valid values for pcm are "8000" and "16000" The default value is "16000".
    SpeechMarkTypes (list) --The type of speech marks returned for the input text.
    (string) --
    TextType (string) --Specifies whether the input text is plain text or SSML. The default value is plain text.
    VoiceId (string) --Voice ID to use for the synthesis.
    LanguageCode (string) --Optional language code for a synthesis task. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN).
    If a bilingual voice is used and no language code is specified, Amazon Polly will use the default language of the bilingual voice. The default language for any voice is the one returned by the DescribeVoices operation for the LanguageCode parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi.
    Exceptions
    Polly.Client.exceptions.InvalidTaskIdException
    Polly.Client.exceptions.ServiceFailureException
    Polly.Client.exceptions.SynthesisTaskNotFoundException
    :return: {
        'SynthesisTask': {
            'Engine': 'standard'|'neural',
            'TaskId': 'string',
            'TaskStatus': 'scheduled'|'inProgress'|'completed'|'failed',
            'TaskStatusReason': 'string',
            'OutputUri': 'string',
            'CreationTime': datetime(2015, 1, 1),
            'RequestCharacters': 123,
            'SnsTopicArn': 'string',
            'LexiconNames': [
                'string',
            ],
            'OutputFormat': 'json'|'mp3'|'ogg_vorbis'|'pcm',
            'SampleRate': 'string',
            'SpeechMarkTypes': [
                'sentence'|'ssml'|'viseme'|'word',
            ],
            'TextType': 'ssml'|'text',
            'VoiceId': 'Aditi'|'Amy'|'Astrid'|'Bianca'|'Brian'|'Camila'|'Carla'|'Carmen'|'Celine'|'Chantal'|'Conchita'|'Cristiano'|'Dora'|'Emma'|'Enrique'|'Ewa'|'Filiz'|'Geraint'|'Giorgio'|'Gwyneth'|'Hans'|'Ines'|'Ivy'|'Jacek'|'Jan'|'Joanna'|'Joey'|'Justin'|'Karl'|'Kendra'|'Kimberly'|'Lea'|'Liv'|'Lotte'|'Lucia'|'Lupe'|'Mads'|'Maja'|'Marlene'|'Mathieu'|'Matthew'|'Maxim'|'Mia'|'Miguel'|'Mizuki'|'Naja'|'Nicole'|'Penelope'|'Raveena'|'Ricardo'|'Ruben'|'Russell'|'Salli'|'Seoyeon'|'Takumi'|'Tatyana'|'Vicki'|'Vitoria'|'Zeina'|'Zhiyu',
            'LanguageCode': 'arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR'
        }
    }
    :returns:
    (string) --
    """
    # Documentation-only stub: body intentionally empty (always returns None).
    pass
def get_waiter(waiter_name=None):
    """
    Returns an object that can wait for some condition.
    :type waiter_name: str
    :param waiter_name: The name of the waiter to get. See the waiters\nsection of the service docs for a list of available waiters.
    :rtype: botocore.waiter.Waiter
    """
    # Documentation-only stub: body intentionally empty (always returns None).
    pass
def list_lexicons(NextToken=None):
    """Return the pronunciation lexicons stored in the caller's AWS Region.

    For more information, see Managing Lexicons.
    See also: AWS API Documentation (Polly ``ListLexicons``).

    :type NextToken: string
    :param NextToken: Opaque pagination token returned by a previous
        ListLexicons call; if present, indicates where to continue the
        listing of lexicons.
    :rtype: dict
    :return: Response of the shape::

        {
            'Lexicons': [
                {
                    'Name': 'string',
                    'Attributes': {
                        'Alphabet': 'string',        # 'ipa' or 'x-sampa'
                        'LanguageCode': 'arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR',
                        'LastModified': datetime(2015, 1, 1),
                        'LexiconArn': 'string',
                        'LexemesCount': 123,
                        'Size': 123                  # total size, in characters
                    }
                },
            ],
            'NextToken': 'string'                    # only when truncated
        }

        A lexicon's language code applies to all variants of that
        language (e.g. an "en" lexicon covers en-GB, en-US, and so on).

    Raises ``Polly.Client.exceptions.InvalidNextTokenException`` and
    ``Polly.Client.exceptions.ServiceFailureException``.
    """
    pass
def list_speech_synthesis_tasks(MaxResults=None, NextToken=None, Status=None):
    """Return SpeechSynthesisTask objects ordered by their creation date.

    The listing can be filtered by task status, for example to show only
    tasks that are completed.
    See also: AWS API Documentation (Polly ``ListSpeechSynthesisTasks``).

    :type MaxResults: integer
    :param MaxResults: Maximum number of speech synthesis tasks returned
        in a List operation.
    :type NextToken: string
    :param NextToken: Pagination token to use in the next request to
        continue the listing of speech synthesis tasks.
    :type Status: string
    :param Status: Status filter for the returned tasks; one of
        'scheduled'|'inProgress'|'completed'|'failed'.
    :rtype: dict
    :return: Response of the shape::

        {
            'NextToken': 'string',
            'SynthesisTasks': [
                {
                    'Engine': 'standard'|'neural',
                    'TaskId': 'string',
                    'TaskStatus': 'scheduled'|'inProgress'|'completed'|'failed',
                    'TaskStatusReason': 'string',
                    'OutputUri': 'string',
                    'CreationTime': datetime(2015, 1, 1),
                    'RequestCharacters': 123,        # billable characters
                    'SnsTopicArn': 'string',
                    'LexiconNames': ['string'],
                    'OutputFormat': 'json'|'mp3'|'ogg_vorbis'|'pcm',
                    'SampleRate': 'string',          # audio frequency in Hz
                    'SpeechMarkTypes': ['sentence'|'ssml'|'viseme'|'word'],
                    'TextType': 'ssml'|'text',
                    'VoiceId': 'Aditi'|'Amy'|...|'Zhiyu',
                    'LanguageCode': 'arb'|'cmn-CN'|...|'tr-TR'
                },
            ]
        }

        LanguageCode is only meaningful for bilingual voices such as
        Aditi (en-IN or hi-IN); when no code was specified, the voice's
        default language (per DescribeVoices) was used.

    Raises ``Polly.Client.exceptions.InvalidNextTokenException`` and
    ``Polly.Client.exceptions.ServiceFailureException``.
    """
    pass
def put_lexicon(Name=None, Content=None):
    """Store a pronunciation lexicon in an AWS Region.

    If a lexicon with the same name already exists in the region, it is
    overwritten by the new lexicon. Lexicon operations have eventual
    consistency, so it might take some time before the lexicon is
    available to the SynthesizeSpeech operation.

    For more information, see Managing Lexicons.
    See also: AWS API Documentation (Polly ``PutLexicon``).

    :type Name: string
    :param Name: [REQUIRED] Name of the lexicon. Must match the regular
        expression ``[0-9A-Za-z]{1,20}``: a case-sensitive alphanumeric
        string up to 20 characters long.
    :type Content: string
    :param Content: [REQUIRED] Content of the PLS lexicon as string data.
    :rtype: dict
    :return: {} (empty response body)

    Raises (all under ``Polly.Client.exceptions``):
    InvalidLexiconException, UnsupportedPlsAlphabetException,
    UnsupportedPlsLanguageException, LexiconSizeExceededException,
    MaxLexemeLengthExceededException, MaxLexiconsNumberExceededException,
    ServiceFailureException.
    """
    pass
def start_speech_synthesis_task(Engine=None, LanguageCode=None, LexiconNames=None, OutputFormat=None, OutputS3BucketName=None, OutputS3KeyPrefix=None, SampleRate=None, SnsTopicArn=None, SpeechMarkTypes=None, Text=None, TextType=None, VoiceId=None):
    """Start an asynchronous speech synthesis task.

    Creates a new SpeechSynthesisTask from the standard synthesis inputs,
    the name of an Amazon S3 bucket where the service stores the output,
    and two optional parameters (OutputS3KeyPrefix and SnsTopicArn). Once
    created, the operation returns a SpeechSynthesisTask object that
    includes the task identifier and its current status.

    See also: AWS API Documentation (Polly ``StartSpeechSynthesisTask``).

    :type Engine: string
    :param Engine: 'standard' or 'neural'. Using a voice that is not
        supported for the selected engine results in an error.
    :type LanguageCode: string
    :param LanguageCode: Optional language code; only necessary for a
        bilingual voice such as Aditi, usable for either Indian English
        (en-IN) or Hindi (hi-IN). If omitted for a bilingual voice,
        Amazon Polly uses the voice's default language — the one returned
        by DescribeVoices for the LanguageCode parameter (e.g. Aditi
        defaults to Indian English rather than Hindi).
    :type LexiconNames: list
    :param LexiconNames: One or more pronunciation lexicon names to apply
        during synthesis. Lexicons are applied only if the language of
        the lexicon matches the language of the voice. (string) elements.
    :type OutputFormat: string
    :param OutputFormat: [REQUIRED] Encoding of the returned output:
        'mp3', 'ogg_vorbis' or 'pcm' for audio, 'json' for speech marks.
    :type OutputS3BucketName: string
    :param OutputS3BucketName: [REQUIRED] Amazon S3 bucket name to which
        the output file will be saved.
    :type OutputS3KeyPrefix: string
    :param OutputS3KeyPrefix: Amazon S3 key prefix for the output file.
    :type SampleRate: string
    :param SampleRate: Audio frequency in Hz. For mp3 and ogg_vorbis:
        '8000', '16000', '22050', '24000' (default '22050' for standard
        voices, '24000' for neural). For pcm: '8000' and '16000'
        (default '16000').
    :type SnsTopicArn: string
    :param SnsTopicArn: ARN of the SNS topic optionally used for status
        notification of the synthesis task.
    :type SpeechMarkTypes: list
    :param SpeechMarkTypes: Speech mark types returned for the input
        text. (string) elements: 'sentence'|'ssml'|'viseme'|'word'.
    :type Text: string
    :param Text: [REQUIRED] Input text to synthesize. If TextType is
        'ssml', follow the SSML format.
    :type TextType: string
    :param TextType: 'ssml' or 'text'; the default is plain text.
    :type VoiceId: string
    :param VoiceId: [REQUIRED] Voice ID to use for the synthesis.
    :rtype: dict
    :return: Response of the shape::

        {
            'SynthesisTask': {
                'Engine': 'standard'|'neural',
                'TaskId': 'string',
                'TaskStatus': 'scheduled'|'inProgress'|'completed'|'failed',
                'TaskStatusReason': 'string',
                'OutputUri': 'string',
                'CreationTime': datetime(2015, 1, 1),
                'RequestCharacters': 123,
                'SnsTopicArn': 'string',
                'LexiconNames': ['string'],
                'OutputFormat': 'json'|'mp3'|'ogg_vorbis'|'pcm',
                'SampleRate': 'string',
                'SpeechMarkTypes': ['sentence'|'ssml'|'viseme'|'word'],
                'TextType': 'ssml'|'text',
                'VoiceId': 'Aditi'|'Amy'|...|'Zhiyu',
                'LanguageCode': 'arb'|'cmn-CN'|...|'tr-TR'
            }
        }

    Raises (all under ``Polly.Client.exceptions``):
    TextLengthExceededException, InvalidS3BucketException,
    InvalidS3KeyException, InvalidSampleRateException,
    InvalidSnsTopicArnException, InvalidSsmlException,
    EngineNotSupportedException, LexiconNotFoundException,
    ServiceFailureException, MarksNotSupportedForFormatException,
    SsmlMarksNotSupportedForTextTypeException,
    LanguageNotSupportedException.
    """
    pass
def synthesize_speech(Engine=None, LanguageCode=None, LexiconNames=None, OutputFormat=None, SampleRate=None, SpeechMarkTypes=None, Text=None, TextType=None, VoiceId=None):
    """Synthesize UTF-8 input, plain text or SSML, into a stream of bytes.

    SSML input must be valid, well-formed SSML. Some alphabets might not
    be available with all the voices (for example, Cyrillic might not be
    read at all by English voices) unless phoneme mapping is used. For
    more information, see How it Works.

    See also: AWS API Documentation (Polly ``SynthesizeSpeech``).

    :type Engine: string
    :param Engine: 'standard' or 'neural'. Using a voice that is not
        supported for the selected engine results in an error.
    :type LanguageCode: string
    :param LanguageCode: Optional language code; only necessary for a
        bilingual voice such as Aditi (en-IN or hi-IN). If omitted for a
        bilingual voice, the voice's default language — the one returned
        by DescribeVoices for the LanguageCode parameter — is used.
    :type LexiconNames: list
    :param LexiconNames: One or more pronunciation lexicon names to apply
        during synthesis; applied only if the lexicon language matches
        the voice language. For storing lexicons, see PutLexicon.
    :type OutputFormat: string
    :param OutputFormat: [REQUIRED] Encoding of the returned output:
        'mp3', 'ogg_vorbis' or 'pcm' for audio, 'json' for speech marks.
        With 'pcm', the returned content is audio/pcm in a signed 16-bit,
        1 channel (mono), little-endian format.
    :type SampleRate: string
    :param SampleRate: Audio frequency in Hz. For mp3 and ogg_vorbis:
        '8000', '16000', '22050', '24000' (default '22050' for standard
        voices, '24000' for neural). For pcm: '8000' and '16000'
        (default '16000').
    :type SpeechMarkTypes: list
    :param SpeechMarkTypes: Speech mark types returned for the input
        text. (string) elements: 'sentence'|'ssml'|'viseme'|'word'.
    :type Text: string
    :param Text: [REQUIRED] Input text to synthesize. If TextType is
        'ssml', follow the SSML format.
    :type TextType: string
    :param TextType: 'ssml' or 'text'; the default is plain text. For
        more information, see Using SSML.
    :type VoiceId: string
    :param VoiceId: [REQUIRED] Voice ID to use for the synthesis. Get the
        available voice IDs by calling the DescribeVoices operation.
    :rtype: dict
    :return: Response of the shape::

        {
            'AudioStream': StreamingBody(),  # the synthesized speech
            'ContentType': 'string',
            'RequestCharacters': 123         # characters synthesized
        }

        ContentType reflects OutputFormat: audio/mpeg for mp3, audio/ogg
        for ogg_vorbis, audio/pcm (signed 16-bit mono little-endian) for
        pcm, and audio/json for json.

    Raises (all under ``Polly.Client.exceptions``):
    TextLengthExceededException, InvalidSampleRateException,
    InvalidSsmlException, LexiconNotFoundException,
    ServiceFailureException, MarksNotSupportedForFormatException,
    SsmlMarksNotSupportedForTextTypeException,
    LanguageNotSupportedException, EngineNotSupportedException.
    """
    pass
| 40.484779
| 567
| 0.665489
| 6,951
| 53,197
| 5.081571
| 0.100849
| 0.006795
| 0.025565
| 0.004813
| 0.747608
| 0.728498
| 0.721873
| 0.704802
| 0.690165
| 0.666327
| 0
| 0.011608
| 0.191891
| 53,197
| 1,313
| 568
| 40.515613
| 0.810044
| 0.973946
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
cf0f85f9e709161cf25828cbdd166a5a33e46362
| 2,890
|
py
|
Python
|
biobb_analysis/test/unitests/test_ambertools/test_cpptraj_rmsf_container.py
|
bioexcel/biobb_analysis
|
794683daf65eb13ddaaaf6cf3c19da6d1322a949
|
[
"Apache-2.0"
] | 3
|
2019-05-18T14:52:30.000Z
|
2020-10-18T06:20:00.000Z
|
biobb_analysis/test/unitests/test_ambertools/test_cpptraj_rmsf_container.py
|
bioexcel/biobb_analysis
|
794683daf65eb13ddaaaf6cf3c19da6d1322a949
|
[
"Apache-2.0"
] | 7
|
2019-03-04T15:04:28.000Z
|
2021-06-17T10:57:25.000Z
|
biobb_analysis/test/unitests/test_ambertools/test_cpptraj_rmsf_container.py
|
bioexcel/biobb_analysis
|
794683daf65eb13ddaaaf6cf3c19da6d1322a949
|
[
"Apache-2.0"
] | null | null | null |
from biobb_common.tools import test_fixtures as fx
from biobb_analysis.ambertools.cpptraj_rmsf import cpptraj_rmsf
class TestCpptrajRmsfFirstDocker():
    """cpptraj RMSF test for the 'cpptraj_rmsf_first_docker' fixture.

    Runs cpptraj_rmsf with the fixture's paths/properties and compares
    the produced output against the stored reference file.
    """
    def setUp(self):
        # Populate self.properties and self.paths from the test fixtures.
        fx.test_setup(self, 'cpptraj_rmsf_first_docker')
    def tearDown(self):
        # Clean up files generated during the test run.
        fx.test_teardown(self)
    def test_rmsf_first_docker(self):
        cpptraj_rmsf(properties=self.properties, **self.paths)
        out = self.paths['output_cpptraj_path']
        assert fx.not_empty(out)
        assert fx.equal(out, self.paths['ref_output_cpptraj_path'])
class TestCpptrajRmsfAverageDocker():
    """cpptraj RMSF test for the 'cpptraj_rmsf_average_docker' fixture.

    Runs cpptraj_rmsf with the fixture's paths/properties and compares
    the produced output against the stored reference file.
    """
    def setUp(self):
        # Populate self.properties and self.paths from the test fixtures.
        fx.test_setup(self, 'cpptraj_rmsf_average_docker')
    def tearDown(self):
        # Clean up files generated during the test run.
        fx.test_teardown(self)
    def test_rmsf_average_docker(self):
        cpptraj_rmsf(properties=self.properties, **self.paths)
        out = self.paths['output_cpptraj_path']
        assert fx.not_empty(out)
        assert fx.equal(out, self.paths['ref_output_cpptraj_path'])
class TestCpptrajRmsfExperimentalDocker():
    """cpptraj RMSF test for the 'cpptraj_rmsf_experimental_docker' fixture.

    Runs cpptraj_rmsf with the fixture's paths/properties and compares
    the produced output against the stored reference file.
    """
    def setUp(self):
        # Populate self.properties and self.paths from the test fixtures.
        fx.test_setup(self, 'cpptraj_rmsf_experimental_docker')
    def tearDown(self):
        # Clean up files generated during the test run.
        fx.test_teardown(self)
    def test_rmsf_experimental_docker(self):
        cpptraj_rmsf(properties=self.properties, **self.paths)
        out = self.paths['output_cpptraj_path']
        assert fx.not_empty(out)
        assert fx.equal(out, self.paths['ref_output_cpptraj_path'])
class TestCpptrajRmsfFirstSingularity():
    """cpptraj RMSF test for the 'cpptraj_rmsf_first_singularity' fixture.

    Runs cpptraj_rmsf with the fixture's paths/properties and compares
    the produced output against the stored reference file.
    """
    def setUp(self):
        # Populate self.properties and self.paths from the test fixtures.
        fx.test_setup(self, 'cpptraj_rmsf_first_singularity')
    def tearDown(self):
        # Clean up files generated during the test run.
        fx.test_teardown(self)
    def test_rmsf_first_singularity(self):
        cpptraj_rmsf(properties=self.properties, **self.paths)
        out = self.paths['output_cpptraj_path']
        assert fx.not_empty(out)
        assert fx.equal(out, self.paths['ref_output_cpptraj_path'])
class TestCpptrajRmsfAverageSingularity():
    """cpptraj RMSF test for the 'cpptraj_rmsf_average_singularity' fixture.

    Runs cpptraj_rmsf with the fixture's paths/properties and compares
    the produced output against the stored reference file.
    """
    def setUp(self):
        # Populate self.properties and self.paths from the test fixtures.
        fx.test_setup(self, 'cpptraj_rmsf_average_singularity')
    def tearDown(self):
        # Clean up files generated during the test run.
        fx.test_teardown(self)
    def test_rmsf_average_singularity(self):
        cpptraj_rmsf(properties=self.properties, **self.paths)
        out = self.paths['output_cpptraj_path']
        assert fx.not_empty(out)
        assert fx.equal(out, self.paths['ref_output_cpptraj_path'])
class TestCpptrajRmsfExperimentalSingularity():
    """cpptraj RMSF test for the 'cpptraj_rmsf_experimental_singularity' fixture.

    Runs cpptraj_rmsf with the fixture's paths/properties and compares
    the produced output against the stored reference file.
    """
    def setUp(self):
        # Populate self.properties and self.paths from the test fixtures.
        fx.test_setup(self, 'cpptraj_rmsf_experimental_singularity')
    def tearDown(self):
        # Clean up files generated during the test run.
        fx.test_teardown(self)
    def test_rmsf_experimental_singularity(self):
        cpptraj_rmsf(properties=self.properties, **self.paths)
        out = self.paths['output_cpptraj_path']
        assert fx.not_empty(out)
        assert fx.equal(out, self.paths['ref_output_cpptraj_path'])
| 35.679012
| 97
| 0.721453
| 360
| 2,890
| 5.494444
| 0.108333
| 0.109201
| 0.154702
| 0.133468
| 0.852882
| 0.852882
| 0.852882
| 0.852882
| 0.852882
| 0.852882
| 0
| 0
| 0.171626
| 2,890
| 81
| 98
| 35.679012
| 0.826232
| 0
| 0
| 0.677419
| 0
| 0
| 0.1899
| 0.111034
| 0
| 0
| 0
| 0
| 0.193548
| 1
| 0.290323
| false
| 0.096774
| 0.032258
| 0
| 0.419355
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
cf437148f5792bad0d1bb0e21d2e77891b89dd22
| 205
|
py
|
Python
|
coronavirus/__init__.py
|
rwright88/coronavirus
|
bff3bd893f11827cf54ea7a026831c141ad41f32
|
[
"MIT"
] | null | null | null |
coronavirus/__init__.py
|
rwright88/coronavirus
|
bff3bd893f11827cf54ea7a026831c141ad41f32
|
[
"MIT"
] | null | null | null |
coronavirus/__init__.py
|
rwright88/coronavirus
|
bff3bd893f11827cf54ea7a026831c141ad41f32
|
[
"MIT"
] | null | null | null |
from coronavirus.calc import calc_stats
from coronavirus.data import get_data
from coronavirus.forecast import forecast_all
from coronavirus.plot import map_by_date
from coronavirus.plot import plot_trend
| 34.166667
| 45
| 0.878049
| 31
| 205
| 5.612903
| 0.451613
| 0.431034
| 0.218391
| 0.287356
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 205
| 5
| 46
| 41
| 0.940541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d8805f29432b6812305bad1020bac7d85b4d15f9
| 42,413
|
py
|
Python
|
src/mbed_cloud/_backends/enrollment/apis/public_api_api.py
|
GQMai/mbed-cloud-sdk-python
|
76ef009903415f37f69dcc5778be8f5fb14c08fe
|
[
"Apache-2.0"
] | 12
|
2017-12-28T11:18:43.000Z
|
2020-10-04T12:11:15.000Z
|
src/mbed_cloud/_backends/enrollment/apis/public_api_api.py
|
GQMai/mbed-cloud-sdk-python
|
76ef009903415f37f69dcc5778be8f5fb14c08fe
|
[
"Apache-2.0"
] | 50
|
2017-12-21T12:50:41.000Z
|
2020-01-13T16:07:08.000Z
|
src/mbed_cloud/_backends/enrollment/apis/public_api_api.py
|
GQMai/mbed-cloud-sdk-python
|
76ef009903415f37f69dcc5778be8f5fb14c08fe
|
[
"Apache-2.0"
] | 8
|
2018-04-25T17:47:29.000Z
|
2019-08-29T06:38:27.000Z
|
# coding: utf-8
"""
Enrollment API
Connect Enrollment Service allows users to claim the ownership of a device which is not yet assigned to an account. A device without an assigned account can be a device purchased from the open market (OEM dealer) or a device transferred from an account to another. More information in [Device ownership: First-to-claim](/docs/current/connecting/device-ownership-first-to-claim-by-enrollment-list.html) document.
OpenAPI spec version: 3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from ..api_client import ApiClient
class PublicAPIApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    """Store the transport client, creating a default one when none is given.

    :param api_client: an ApiClient instance; None means "build a default".
    """
    self.api_client = api_client if api_client is not None else ApiClient()
def create_bulk_device_enrollment(self, enrollment_identities, **kwargs):  # noqa: E501
    """Bulk upload  # noqa: E501

    Upload a `CSV` file of enrollment IDs to claim devices in bulk
    (``POST /v3/device-enrollments-bulk-uploads``).  The first CSV line is
    treated as a header; each subsequent line carries one enrollment
    identity (``A-`` followed by 95 characters), optionally quoted, UTF-8
    encoded.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.create_bulk_device_enrollment(enrollment_identities, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param file enrollment_identities: The `CSV` file containing the enrollment IDs. The maximum file size is 10MB. (required)
    :return: BulkResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # The public wrapper always wants just the decoded body, never the
    # (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Synchronous and asynchronous paths alike hand back whatever the
    # *_with_http_info helper produces (decoded body or request thread).
    return self.create_bulk_device_enrollment_with_http_info(enrollment_identities, **kwargs)  # noqa: E501
def create_bulk_device_enrollment_with_http_info(self, enrollment_identities, **kwargs):  # noqa: E501
    """Bulk upload  # noqa: E501

    Implementation behind ``create_bulk_device_enrollment``: uploads a
    `CSV` file of enrollment IDs as multipart form data via
    ``POST /v3/device-enrollments-bulk-uploads``.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.create_bulk_device_enrollment_with_http_info(enrollment_identities, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param file enrollment_identities: The `CSV` file containing the enrollment IDs. The maximum file size is 10MB. (required)
    :return: BulkResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint understands; anything else is
    # rejected below with an explicit TypeError.
    all_params = ['enrollment_identities']  # noqa: E501
    all_params.append('asynchronous')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # locals() snapshots self, the named parameter and the kwargs dict;
    # kwargs is folded in and removed so `params` holds one flat mapping.
    # (Standard swagger-codegen idiom.)
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_bulk_device_enrollment" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'enrollment_identities' is set
    if ('enrollment_identities' not in params or
            params['enrollment_identities'] is None):
        raise ValueError("Missing the required parameter `enrollment_identities` when calling `create_bulk_device_enrollment`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}
    # The CSV is sent as a multipart file part, not as the request body.
    if 'enrollment_identities' in params:
        local_var_files['enrollment_identities'] = params['enrollment_identities']  # noqa: E501

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['multipart/form-data'])  # noqa: E501

    # Authentication setting
    auth_settings = ['Bearer']  # noqa: E501

    return self.api_client.call_api(
        '/v3/device-enrollments-bulk-uploads', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='BulkResponse',  # noqa: E501
        auth_settings=auth_settings,
        asynchronous=params.get('asynchronous'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def create_device_enrollment(self, enrollment_identity, **kwargs):  # noqa: E501
    """Place an enrollment claim for one or several devices.  # noqa: E501

    Claim a device by its enrollment ID (``POST /v3/device-enrollments``);
    once the device contacts the bootstrap server with that ID it is
    assigned to your account.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.create_device_enrollment(enrollment_identity, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param EnrollmentId enrollment_identity: (required)
    :return: EnrollmentIdentity
             If the method is called asynchronously,
             returns the request thread.
    """
    # The public wrapper always wants just the decoded body.
    kwargs['_return_http_data_only'] = True
    # Synchronous and asynchronous paths alike return the helper's result.
    return self.create_device_enrollment_with_http_info(enrollment_identity, **kwargs)  # noqa: E501
def create_device_enrollment_with_http_info(self, enrollment_identity, **kwargs):  # noqa: E501
    """Place an enrollment claim for one or several devices.  # noqa: E501

    Implementation behind ``create_device_enrollment``: sends the
    enrollment identity as a JSON body via ``POST /v3/device-enrollments``.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.create_device_enrollment_with_http_info(enrollment_identity, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param EnrollmentId enrollment_identity: (required)
    :return: EnrollmentIdentity
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint understands; anything else is
    # rejected below with an explicit TypeError.
    all_params = ['enrollment_identity']  # noqa: E501
    all_params.append('asynchronous')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # locals() snapshots self, the named parameter and the kwargs dict;
    # kwargs is folded in and removed so `params` holds one flat mapping.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_device_enrollment" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'enrollment_identity' is set
    if ('enrollment_identity' not in params or
            params['enrollment_identity'] is None):
        raise ValueError("Missing the required parameter `enrollment_identity` when calling `create_device_enrollment`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # Unlike the bulk endpoints, the identity travels as the JSON body.
    if 'enrollment_identity' in params:
        body_params = params['enrollment_identity']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['Bearer']  # noqa: E501

    return self.api_client.call_api(
        '/v3/device-enrollments', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='EnrollmentIdentity',  # noqa: E501
        auth_settings=auth_settings,
        asynchronous=params.get('asynchronous'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_bulk_device_enrollment(self, enrollment_identities, **kwargs):  # noqa: E501
    """Bulk delete  # noqa: E501

    Upload a `CSV` file of enrollment IDs to delete in bulk
    (``POST /v3/device-enrollments-bulk-deletes``).  Same CSV format as
    the bulk upload: header line first, one enrollment identity per line,
    optionally quoted, UTF-8 encoded.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.delete_bulk_device_enrollment(enrollment_identities, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param file enrollment_identities: The `CSV` file containing the enrollment IDs. The maximum file size is 10MB. (required)
    :return: BulkResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # The public wrapper always wants just the decoded body.
    kwargs['_return_http_data_only'] = True
    # Synchronous and asynchronous paths alike return the helper's result.
    return self.delete_bulk_device_enrollment_with_http_info(enrollment_identities, **kwargs)  # noqa: E501
def delete_bulk_device_enrollment_with_http_info(self, enrollment_identities, **kwargs):  # noqa: E501
    """Bulk delete  # noqa: E501

    Implementation behind ``delete_bulk_device_enrollment``: uploads a
    `CSV` file of enrollment IDs as multipart form data via
    ``POST /v3/device-enrollments-bulk-deletes``.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.delete_bulk_device_enrollment_with_http_info(enrollment_identities, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param file enrollment_identities: The `CSV` file containing the enrollment IDs. The maximum file size is 10MB. (required)
    :return: BulkResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint understands; anything else is
    # rejected below with an explicit TypeError.
    all_params = ['enrollment_identities']  # noqa: E501
    all_params.append('asynchronous')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # locals() snapshots self, the named parameter and the kwargs dict;
    # kwargs is folded in and removed so `params` holds one flat mapping.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_bulk_device_enrollment" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'enrollment_identities' is set
    if ('enrollment_identities' not in params or
            params['enrollment_identities'] is None):
        raise ValueError("Missing the required parameter `enrollment_identities` when calling `delete_bulk_device_enrollment`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}
    # The CSV is sent as a multipart file part, not as the request body.
    if 'enrollment_identities' in params:
        local_var_files['enrollment_identities'] = params['enrollment_identities']  # noqa: E501

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['multipart/form-data'])  # noqa: E501

    # Authentication setting
    auth_settings = ['Bearer']  # noqa: E501

    return self.api_client.call_api(
        '/v3/device-enrollments-bulk-deletes', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='BulkResponse',  # noqa: E501
        auth_settings=auth_settings,
        asynchronous=params.get('asynchronous'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_device_enrollment(self, id, **kwargs):  # noqa: E501
    """Delete an enrollment by ID.  # noqa: E501

    Release a device from the account by deleting its enrollment claim
    (``DELETE /v3/device-enrollments/{id}``).  Fully transferring
    ownership additionally requires a factory reset of the device.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.delete_device_enrollment(id, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str id: Enrollment identity. (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # The public wrapper always wants just the decoded body.
    kwargs['_return_http_data_only'] = True
    # Synchronous and asynchronous paths alike return the helper's result.
    return self.delete_device_enrollment_with_http_info(id, **kwargs)  # noqa: E501
def delete_device_enrollment_with_http_info(self, id, **kwargs):  # noqa: E501
    """Delete an enrollment by ID.  # noqa: E501

    Implementation behind ``delete_device_enrollment``: issues
    ``DELETE /v3/device-enrollments/{id}`` with ``id`` as a path parameter.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.delete_device_enrollment_with_http_info(id, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str id: Enrollment identity. (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint understands; anything else is
    # rejected below with an explicit TypeError.
    all_params = ['id']  # noqa: E501
    all_params.append('asynchronous')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # locals() snapshots self, the named parameter and the kwargs dict;
    # kwargs is folded in and removed so `params` holds one flat mapping.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_device_enrollment" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `delete_device_enrollment`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    # The enrollment identity is substituted into the URL template.
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['Bearer']  # noqa: E501

    return self.api_client.call_api(
        '/v3/device-enrollments/{id}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        asynchronous=params.get('asynchronous'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_bulk_device_enrollment(self, id, **kwargs):  # noqa: E501
    """Get bulk upload entity  # noqa: E501

    Fetch the status of a bulk upload task
    (``GET /v3/device-enrollments-bulk-uploads/{id}``), including counts
    of processed enrollment identities and links to the upload reports.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.get_bulk_device_enrollment(id, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str id: Bulk create task entity ID (required)
    :return: BulkResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # The public wrapper always wants just the decoded body.
    kwargs['_return_http_data_only'] = True
    # Synchronous and asynchronous paths alike return the helper's result.
    return self.get_bulk_device_enrollment_with_http_info(id, **kwargs)  # noqa: E501
def get_bulk_device_enrollment_with_http_info(self, id, **kwargs):  # noqa: E501
    """Get bulk upload entity  # noqa: E501

    Implementation behind ``get_bulk_device_enrollment``: issues
    ``GET /v3/device-enrollments-bulk-uploads/{id}`` with ``id`` as a
    path parameter.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.get_bulk_device_enrollment_with_http_info(id, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str id: Bulk create task entity ID (required)
    :return: BulkResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint understands; anything else is
    # rejected below with an explicit TypeError.
    all_params = ['id']  # noqa: E501
    all_params.append('asynchronous')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # locals() snapshots self, the named parameter and the kwargs dict;
    # kwargs is folded in and removed so `params` holds one flat mapping.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_bulk_device_enrollment" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_bulk_device_enrollment`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    # The task identity is substituted into the URL template.
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['Bearer']  # noqa: E501

    return self.api_client.call_api(
        '/v3/device-enrollments-bulk-uploads/{id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='BulkResponse',  # noqa: E501
        auth_settings=auth_settings,
        asynchronous=params.get('asynchronous'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_bulk_device_enrollment_delete(self, id, **kwargs):  # noqa: E501
    """Get bulk delete entity  # noqa: E501

    Fetch the status of a bulk delete task
    (``GET /v3/device-enrollments-bulk-deletes/{id}``), including counts
    of processed enrollment identities and links to the delete reports.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.get_bulk_device_enrollment_delete(id, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str id: Bulk delete task entity ID (required)
    :return: BulkResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # The public wrapper always wants just the decoded body.
    kwargs['_return_http_data_only'] = True
    # Synchronous and asynchronous paths alike return the helper's result.
    return self.get_bulk_device_enrollment_delete_with_http_info(id, **kwargs)  # noqa: E501
def get_bulk_device_enrollment_delete_with_http_info(self, id, **kwargs):  # noqa: E501
    """Get bulk delete entity  # noqa: E501

    Implementation behind ``get_bulk_device_enrollment_delete``: issues
    ``GET /v3/device-enrollments-bulk-deletes/{id}`` with ``id`` as a
    path parameter.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.get_bulk_device_enrollment_delete_with_http_info(id, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str id: Bulk delete task entity ID (required)
    :return: BulkResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint understands; anything else is
    # rejected below with an explicit TypeError.
    all_params = ['id']  # noqa: E501
    all_params.append('asynchronous')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # locals() snapshots self, the named parameter and the kwargs dict;
    # kwargs is folded in and removed so `params` holds one flat mapping.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_bulk_device_enrollment_delete" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_bulk_device_enrollment_delete`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    # The task identity is substituted into the URL template.
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['Bearer']  # noqa: E501

    return self.api_client.call_api(
        '/v3/device-enrollments-bulk-deletes/{id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='BulkResponse',  # noqa: E501
        auth_settings=auth_settings,
        asynchronous=params.get('asynchronous'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_device_enrollment(self, id, **kwargs):  # noqa: E501
    """Get details of an enrollment by ID.  # noqa: E501

    Fetch one enrollment's details — e.g. claim date and expiration date
    (``GET /v3/device-enrollments/{id}``).
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.get_device_enrollment(id, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str id: Enrollment identity. (required)
    :return: EnrollmentIdentity
             If the method is called asynchronously,
             returns the request thread.
    """
    # The public wrapper always wants just the decoded body.
    kwargs['_return_http_data_only'] = True
    # Synchronous and asynchronous paths alike return the helper's result.
    return self.get_device_enrollment_with_http_info(id, **kwargs)  # noqa: E501
def get_device_enrollment_with_http_info(self, id, **kwargs):  # noqa: E501
    """Get details of an enrollment by ID.  # noqa: E501

    Implementation behind ``get_device_enrollment``: issues
    ``GET /v3/device-enrollments/{id}`` with ``id`` as a path parameter.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.get_device_enrollment_with_http_info(id, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str id: Enrollment identity. (required)
    :return: EnrollmentIdentity
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint understands; anything else is
    # rejected below with an explicit TypeError.
    all_params = ['id']  # noqa: E501
    all_params.append('asynchronous')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # locals() snapshots self, the named parameter and the kwargs dict;
    # kwargs is folded in and removed so `params` holds one flat mapping.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_device_enrollment" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_device_enrollment`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    # The enrollment identity is substituted into the URL template.
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['Bearer']  # noqa: E501

    return self.api_client.call_api(
        '/v3/device-enrollments/{id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='EnrollmentIdentity',  # noqa: E501
        auth_settings=auth_settings,
        asynchronous=params.get('asynchronous'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_device_enrollments(self, **kwargs):  # noqa: E501
    """Get enrollment list. # noqa: E501

    Provides a list of pending and claimed enrollments. **Example usage:** ``` curl -X GET \\ -H 'Authorization: Bearer <valid access token>' \\ https://api.us-east-1.mbedcloud.com/v3/device-enrollments ``` With query parameters: ``` curl -X GET \\ -H 'Authorization: Bearer <valid access token>' \\ 'https://api.us-east-1.mbedcloud.com/v3/device-enrollments?limit=10' ``` # noqa: E501

    By default the request is performed synchronously; pass
    asynchronous=True to receive a request thread instead.
    >>> thread = api.get_device_enrollments(asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param int limit: Number of results to be returned. Between 2 and 1000, inclusive.
    :param str after: Entity ID to fetch after.
    :param str order: ASC or DESC
    :param str include: Comma-separated additional data to return. Currently supported: total_count.
    :return: EnrollmentIdentities
        If the method is called asynchronously, returns the request thread.
    """
    # Callers of this wrapper always get the deserialized data (or the
    # request thread), never the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Synchronous and asynchronous calls are dispatched identically; the
    # delegate returns either the data or the thread as appropriate.
    return self.get_device_enrollments_with_http_info(**kwargs)  # noqa: E501
def get_device_enrollments_with_http_info(self, **kwargs):  # noqa: E501
    """Get enrollment list. # noqa: E501

    Provides a list of pending and claimed enrollments. **Example usage:** ``` curl -X GET \\ -H 'Authorization: Bearer <valid access token>' \\ https://api.us-east-1.mbedcloud.com/v3/device-enrollments ``` With query parameters: ``` curl -X GET \\ -H 'Authorization: Bearer <valid access token>' \\ 'https://api.us-east-1.mbedcloud.com/v3/device-enrollments?limit=10' ``` # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True
    >>> thread = api.get_device_enrollments_with_http_info(asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param int limit: Number of results to be returned. Between 2 and 1000, inclusive.
    :param str after: Entity ID to fetch after.
    :param str order: ASC or DESC
    :param str include: Comma-separated additional data to return. Currently supported: total_count.
    :return: EnrollmentIdentities
        If the method is called asynchronously, returns the request thread.
    """
    # Documented query parameters plus the generic transport options are
    # the only keyword arguments accepted.
    all_params = ['limit', 'after', 'order', 'include']  # noqa: E501
    all_params.append('asynchronous')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # locals() captures 'self' and 'kwargs'; kwargs entries are flattened
    # into params below so they can be looked up uniformly.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            # Reject typos / unsupported options up front rather than
            # silently dropping them.
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_device_enrollments" % key
            )
        params[key] = val
    del params['kwargs']

    # Client-side enforcement of the documented limit range (2..1000).
    if 'limit' in params and params['limit'] > 1000:  # noqa: E501
        raise ValueError("Invalid value for parameter `limit` when calling `get_device_enrollments`, must be a value less than or equal to `1000`")  # noqa: E501
    if 'limit' in params and params['limit'] < 2:  # noqa: E501
        raise ValueError("Invalid value for parameter `limit` when calling `get_device_enrollments`, must be a value greater than or equal to `2`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    # Only the parameters actually supplied are forwarded as query args.
    query_params = []
    if 'limit' in params:
        query_params.append(('limit', params['limit']))  # noqa: E501
    if 'after' in params:
        query_params.append(('after', params['after']))  # noqa: E501
    if 'order' in params:
        query_params.append(('order', params['order']))  # noqa: E501
    if 'include' in params:
        query_params.append(('include', params['include']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type` (set even for GET by the generator)
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['Bearer']  # noqa: E501

    # Delegate the actual HTTP round trip (or thread dispatch) to the
    # shared API client.
    return self.api_client.call_api(
        '/v3/device-enrollments', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='EnrollmentIdentities',  # noqa: E501
        auth_settings=auth_settings,
        asynchronous=params.get('asynchronous'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
| 50.551847
| 1,061
| 0.641171
| 5,238
| 42,413
| 5.021955
| 0.068156
| 0.040449
| 0.02433
| 0.021897
| 0.960198
| 0.953963
| 0.951302
| 0.941684
| 0.935906
| 0.933701
| 0
| 0.031801
| 0.257115
| 42,413
| 838
| 1,062
| 50.612172
| 0.803066
| 0.461887
| 0
| 0.794643
| 0
| 0.004464
| 0.20019
| 0.0739
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037946
| false
| 0
| 0.008929
| 0
| 0.102679
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d892995b60c5c42b2a079b10bef6d2f69d80e364
| 3,006
|
py
|
Python
|
menucard/migrations/0002_auto_20201230_1855.py
|
baniasbaabe/happy-qr
|
bf44ac19306ea6405cc7c9a100e6f83afca125b4
|
[
"MIT"
] | 1
|
2021-01-23T21:42:10.000Z
|
2021-01-23T21:42:10.000Z
|
menucard/migrations/0002_auto_20201230_1855.py
|
baniasbaabe/happy-qr
|
bf44ac19306ea6405cc7c9a100e6f83afca125b4
|
[
"MIT"
] | null | null | null |
menucard/migrations/0002_auto_20201230_1855.py
|
baniasbaabe/happy-qr
|
bf44ac19306ea6405cc7c9a100e6f83afca125b4
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.2 on 2020-12-30 17:55
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an optional 'zusatzstoffe' (additives) column to each menu-item
    # model and converts the price/volume columns to DecimalField.
    #
    # NOTE(review): DecimalField does not take a `max_length` option, and
    # `default=''` is a string default on a numeric field -- Django will
    # warn about / ignore these. Confirm against the current model
    # definitions; do not rewrite an already-applied migration, fix forward
    # with a new one instead.

    dependencies = [
        ('menucard', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='alkoholfreiedrinks',
            name='zusatzstoffe',
            field=models.CharField(blank=True, max_length=55, null=True),
        ),
        migrations.AddField(
            model_name='alkoholhaltigedrinks',
            name='zusatzstoffe',
            # NOTE(review): only this AddField carries default='' -- presumably
            # an unintended inconsistency with the other zusatzstoffe fields.
            field=models.CharField(blank=True, default='', max_length=55, null=True),
        ),
        migrations.AddField(
            model_name='hauptspeise',
            name='zusatzstoffe',
            field=models.CharField(blank=True, max_length=55, null=True),
        ),
        migrations.AddField(
            model_name='nachspeise',
            name='zusatzstoffe',
            field=models.CharField(blank=True, max_length=55, null=True),
        ),
        migrations.AddField(
            model_name='snacks',
            name='zusatzstoffe',
            field=models.CharField(blank=True, max_length=55, null=True),
        ),
        migrations.AddField(
            model_name='vorspeise',
            name='zusatzstoffe',
            field=models.CharField(blank=True, max_length=55, null=True),
        ),
        migrations.AlterField(
            model_name='alkoholfreiedrinks',
            name='liter',
            field=models.DecimalField(decimal_places=2, default='', max_digits=8, max_length=8),
        ),
        migrations.AlterField(
            model_name='alkoholfreiedrinks',
            name='preis',
            field=models.DecimalField(decimal_places=2, default='', max_digits=8, max_length=8),
        ),
        migrations.AlterField(
            model_name='alkoholhaltigedrinks',
            name='centiliter',
            field=models.DecimalField(decimal_places=2, default='', max_digits=8, max_length=8),
        ),
        migrations.AlterField(
            model_name='alkoholhaltigedrinks',
            name='preis',
            field=models.DecimalField(decimal_places=2, default='', max_digits=8, max_length=8),
        ),
        migrations.AlterField(
            model_name='hauptspeise',
            name='preis',
            field=models.DecimalField(decimal_places=2, default='', max_digits=8, max_length=8),
        ),
        migrations.AlterField(
            model_name='nachspeise',
            name='preis',
            field=models.DecimalField(decimal_places=2, default='', max_digits=8, max_length=8),
        ),
        migrations.AlterField(
            model_name='snacks',
            name='preis',
            field=models.DecimalField(decimal_places=2, default='', max_digits=8, max_length=8),
        ),
        migrations.AlterField(
            model_name='vorspeise',
            name='preis',
            field=models.DecimalField(decimal_places=2, default='', max_digits=8, max_length=8),
        ),
    ]
| 35.785714
| 96
| 0.581171
| 290
| 3,006
| 5.868966
| 0.175862
| 0.074031
| 0.117509
| 0.13631
| 0.823149
| 0.823149
| 0.788484
| 0.762045
| 0.762045
| 0.735018
| 0
| 0.02598
| 0.295742
| 3,006
| 83
| 97
| 36.216867
| 0.777988
| 0.01497
| 0
| 0.87013
| 1
| 0
| 0.109159
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.012987
| 0
| 0.051948
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2b28d264548b8bd26f2a1b9be0852a2e085854d0
| 191
|
py
|
Python
|
django/code/TheSphinx/views/__init__.py
|
aitalshashank2/The-Meeting-Sphinx
|
7825c8f94748298f0c81bb0f40eeac56177526f7
|
[
"Apache-2.0"
] | 4
|
2021-05-16T18:02:30.000Z
|
2021-05-21T16:32:17.000Z
|
django/code/TheSphinx/views/__init__.py
|
aitalshashank2/The-Meeting-Sphinx
|
7825c8f94748298f0c81bb0f40eeac56177526f7
|
[
"Apache-2.0"
] | 5
|
2021-05-07T16:31:27.000Z
|
2021-06-04T12:24:28.000Z
|
django/code/TheSphinx/views/__init__.py
|
aitalshashank2/The-Meeting-Sphinx
|
7825c8f94748298f0c81bb0f40eeac56177526f7
|
[
"Apache-2.0"
] | 2
|
2021-05-24T06:56:53.000Z
|
2021-08-05T10:10:58.000Z
|
from TheSphinx.views.auth import *
from TheSphinx.views.meeting import *
from TheSphinx.views.message import *
from TheSphinx.views.recording import *
from TheSphinx.views.attendee import *
| 31.833333
| 39
| 0.811518
| 25
| 191
| 6.2
| 0.36
| 0.419355
| 0.580645
| 0.619355
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109948
| 191
| 5
| 40
| 38.2
| 0.911765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
2b661898ee4b354eed1dcf9d68a082aeda99d3c5
| 106
|
py
|
Python
|
terl/config/__init__.py
|
Bouk250/Terl
|
1a81955146f2039af164e246a74003a51fc3bc2c
|
[
"MIT"
] | null | null | null |
terl/config/__init__.py
|
Bouk250/Terl
|
1a81955146f2039af164e246a74003a51fc3bc2c
|
[
"MIT"
] | null | null | null |
terl/config/__init__.py
|
Bouk250/Terl
|
1a81955146f2039af164e246a74003a51fc3bc2c
|
[
"MIT"
] | null | null | null |
from .EnvConfigManager import EnvConfigManager
from .EnvConfigManager import config_checker,get_new_config
| 53
| 59
| 0.90566
| 12
| 106
| 7.75
| 0.583333
| 0.430108
| 0.55914
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066038
| 106
| 2
| 59
| 53
| 0.939394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
990db98862a96c4e36b3818d9bdd378e04c1c292
| 7,542
|
py
|
Python
|
cycle_2020/migrations/0002_auto_20190123_1949.py
|
RobBickel/nyt-fec
|
802df867c3b31fff8e922be00bab6f40a5db2d00
|
[
"Apache-2.0"
] | 17
|
2018-03-27T15:09:58.000Z
|
2020-05-13T11:32:43.000Z
|
cycle_2020/migrations/0002_auto_20190123_1949.py
|
RobBickel/nyt-fec
|
802df867c3b31fff8e922be00bab6f40a5db2d00
|
[
"Apache-2.0"
] | 59
|
2018-03-21T17:08:15.000Z
|
2021-12-13T19:47:37.000Z
|
cycle_2020/migrations/0002_auto_20190123_1949.py
|
RobBickel/nyt-fec
|
802df867c3b31fff8e922be00bab6f40a5db2d00
|
[
"Apache-2.0"
] | 11
|
2018-09-11T23:18:32.000Z
|
2021-12-15T08:43:58.000Z
|
# Generated by Django 2.1.5 on 2019-01-23 19:49
import django.contrib.postgres.indexes
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: drops every auto-named index on committee/filing/
    # schedule* models, makes filing.filing_id the primary key, then
    # recreates the indexes under the new auto-generated hash suffixes.
    # The Remove/Add pairs match one-to-one by fields; only the name hashes
    # changed (a consequence of the squash in the dependency below).

    dependencies = [
        ('cycle_2020', '0001_squashed_0038_auto_20190115_1828'),
    ]

    operations = [
        # --- drop old auto-named indexes -------------------------------
        migrations.RemoveIndex(
            model_name='committee',
            name='cycle_2020__fec_id_f4fdd7_idx',
        ),
        migrations.RemoveIndex(
            model_name='committee',
            name='cycle_2020__committ_c028f2_idx',
        ),
        migrations.RemoveIndex(
            model_name='committee',
            name='cycle_2020__name_se_7a2d50_gin',
        ),
        migrations.RemoveIndex(
            model_name='filing',
            name='cycle_2020__filer_i_254d8d_idx',
        ),
        migrations.RemoveIndex(
            model_name='filing',
            name='cycle_2020__filing__c7b61a_idx',
        ),
        migrations.RemoveIndex(
            model_name='filing',
            name='cycle_2020__committ_43226b_idx',
        ),
        migrations.RemoveIndex(
            model_name='schedulea',
            name='cycle_2020_address_804b9a_gin',
        ),
        migrations.RemoveIndex(
            model_name='schedulea',
            name='cycle_2020__filing__750e4e_idx',
        ),
        migrations.RemoveIndex(
            model_name='schedulea',
            name='cycle_2020__filer_c_5e3bbc_idx',
        ),
        migrations.RemoveIndex(
            model_name='schedulea',
            name='cycle_2020__name_se_05d231_gin',
        ),
        migrations.RemoveIndex(
            model_name='schedulea',
            name='cycle_2020__contrib_d1e32a_idx',
        ),
        migrations.RemoveIndex(
            model_name='schedulea',
            name='cycle_2020__occupat_d47d12_gin',
        ),
        migrations.RemoveIndex(
            model_name='schedulea',
            name='cycle_2020__address_ab4e5a_gin',
        ),
        migrations.RemoveIndex(
            model_name='scheduleb',
            name='cycle_2020__filing__7805a2_idx',
        ),
        migrations.RemoveIndex(
            model_name='scheduleb',
            name='cycle_2020__filer_c_6b2904_idx',
        ),
        migrations.RemoveIndex(
            model_name='scheduleb',
            name='cycle_2020__name_se_0a35cc_gin',
        ),
        migrations.RemoveIndex(
            model_name='scheduleb',
            name='cycle_2020__purpose_17995a_gin',
        ),
        migrations.RemoveIndex(
            model_name='scheduleb',
            name='cycle_2020__address_f469ae_gin',
        ),
        migrations.RemoveIndex(
            model_name='schedulee',
            name='cycle_2020__filing__91be16_idx',
        ),
        migrations.RemoveIndex(
            model_name='schedulee',
            name='cycle_2020__filer_c_b78a8c_idx',
        ),
        migrations.RemoveIndex(
            model_name='schedulee',
            name='cycle_2020__name_se_118184_gin',
        ),
        migrations.RemoveIndex(
            model_name='schedulee',
            name='cycle_2020__purpose_1d14f3_gin',
        ),
        migrations.RemoveIndex(
            model_name='schedulee',
            name='cycle_2020__candida_175b6a_gin',
        ),
        # --- promote filing_id to primary key --------------------------
        migrations.AlterField(
            model_name='filing',
            name='filing_id',
            field=models.IntegerField(primary_key=True, serialize=False),
        ),
        # --- recreate indexes with new auto-generated names ------------
        migrations.AddIndex(
            model_name='committee',
            index=models.Index(fields=['fec_id'], name='cycle_2020__fec_id_244f9c_idx'),
        ),
        migrations.AddIndex(
            model_name='committee',
            index=models.Index(fields=['committee_name'], name='cycle_2020__committ_319402_idx'),
        ),
        migrations.AddIndex(
            model_name='committee',
            index=django.contrib.postgres.indexes.GinIndex(fields=['name_search'], name='cycle_2020__name_se_0499a0_gin'),
        ),
        migrations.AddIndex(
            model_name='filing',
            index=models.Index(fields=['filer_id'], name='cycle_2020__filer_i_669360_idx'),
        ),
        migrations.AddIndex(
            model_name='filing',
            index=models.Index(fields=['filing_id'], name='cycle_2020__filing__0f4177_idx'),
        ),
        migrations.AddIndex(
            model_name='filing',
            index=models.Index(fields=['committee_name'], name='cycle_2020__committ_a8729f_idx'),
        ),
        migrations.AddIndex(
            model_name='schedulea',
            index=models.Index(fields=['filing_id'], name='cycle_2020__filing__4d2e86_idx'),
        ),
        migrations.AddIndex(
            model_name='schedulea',
            index=models.Index(fields=['filer_committee_id_number'], name='cycle_2020__filer_c_e2edfc_idx'),
        ),
        migrations.AddIndex(
            model_name='schedulea',
            index=django.contrib.postgres.indexes.GinIndex(fields=['name_search'], name='cycle_2020__name_se_866f1e_gin'),
        ),
        migrations.AddIndex(
            model_name='schedulea',
            index=models.Index(fields=['contribution_amount'], name='cycle_2020__contrib_3874aa_idx'),
        ),
        migrations.AddIndex(
            model_name='schedulea',
            index=django.contrib.postgres.indexes.GinIndex(fields=['occupation_search'], name='cycle_2020__occupat_7e8387_gin'),
        ),
        migrations.AddIndex(
            model_name='schedulea',
            index=django.contrib.postgres.indexes.GinIndex(fields=['address_search'], name='cycle_2020__address_fef428_gin'),
        ),
        migrations.AddIndex(
            model_name='scheduleb',
            index=models.Index(fields=['filing_id'], name='cycle_2020__filing__cf0841_idx'),
        ),
        migrations.AddIndex(
            model_name='scheduleb',
            index=models.Index(fields=['filer_committee_id_number'], name='cycle_2020__filer_c_b11732_idx'),
        ),
        migrations.AddIndex(
            model_name='scheduleb',
            index=django.contrib.postgres.indexes.GinIndex(fields=['name_search'], name='cycle_2020__name_se_9feb48_gin'),
        ),
        migrations.AddIndex(
            model_name='scheduleb',
            index=django.contrib.postgres.indexes.GinIndex(fields=['purpose_search'], name='cycle_2020__purpose_b8ba26_gin'),
        ),
        migrations.AddIndex(
            model_name='scheduleb',
            index=django.contrib.postgres.indexes.GinIndex(fields=['address_search'], name='cycle_2020__address_17a51e_gin'),
        ),
        migrations.AddIndex(
            model_name='schedulee',
            index=models.Index(fields=['filing_id'], name='cycle_2020__filing__b1d78a_idx'),
        ),
        migrations.AddIndex(
            model_name='schedulee',
            index=models.Index(fields=['filer_committee_id_number'], name='cycle_2020__filer_c_c66501_idx'),
        ),
        migrations.AddIndex(
            model_name='schedulee',
            index=django.contrib.postgres.indexes.GinIndex(fields=['name_search'], name='cycle_2020__name_se_bbc60d_gin'),
        ),
        migrations.AddIndex(
            model_name='schedulee',
            index=django.contrib.postgres.indexes.GinIndex(fields=['purpose_search'], name='cycle_2020__purpose_3469db_gin'),
        ),
        migrations.AddIndex(
            model_name='schedulee',
            index=django.contrib.postgres.indexes.GinIndex(fields=['candidate_search'], name='cycle_2020__candida_55a787_gin'),
        ),
    ]
| 37.71
| 128
| 0.608194
| 734
| 7,542
| 5.797003
| 0.158038
| 0.097297
| 0.137485
| 0.162162
| 0.819506
| 0.79577
| 0.79577
| 0.780259
| 0.756522
| 0.400705
| 0
| 0.072982
| 0.280562
| 7,542
| 199
| 129
| 37.899497
| 0.711205
| 0.005967
| 0
| 0.709845
| 1
| 0
| 0.28032
| 0.194663
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.010363
| 0
| 0.025907
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
9985e828f66d8f690512e605f47c2643c708bd60
| 38,730
|
py
|
Python
|
unet3d/models/pytorch/autoencoder/variational.py
|
zjdcts/CSAM-U-Net
|
91fae3c6b4fc7247ba9ee2dc6e64b51da569bf2e
|
[
"MIT"
] | 1
|
2021-07-28T03:36:34.000Z
|
2021-07-28T03:36:34.000Z
|
unet3d/models/pytorch/autoencoder/variational.py
|
zjdcts/CSAM-U-Net
|
91fae3c6b4fc7247ba9ee2dc6e64b51da569bf2e
|
[
"MIT"
] | null | null | null |
unet3d/models/pytorch/autoencoder/variational.py
|
zjdcts/CSAM-U-Net
|
91fae3c6b4fc7247ba9ee2dc6e64b51da569bf2e
|
[
"MIT"
] | 1
|
2021-07-28T03:36:37.000Z
|
2021-07-28T03:36:37.000Z
|
from functools import partial
import numpy as np
import torch.nn as nn
import torch
from unet3d.models.pytorch.classification.decoder import MyronenkoDecoder, MirroredDecoder
from unet3d.models.pytorch.classification.myronenko import MyronenkoEncoder, MyronenkoConvolutionBlock
from unet3d.models.pytorch.classification.resnet import conv1x1x1
class VariationalBlock(nn.Module):
    """Variational bottleneck layer.

    Projects the input to a concatenated (mu, logvar) pair, samples a latent
    vector z with the reparameterization trick, and maps z to ``out_size``.
    With ``return_parameters=True`` the forward pass also returns z.
    """

    def __init__(self, in_size, n_features, out_size, return_parameters=False):
        super().__init__()
        self.n_features = n_features
        self.return_parameters = return_parameters
        # dense1 emits mu and logvar concatenated along dim 1, hence * 2.
        self.dense1 = nn.Linear(in_size, out_features=n_features * 2)
        self.dense2 = nn.Linear(self.n_features, out_size)

    @staticmethod
    def reparameterize(mu, logvar):
        # z = mu + sigma * eps with eps ~ N(0, I); keeps sampling differentiable.
        sigma = torch.exp(0.5 * logvar)
        noise = torch.randn_like(sigma)
        return mu + noise * sigma

    def forward(self, x):
        projected = self.dense1(x)
        mu, logvar = torch.split(projected, self.n_features, dim=1)
        z = self.reparameterize(mu, logvar)
        out = self.dense2(z)
        if self.return_parameters:
            return out, mu, logvar, z
        return out, mu, logvar
class ConvolutionalAutoEncoder(nn.Module):
    """Encoder/decoder pair with a 1x1x1 output projection and optional
    final activation.

    The decoder class/blocks are derived from the encoder configuration when
    ``decoder_mirrors_encoder`` is set; otherwise a one-block-per-level
    MyronenkoDecoder is the fallback.
    """

    def __init__(self, input_shape=None, n_features=1, base_width=32, encoder_blocks=None, decoder_blocks=None,
                 feature_dilation=2, downsampling_stride=2, interpolation_mode="trilinear",
                 encoder_class=MyronenkoEncoder, decoder_class=None, n_outputs=None, layer_widths=None,
                 decoder_mirrors_encoder=False, activation=None, use_transposed_convolutions=True, kernel_size=3):
        super().__init__()
        self.base_width = base_width
        print("use_transposed_convolutions", use_transposed_convolutions)
        if encoder_blocks is None:
            encoder_blocks = [1, 2, 2, 4]
        self.encoder = encoder_class(n_features=n_features, base_width=base_width, layer_blocks=encoder_blocks,
                                     feature_dilation=feature_dilation, downsampling_stride=downsampling_stride,
                                     layer_widths=layer_widths, kernel_size=kernel_size)
        decoder_class, decoder_blocks = self.set_decoder_blocks(decoder_class, encoder_blocks,
                                                                decoder_mirrors_encoder, decoder_blocks)
        self.decoder = decoder_class(base_width=base_width, layer_blocks=decoder_blocks,
                                     upsampling_scale=downsampling_stride,
                                     feature_reduction_scale=feature_dilation,
                                     upsampling_mode=interpolation_mode, layer_widths=layer_widths,
                                     use_transposed_convolutions=use_transposed_convolutions,
                                     kernel_size=kernel_size)
        # NOTE: the output projection is sized to n_features (reconstruction),
        # not n_outputs, matching the original behavior.
        self.set_final_convolution(n_features)
        self.set_activation(activation=activation)

    def set_final_convolution(self, n_outputs):
        # 1x1x1 convolution collapsing base_width channels to n_outputs.
        self.final_convolution = conv1x1x1(in_planes=self.base_width, out_planes=n_outputs, stride=1)

    def set_activation(self, activation):
        # Map the activation name to a module factory; anything else means
        # "no final activation".
        factories = {"sigmoid": nn.Sigmoid, "softmax": lambda: nn.Softmax(dim=1)}
        factory = factories.get(activation)
        self.activation = factory() if factory is not None else None

    def set_decoder_blocks(self, decoder_class, encoder_blocks, decoder_mirrors_encoder, decoder_blocks):
        # Resolve the decoder class/blocks, mirroring the encoder layout
        # when requested.
        if decoder_mirrors_encoder:
            decoder_blocks = encoder_blocks
            decoder_class = decoder_class or MirroredDecoder
        elif decoder_blocks is None:
            decoder_blocks = [1] * len(encoder_blocks)
        if decoder_class is None:
            decoder_class = MyronenkoDecoder
        return decoder_class, decoder_blocks

    def forward(self, x):
        out = self.final_convolution(self.decoder(self.encoder(x)))
        if self.activation is not None:
            out = self.activation(out)
        return out
class DeepFuseConvolutionalAutoEncoder(nn.Module):
    """Autoencoder variant using a second ("fuse") encoder/decoder pair.

    Only ``encoder2``/``decoder2`` are built (the primary pair is commented
    out in the original). ``decoder2`` is expected to return at least three
    feature maps; each gets its own 1x1x1 projection and activation, and the
    forward pass returns only the third head.

    NOTE(review): ``encoder_class2`` and ``decoder_class2`` default to None,
    so both must be supplied by the caller or construction fails.
    """

    def __init__(self, input_shape=None, n_features=1, base_width=32, encoder_blocks=None, decoder_blocks=None,
                 feature_dilation=2, downsampling_stride=2, interpolation_mode="trilinear",
                 encoder_class=MyronenkoEncoder, encoder_class2=None, decoder_class=None, decoder_class2=None,
                 n_outputs=None, layer_widths=None,
                 decoder_mirrors_encoder=False, activation=None, use_transposed_convolutions=True, kernel_size=3):
        super(DeepFuseConvolutionalAutoEncoder, self).__init__()
        self.base_width = base_width
        print("use_transposed_convolutions", use_transposed_convolutions)
        if encoder_blocks is None:
            encoder_blocks = [1, 2, 2, 4]
        self.encoder2 = encoder_class2(n_features=n_features, base_width=base_width, layer_blocks=encoder_blocks,
                                       feature_dilation=feature_dilation, downsampling_stride=downsampling_stride,
                                       layer_widths=layer_widths, kernel_size=kernel_size)
        decoder_class, decoder_blocks = self.set_decoder_blocks(decoder_class, encoder_blocks, decoder_mirrors_encoder,
                                                                decoder_blocks)
        self.decoder2 = decoder_class2(base_width=base_width, layer_blocks=decoder_blocks,
                                       upsampling_scale=downsampling_stride, feature_reduction_scale=feature_dilation,
                                       upsampling_mode=interpolation_mode, layer_widths=layer_widths,
                                       use_transposed_convolutions=use_transposed_convolutions,
                                       kernel_size=kernel_size)
        self.set_final_convolution(n_features)
        self.set_activation(activation=activation)

    def set_final_convolution(self, n_outputs):
        # One 1x1x1 output projection per decoder head.
        self.final_convolution1 = conv1x1x1(in_planes=self.base_width, out_planes=n_outputs, stride=1)
        self.final_convolution2 = conv1x1x1(in_planes=self.base_width, out_planes=n_outputs, stride=1)
        self.final_convolution3 = conv1x1x1(in_planes=self.base_width, out_planes=n_outputs, stride=1)

    def set_activation(self, activation):
        """Create the per-head activations.

        Bug fix: the original defined ``activation1..3`` only for "sigmoid",
        so forward() raised AttributeError for "softmax" or no activation.
        All three attributes are now defined in every branch; None means no
        activation is applied to that head.
        """
        if activation == "sigmoid":
            self.activation1 = nn.Sigmoid()
            self.activation2 = nn.Sigmoid()
            self.activation3 = nn.Sigmoid()
        elif activation == "softmax":
            # self.activation retained for backward compatibility with the
            # original attribute layout.
            self.activation = nn.Softmax(dim=1)
            self.activation1 = nn.Softmax(dim=1)
            self.activation2 = nn.Softmax(dim=1)
            self.activation3 = nn.Softmax(dim=1)
        else:
            self.activation = None
            self.activation1 = None
            self.activation2 = None
            self.activation3 = None

    def set_decoder_blocks(self, decoder_class, encoder_blocks, decoder_mirrors_encoder, decoder_blocks):
        # Resolve decoder class/blocks; mirror the encoder layout if asked.
        if decoder_mirrors_encoder:
            decoder_blocks = encoder_blocks
            if decoder_class is None:
                decoder_class = MirroredDecoder
        elif decoder_blocks is None:
            decoder_blocks = [1] * len(encoder_blocks)
        if decoder_class is None:
            decoder_class = MyronenkoDecoder
        return decoder_class, decoder_blocks

    def forward(self, x):
        x = self.encoder2(x)
        x = self.decoder2(x)
        # NOTE(review): heads 2 and 3 are applied to swapped decoder outputs
        # ([1] -> conv3, [2] -> conv2) in the original; preserved -- confirm
        # this is intentional.
        x1 = self.final_convolution1(x[0])
        x2 = self.final_convolution3(x[1])
        x3 = self.final_convolution2(x[2])
        if self.activation1 is not None:
            x1 = self.activation1(x1)
        if self.activation2 is not None:
            x2 = self.activation2(x2)
        if self.activation3 is not None:
            x3 = self.activation3(x3)
        # Only the third head is returned; x1/x2 are computed for parity with
        # the original (which also discarded them).
        return x3
class CascadeConvolutionalAutoEncoder(nn.Module):
    """Two-stage cascade: a first encoder/decoder produces a coarse output,
    which is concatenated with the input and fed to a second encoder with
    one (inference) or two (training) decoder heads.

    ``is_skip`` selects ``decoder_class2`` (skip-connection decoders that
    consume the first encoder's features) for the second-stage decoders;
    otherwise ``decoder_class`` is reused and ``base_width`` is doubled for
    the second stage.
    """

    def __init__(self, input_shape=None, n_features=1, base_width=32, encoder_blocks=None, decoder_blocks=None,
                 feature_dilation=2, downsampling_stride=2, interpolation_mode="trilinear",
                 encoder_class=MyronenkoEncoder, decoder_class=None, decoder_class2=None, n_outputs=None,
                 is_training=False,
                 layer_widths=None,
                 decoder_mirrors_encoder=False, activation=None, use_transposed_convolutions=False, kernel_size=3,
                 is_skip=False):
        super(CascadeConvolutionalAutoEncoder, self).__init__()
        self.base_width = base_width
        self.is_training = is_training
        self.is_skip = is_skip
        print("is_skip", is_skip)
        if encoder_blocks is None:
            encoder_blocks = [1, 2, 2, 4]
        # Stage 1: plain encoder/decoder producing the coarse prediction.
        self.encoder = encoder_class(n_features=n_features, base_width=base_width, layer_blocks=encoder_blocks,
                                     feature_dilation=feature_dilation, downsampling_stride=downsampling_stride,
                                     layer_widths=layer_widths, kernel_size=kernel_size)
        decoder_class, decoder_blocks = self.set_decoder_blocks(decoder_class, encoder_blocks, decoder_mirrors_encoder,
                                                                decoder_blocks)
        self.decoder = decoder_class(base_width=base_width, layer_blocks=decoder_blocks,
                                     upsampling_scale=downsampling_stride, feature_reduction_scale=feature_dilation,
                                     upsampling_mode=interpolation_mode, layer_widths=layer_widths,
                                     use_transposed_convolutions=use_transposed_convolutions,
                                     kernel_size=kernel_size)
        self.final_convolution = self.set_final_convolution(n_outputs)
        self.activation = self.set_activation(activation=activation)
        # Without skip connections the second stage is widened; note that
        # self.base_width is overwritten, so all final convolutions built
        # after this point use the doubled width.
        if not is_skip:
            base_width *= 2
            self.base_width = base_width
        self.decoder2 = None
        # Stage 2 encoder sees the input concatenated with the stage-1 output.
        self.encoder2 = encoder_class(n_features=n_features + n_outputs, base_width=base_width,
                                      layer_blocks=encoder_blocks,
                                      feature_dilation=feature_dilation, downsampling_stride=downsampling_stride,
                                      layer_widths=layer_widths, kernel_size=kernel_size)
        if is_skip:
            self.decoder2 = decoder_class2(base_width=base_width, layer_blocks=decoder_blocks,
                                           upsampling_scale=downsampling_stride,
                                           feature_reduction_scale=feature_dilation,
                                           upsampling_mode=interpolation_mode, layer_widths=layer_widths,
                                           use_transposed_convolutions=use_transposed_convolutions,
                                           kernel_size=kernel_size)
        else:
            self.decoder2 = decoder_class(base_width=base_width, layer_blocks=decoder_blocks,
                                          upsampling_scale=downsampling_stride,
                                          feature_reduction_scale=feature_dilation,
                                          upsampling_mode=interpolation_mode, layer_widths=layer_widths,
                                          use_transposed_convolutions=use_transposed_convolutions,
                                          kernel_size=kernel_size)
        self.final_convolution2 = self.set_final_convolution(n_outputs)
        self.activation2 = self.set_activation(activation=activation)
        self.decoder3 = None
        # Third decoder (training-only head); always built with
        # use_transposed_convolutions=False -- presumably deliberate, confirm.
        if is_skip:
            self.decoder3 = decoder_class2(base_width=base_width, layer_blocks=decoder_blocks,
                                           upsampling_scale=downsampling_stride,
                                           feature_reduction_scale=feature_dilation,
                                           upsampling_mode=interpolation_mode, layer_widths=layer_widths,
                                           use_transposed_convolutions=False,
                                           kernel_size=kernel_size)
        else:
            self.decoder3 = decoder_class(base_width=base_width, layer_blocks=decoder_blocks,
                                          upsampling_scale=downsampling_stride,
                                          feature_reduction_scale=feature_dilation,
                                          upsampling_mode=interpolation_mode, layer_widths=layer_widths,
                                          use_transposed_convolutions=False,
                                          kernel_size=kernel_size)
        self.final_convolution3 = self.set_final_convolution(n_outputs)
        self.activation3 = self.set_activation(activation=activation)

    def set_final_convolution(self, n_outputs):
        # Returns (rather than assigns) the projection; callers bind it.
        return conv1x1x1(in_planes=self.base_width, out_planes=n_outputs, stride=1)

    def set_activation(self, activation):
        # Returns the activation module, or None for "no activation".
        if activation == "sigmoid":
            return nn.Sigmoid()
        elif activation == "softmax":
            return nn.Softmax(dim=1)
        else:
            return None

    def set_decoder_blocks(self, decoder_class, encoder_blocks, decoder_mirrors_encoder, decoder_blocks):
        # Resolve decoder class/blocks; mirror the encoder layout if asked.
        if decoder_mirrors_encoder:
            decoder_blocks = encoder_blocks
            if decoder_class is None:
                decoder_class = MirroredDecoder
        elif decoder_blocks is None:
            decoder_blocks = [1] * len(encoder_blocks)
        if decoder_class is None:
            decoder_class = MyronenkoDecoder
        return decoder_class, decoder_blocks

    def forward(self, x):
        y = None
        identity = x
        # Stage 1: coarse prediction from the raw input.
        x = self.encoder(x)
        pre_inputs = x  # stage-1 encoder features, reused as skip inputs
        x = self.decoder(x)
        x = self.final_convolution(x)
        # The activated stage-1 output y is only kept as an extra training
        # target; the un-activated logits are what get concatenated below.
        if self.is_training and self.activation is not None:
            y = self.activation(x)
        # Stage 2: input fused with the stage-1 logits along channels.
        x = torch.cat((x, identity), 1)
        x = self.encoder2(x)
        if self.is_skip:
            z1 = self.decoder2(x, pre_inputs)
        else:
            z1 = self.decoder2(x, None)
        z1 = self.final_convolution2(z1)
        if self.activation2 is not None:
            z1 = self.activation2(z1)
        if self.is_training:
            # Training adds the auxiliary decoder3 head and returns all heads.
            if self.is_skip:
                z2 = self.decoder3(x, pre_inputs)
            else:
                z2 = self.decoder3(x, None)
            z2 = self.final_convolution3(z2)
            if self.activation3 is not None:
                z2 = self.activation3(z2)
            outputs = [y, z1, z2]
        else:
            outputs = z1
        return outputs
class CascadeKQVConvolutionalAutoEncoder(nn.Module):
def __init__(self, input_shape=None, n_features=1, base_width=32, encoder_blocks=None, decoder_blocks=None,
feature_dilation=2, downsampling_stride=2, interpolation_mode="trilinear",
encoder_class=MyronenkoEncoder, encoder_class2=None, decoder_class=None, decoder_class2=None,
n_outputs=None, is_training=False,
layer_widths=None,
decoder_mirrors_encoder=False, activation=None, use_transposed_convolutions=False, kernel_size=3,
is_skip=False):
super(CascadeKQVConvolutionalAutoEncoder, self).__init__()
self.base_width = base_width
self.is_training = is_training
self.is_skip = is_skip
print("is_skip", is_skip)
if encoder_blocks is None:
encoder_blocks = [1, 2, 2, 4]
self.encoder = encoder_class(n_features=n_features, base_width=base_width, layer_blocks=encoder_blocks,
feature_dilation=feature_dilation, downsampling_stride=downsampling_stride,
layer_widths=layer_widths, kernel_size=kernel_size)
decoder_class, decoder_blocks = self.set_decoder_blocks(decoder_class, encoder_blocks, decoder_mirrors_encoder,
decoder_blocks)
self.decoder = decoder_class(base_width=base_width, layer_blocks=decoder_blocks,
upsampling_scale=downsampling_stride, feature_reduction_scale=feature_dilation,
upsampling_mode=interpolation_mode, layer_widths=layer_widths,
use_transposed_convolutions=use_transposed_convolutions,
kernel_size=kernel_size)
self.final_convolution = self.set_final_convolution(n_outputs)
self.activation = self.set_activation(activation=activation)
self.decoder2 = None
self.encoder2 = encoder_class2(n_features=n_features + n_outputs, base_width=base_width,
layer_blocks=encoder_blocks,
feature_dilation=feature_dilation, downsampling_stride=downsampling_stride,
layer_widths=layer_widths, kernel_size=kernel_size)
if is_skip:
self.decoder2 = decoder_class2(base_width=base_width, layer_blocks=decoder_blocks,
upsampling_scale=downsampling_stride,
feature_reduction_scale=feature_dilation,
upsampling_mode=interpolation_mode, layer_widths=layer_widths,
use_transposed_convolutions=use_transposed_convolutions,
kernel_size=kernel_size)
else:
self.decoder2 = decoder_class(base_width=base_width, layer_blocks=decoder_blocks,
upsampling_scale=downsampling_stride,
feature_reduction_scale=feature_dilation,
upsampling_mode=interpolation_mode, layer_widths=layer_widths,
use_transposed_convolutions=use_transposed_convolutions,
kernel_size=kernel_size)
self.final_convolution2 = self.set_final_convolution(n_outputs)
self.activation2 = self.set_activation(activation=activation)
self.decoder3 = None
if is_skip:
self.decoder3 = decoder_class2(base_width=base_width, layer_blocks=decoder_blocks,
upsampling_scale=downsampling_stride,
feature_reduction_scale=feature_dilation,
upsampling_mode=interpolation_mode, layer_widths=layer_widths,
use_transposed_convolutions=False,
kernel_size=kernel_size)
else:
self.decoder3 = decoder_class(base_width=base_width, layer_blocks=decoder_blocks,
upsampling_scale=downsampling_stride,
feature_reduction_scale=feature_dilation,
upsampling_mode=interpolation_mode, layer_widths=layer_widths,
use_transposed_convolutions=False,
kernel_size=kernel_size)
self.final_convolution3 = self.set_final_convolution(n_outputs)
self.activation3 = self.set_activation(activation=activation)
def set_final_convolution(self, n_outputs):
    """Build the 1x1x1 output projection from the model's base width to n_outputs channels."""
    width = self.base_width
    return conv1x1x1(in_planes=width, out_planes=n_outputs, stride=1)
def set_activation(self, activation):
    """Return the output nonlinearity for the given name.

    "sigmoid" -> nn.Sigmoid, "softmax" -> nn.Softmax over the channel dim;
    anything else yields None (raw logits).
    """
    if activation == "sigmoid":
        return nn.Sigmoid()
    if activation == "softmax":
        return nn.Softmax(dim=1)
    return None
def set_decoder_blocks(self, decoder_class, encoder_blocks, decoder_mirrors_encoder, decoder_blocks):
    """Resolve the decoder class and its per-level block counts.

    Mirroring the encoder copies its block layout (default class MirroredDecoder);
    otherwise a missing layout defaults to one block per encoder level (default
    class MyronenkoDecoder). Explicitly supplied values are kept untouched.
    """
    if decoder_mirrors_encoder:
        decoder_blocks = encoder_blocks
        decoder_class = MirroredDecoder if decoder_class is None else decoder_class
    elif decoder_blocks is None:
        decoder_blocks = [1 for _ in encoder_blocks]
        decoder_class = MyronenkoDecoder if decoder_class is None else decoder_class
    return decoder_class, decoder_blocks
def forward(self, x):
    """Two-stage cascade forward pass.

    Stage 1 encodes/decodes the input into a coarse prediction; that prediction
    is concatenated channel-wise with the raw input and refined by a second
    encoder with one (inference) or two (training) decoder heads.

    Returns ``[y, z1, z2]`` when ``self.is_training`` is True, otherwise ``z1``.
    """
    y = None
    identity = x  # keep the raw input for the stage-2 concatenation
    x = self.encoder(x)
    pre_inputs = x  # stage-1 bottleneck features, reused as skip input for decoder2/3
    x, q = self.decoder(x)  # q: auxiliary decoder outputs forwarded to encoder2 — TODO confirm semantics
    x = self.final_convolution(x)
    if self.is_training and self.activation is not None:
        y = self.activation(x)  # stage-1 prediction, returned only during training
    x = torch.cat((x, identity), 1)  # channel-wise concat: coarse logits + original input
    x = self.encoder2(x, q)
    if self.is_skip:
        # skip variant: decoder2 consumes the stage-1 bottleneck and returns a single tensor
        z1 = self.decoder2(x, pre_inputs)
    else:
        # non-skip variant returns a (features, aux) pair; aux is discarded
        z1, t = self.decoder2(x, None)
    z1 = self.final_convolution2(z1)
    if self.activation2 is not None:
        z1 = self.activation2(z1)
    if self.is_training:
        # auxiliary third head, used only while training
        if self.is_skip:
            z2 = self.decoder3(x, pre_inputs)
        else:
            z2, t = self.decoder3(x, None)
        z2 = self.final_convolution3(z2)
        if self.activation3 is not None:
            z2 = self.activation3(z2)
        outputs = [y, z1, z2]
    else:
        outputs = z1
    return outputs
class AttentionGatedConvolutionalAutoEncoder(nn.Module):
    """Encoder/decoder autoencoder with four deep-supervision (DSV) heads.

    Each DSV head projects one decoder level to ``n_outputs`` channels and
    upsamples it to full resolution; the four maps are concatenated and fused
    by a final 1x1x1 convolution.
    """

    def __init__(self, input_shape=None, n_features=1, base_width=32, encoder_blocks=None, decoder_blocks=None,
                 feature_dilation=2, downsampling_stride=2, interpolation_mode="trilinear",
                 encoder_class=MyronenkoEncoder, decoder_class=None, n_outputs=None, layer_widths=None,
                 decoder_mirrors_encoder=False, activation=None, use_transposed_convolutions=False, kernel_size=3):
        super(AttentionGatedConvolutionalAutoEncoder, self).__init__()
        self.base_width = base_width
        if encoder_blocks is None:
            encoder_blocks = [1, 2, 2, 4]  # default: 4 resolution levels
        self.encoder = encoder_class(n_features=n_features, base_width=base_width, layer_blocks=encoder_blocks,
                                     feature_dilation=feature_dilation, downsampling_stride=downsampling_stride,
                                     layer_widths=layer_widths, kernel_size=kernel_size)
        decoder_class, decoder_blocks = self.set_decoder_blocks(decoder_class, encoder_blocks, decoder_mirrors_encoder,
                                                                decoder_blocks)
        self.decoder = decoder_class(base_width=base_width, layer_blocks=decoder_blocks,
                                     upsampling_scale=downsampling_stride, feature_reduction_scale=feature_dilation,
                                     upsampling_mode=interpolation_mode, layer_widths=layer_widths,
                                     use_transposed_convolutions=use_transposed_convolutions,
                                     kernel_size=kernel_size)
        # deep supervision: one head per decoder level (widths 8x/4x/2x/1x base_width)
        self.dsv4 = UnetDsv3(in_size=self.base_width * 8, out_size=n_outputs, scale_factor=8)
        self.dsv3 = UnetDsv3(in_size=self.base_width * 4, out_size=n_outputs, scale_factor=4)
        self.dsv2 = UnetDsv3(in_size=self.base_width * 2, out_size=n_outputs, scale_factor=2)
        self.dsv1 = nn.Conv3d(in_channels=self.base_width, out_channels=n_outputs, kernel_size=1)
        # fuses the concatenated DSV maps (n_outputs per decoder level) into the final output
        self.final_convolution = conv1x1x1(in_planes=n_outputs * len(decoder_blocks), out_planes=n_outputs,
                                           stride=1)
        self.set_activation(activation=activation)

    def set_final_convolution(self, n_inputs, n_outputs):
        # Intentionally a no-op: the final convolution is built directly in __init__.
        pass

    def set_activation(self, activation):
        """Store the output nonlinearity ("sigmoid"/"softmax") or None for raw logits."""
        if activation == "sigmoid":
            self.activation = nn.Sigmoid()
        elif activation == "softmax":
            self.activation = nn.Softmax(dim=1)
        else:
            self.activation = None

    def set_decoder_blocks(self, decoder_class, encoder_blocks, decoder_mirrors_encoder, decoder_blocks):
        """Resolve the decoder class and per-level block counts (mirrors encoder or defaults to 1 per level)."""
        if decoder_mirrors_encoder:
            decoder_blocks = encoder_blocks
            if decoder_class is None:
                decoder_class = MirroredDecoder
        elif decoder_blocks is None:
            decoder_blocks = [1] * len(encoder_blocks)
            if decoder_class is None:
                decoder_class = MyronenkoDecoder
        return decoder_class, decoder_blocks

    def forward(self, x):
        """Encode, decode, then fuse the four upsampled deep-supervision maps."""
        x = self.encoder(x)
        x = self.decoder(x)  # assumes decoder returns per-level features, coarsest first — TODO confirm
        dsv4 = self.dsv4(x[0])
        dsv3 = self.dsv3(x[1])
        dsv2 = self.dsv2(x[2])
        dsv1 = self.dsv1(x[3])
        x = self.final_convolution(torch.cat([dsv1, dsv2, dsv3, dsv4], dim=1))
        if self.activation is not None:
            x = self.activation(x)
        return x
class AttentionCascadeConvolutionalAutoEncoder(nn.Module):
    """Two-stage cascade autoencoder.

    Stage 1 (encoder/decoder, optionally with deep supervision) produces a
    coarse prediction that is concatenated with the input and refined by a
    second, wider (2x base_width) encoder/decoder pair; a third decoder head
    provides an auxiliary output during training.
    """

    def __init__(self, input_shape=None, n_features=1, base_width=32, encoder_blocks=None, decoder_blocks=None,
                 feature_dilation=2, downsampling_stride=2, interpolation_mode="trilinear",
                 encoder_class=MyronenkoEncoder, decoder_class=None, decoder_class2=None, n_outputs=None,
                 is_training=False, is_dsv=False, is_half=False, layer_widths=None, decoder_mirrors_encoder=False,
                 activation=None, use_transposed_convolutions=False, kernel_size=3):
        super(AttentionCascadeConvolutionalAutoEncoder, self).__init__()
        self.base_width = base_width
        self.is_training = is_training
        if encoder_blocks is None:
            encoder_blocks = [1, 2, 2, 4]  # default: 4 resolution levels
        self.encoder = encoder_class(n_features=n_features, base_width=base_width, layer_blocks=encoder_blocks,
                                     feature_dilation=feature_dilation, downsampling_stride=downsampling_stride,
                                     layer_widths=layer_widths, kernel_size=kernel_size)
        decoder_class, decoder_blocks = self.set_decoder_blocks(decoder_class, encoder_blocks, decoder_mirrors_encoder,
                                                                decoder_blocks)
        self.decoder = decoder_class(base_width=base_width, layer_blocks=decoder_blocks,
                                     upsampling_scale=downsampling_stride, feature_reduction_scale=feature_dilation,
                                     upsampling_mode=interpolation_mode, layer_widths=layer_widths,
                                     use_transposed_convolutions=use_transposed_convolutions,
                                     kernel_size=kernel_size)
        self.is_dsv = is_dsv
        if self.is_dsv:
            # deep supervision for stage 1 (three heads: 4x/2x/1x base_width)
            self.dsv3 = UnetDsv3(in_size=self.base_width * 4, out_size=n_outputs, scale_factor=4)
            self.dsv2 = UnetDsv3(in_size=self.base_width * 2, out_size=n_outputs, scale_factor=2)
            self.dsv1 = nn.Conv3d(in_channels=self.base_width, out_channels=n_outputs, kernel_size=1)
            # forward() concatenates exactly the three DSV maps above, so the
            # fusion conv takes n_outputs * 3 channels (was n_outputs * len(decoder_blocks),
            # which mismatched the concatenation whenever len(decoder_blocks) != 3).
            self.final_convolution = conv1x1x1(in_planes=n_outputs * 3, out_planes=n_outputs,
                                               stride=1)
        else:
            self.final_convolution = self.set_final_convolution(n_outputs)
        self.activation = self.set_activation(activation=activation)
        # stage 2 runs at double width; set_final_convolution reads self.base_width,
        # so final_convolution2/3 below are built for the widened decoders.
        self.base_width = base_width * 2
        self.encoder2 = encoder_class(n_features=n_features + n_outputs, base_width=base_width * 2,
                                      layer_blocks=encoder_blocks,
                                      feature_dilation=feature_dilation, downsampling_stride=downsampling_stride,
                                      layer_widths=layer_widths, kernel_size=kernel_size)
        if is_half:
            self.decoder2 = decoder_class2(base_width=base_width * 2, layer_blocks=decoder_blocks,
                                           upsampling_scale=downsampling_stride,
                                           feature_reduction_scale=feature_dilation,
                                           upsampling_mode=interpolation_mode, layer_widths=layer_widths,
                                           use_transposed_convolutions=use_transposed_convolutions,
                                           kernel_size=kernel_size)
        else:
            self.decoder2 = decoder_class(base_width=base_width * 2, layer_blocks=decoder_blocks,
                                          upsampling_scale=downsampling_stride,
                                          feature_reduction_scale=feature_dilation,
                                          upsampling_mode=interpolation_mode, layer_widths=layer_widths,
                                          use_transposed_convolutions=use_transposed_convolutions,
                                          kernel_size=kernel_size)
        self.final_convolution2 = self.set_final_convolution(n_outputs)
        self.activation2 = self.set_activation(activation=activation)
        if is_half:
            # auxiliary training head; always uses interpolation upsampling
            self.decoder3 = decoder_class2(base_width=base_width * 2, layer_blocks=decoder_blocks,
                                           upsampling_scale=downsampling_stride,
                                           feature_reduction_scale=feature_dilation,
                                           upsampling_mode=interpolation_mode, layer_widths=layer_widths,
                                           use_transposed_convolutions=False,
                                           kernel_size=kernel_size)
        else:
            self.decoder3 = decoder_class(base_width=base_width * 2, layer_blocks=decoder_blocks,
                                          upsampling_scale=downsampling_stride,
                                          feature_reduction_scale=feature_dilation,
                                          upsampling_mode=interpolation_mode, layer_widths=layer_widths,
                                          use_transposed_convolutions=False,
                                          kernel_size=kernel_size)
        self.final_convolution3 = self.set_final_convolution(n_outputs)
        self.activation3 = self.set_activation(activation=activation)

    def set_final_convolution(self, n_outputs):
        """Build a 1x1x1 projection from the *current* self.base_width to n_outputs channels."""
        return conv1x1x1(in_planes=self.base_width, out_planes=n_outputs, stride=1)

    def set_activation(self, activation):
        """Return the output nonlinearity ("sigmoid"/"softmax") or None for raw logits."""
        if activation == "sigmoid":
            return nn.Sigmoid()
        elif activation == "softmax":
            return nn.Softmax(dim=1)
        else:
            return None

    def set_decoder_blocks(self, decoder_class, encoder_blocks, decoder_mirrors_encoder, decoder_blocks):
        """Resolve the decoder class and per-level block counts (mirrors encoder or defaults to 1 per level)."""
        if decoder_mirrors_encoder:
            decoder_blocks = encoder_blocks
            if decoder_class is None:
                decoder_class = MirroredDecoder
        elif decoder_blocks is None:
            decoder_blocks = [1] * len(encoder_blocks)
            if decoder_class is None:
                decoder_class = MyronenkoDecoder
        return decoder_class, decoder_blocks

    def forward(self, x):
        """Cascade forward pass.

        Returns ``[y, z1, z2]`` while training (stage-1 prediction plus both
        stage-2 heads), otherwise just the main stage-2 output ``z1``.
        """
        y = None
        identity = x  # keep the raw input for the stage-2 concatenation
        x = self.encoder(x)
        x = self.decoder(x)  # assumes decoder returns per-level features — TODO confirm ordering
        if self.is_dsv:
            dsv3 = self.dsv3(x[0])
            dsv2 = self.dsv2(x[1])
            dsv1 = self.dsv1(x[2])
            x = self.final_convolution(torch.cat([dsv1, dsv2, dsv3], dim=1))
        else:
            x = x[-1]  # finest-resolution features
            x = self.final_convolution(x)
        if self.is_training and self.activation is not None:
            y = self.activation(x)  # stage-1 prediction, returned only during training
        x = torch.cat((x, identity), 1)  # channel-wise concat: coarse logits + original input
        x = self.encoder2(x)
        z1 = self.decoder2(x)
        z1 = z1[-1]
        z1 = self.final_convolution2(z1)
        if self.activation2 is not None:
            z1 = self.activation2(z1)
        if self.is_training:
            z2 = self.decoder3(x)
            z2 = z2[-1]
            z2 = self.final_convolution3(z2)
            if self.activation3 is not None:
                z2 = self.activation3(z2)
            outputs = [y, z1, z2]
        else:
            outputs = z1
        return outputs
class UnetDsv3(nn.Module):
    """Deep-supervision head: 1x1x1 conv to ``out_size`` channels, then trilinear upsampling."""

    def __init__(self, in_size, out_size, scale_factor):
        super(UnetDsv3, self).__init__()
        stages = [
            nn.Conv3d(in_size, out_size, kernel_size=1, stride=1, padding=0),
            nn.Upsample(scale_factor=scale_factor, mode='trilinear', align_corners=False),
        ]
        self.dsv = nn.Sequential(*stages)

    def forward(self, x):
        return self.dsv(x)
class MyronenkoVariationalLayer(nn.Module):
    """Variational bottleneck: strided conv reduction, dense VAE block over the
    flattened features, then a 1x1x1 projection back to ``in_features`` and
    interpolation upsampling to undo the conv stride.
    """

    def __init__(self, in_features, input_shape, reduced_features=16, latent_features=128,
                 conv_block=MyronenkoConvolutionBlock, conv_stride=2, upsampling_mode="trilinear",
                 align_corners_upsampling=False):
        super(MyronenkoVariationalLayer, self).__init__()
        self.in_conv = conv_block(in_planes=in_features, planes=reduced_features, stride=conv_stride)
        # shape of the reduced feature map: (reduced_features, *spatial / conv_stride).
        # `np.int` was removed in NumPy 1.24; the builtin `int` is the documented replacement.
        self.reduced_shape = tuple(np.asarray((reduced_features, *np.divide(input_shape, conv_stride)), dtype=int))
        self.in_size = np.prod(self.reduced_shape, dtype=int)
        self.var_block = VariationalBlock(in_size=self.in_size, out_size=self.in_size, n_features=latent_features)
        self.relu = nn.ReLU(inplace=True)
        self.out_conv = conv1x1x1(in_planes=reduced_features, out_planes=in_features, stride=1)
        self.upsample = partial(nn.functional.interpolate, scale_factor=conv_stride, mode=upsampling_mode,
                                align_corners=align_corners_upsampling)

    def forward(self, x):
        """Return (reconstructed features, mu, logvar) from the variational block."""
        x = self.in_conv(x).flatten(start_dim=1)  # (batch, in_size)
        x, mu, logvar = self.var_block(x)
        x = self.relu(x).view(-1, *self.reduced_shape)  # back to the reduced 3D layout
        x = self.out_conv(x)
        x = self.upsample(x)
        return x, mu, logvar
class VariationalAutoEncoder(ConvolutionalAutoEncoder):
    """ConvolutionalAutoEncoder with a variational bottleneck between encoder and decoder.

    When ``vae_features`` is None, no variational layer is built and the model
    degenerates to the parent autoencoder (but forward() still calls
    ``self.var_layer`` — callers must supply vae_features for forward to work).
    """

    def __init__(self, n_reduced_latent_feature_maps=16, vae_features=128, variational_layer=MyronenkoVariationalLayer,
                 input_shape=None, n_features=1, base_width=32, encoder_blocks=None, decoder_blocks=None,
                 feature_dilation=2, downsampling_stride=2, interpolation_mode="trilinear",
                 encoder_class=MyronenkoEncoder,
                 decoder_class=MyronenkoDecoder, n_outputs=None, layer_widths=None, decoder_mirrors_encoder=False,
                 activation=None, use_transposed_convolutions=False, var_layer_stride=2):
        super(VariationalAutoEncoder, self).__init__(input_shape=input_shape, n_features=n_features,
                                                     base_width=base_width, encoder_blocks=encoder_blocks,
                                                     decoder_blocks=decoder_blocks, feature_dilation=feature_dilation,
                                                     downsampling_stride=downsampling_stride,
                                                     interpolation_mode=interpolation_mode, encoder_class=encoder_class,
                                                     decoder_class=decoder_class, n_outputs=n_outputs,
                                                     layer_widths=layer_widths,
                                                     decoder_mirrors_encoder=decoder_mirrors_encoder,
                                                     activation=activation,
                                                     use_transposed_convolutions=use_transposed_convolutions)
        if vae_features is not None:
            # width and spatial size of the encoder's deepest feature map
            depth = len(encoder_blocks) - 1
            n_latent_feature_maps = base_width * (feature_dilation ** depth)
            # NOTE(review): np.divide yields floats — assumes the variational layer
            # tolerates a float shape; confirm against MyronenkoVariationalLayer.
            latent_image_shape = np.divide(input_shape, downsampling_stride ** depth)
            self.var_layer = variational_layer(in_features=n_latent_feature_maps,
                                               input_shape=latent_image_shape,
                                               reduced_features=n_reduced_latent_feature_maps,
                                               latent_features=vae_features,
                                               upsampling_mode=interpolation_mode,
                                               conv_stride=var_layer_stride)

    def forward(self, x):
        """Encode, sample through the variational layer, decode; returns (output, mu, logvar)."""
        x = self.encoder(x)
        x, mu, logvar = self.var_layer(x)
        x = self.decoder(x)
        x = self.final_convolution(x)
        if self.activation is not None:
            x = self.activation(x)
        return x, mu, logvar

    def test(self, x):
        """Deterministic variant of forward.

        NOTE(review): decodes ``mu`` (the flat latent mean) rather than the
        reconstructed feature map ``x`` — confirm this is intended, since the
        decoder otherwise receives the var_layer's spatial output.
        """
        x = self.encoder(x)
        x, mu, logvar = self.var_layer(x)
        x = self.decoder(mu)
        x = self.final_convolution(x)
        if self.activation is not None:
            x = self.activation(x)
        return x, mu, logvar
class LabeledVariationalAutoEncoder(VariationalAutoEncoder):
    """VariationalAutoEncoder whose final 1x1x1 convolution maps to ``n_outputs`` label channels."""

    def __init__(self, *args, n_outputs=None, base_width=32, **kwargs):
        super(LabeledVariationalAutoEncoder, self).__init__(*args, n_outputs=n_outputs, base_width=base_width,
                                                            **kwargs)
        # replace the parent's output head with a label projection
        self.final_convolution = conv1x1x1(in_planes=base_width, out_planes=n_outputs, stride=1)
| 54.396067
| 120
| 0.603796
| 4,077
| 38,730
| 5.408879
| 0.045622
| 0.042853
| 0.051016
| 0.031834
| 0.835344
| 0.821105
| 0.810629
| 0.804598
| 0.797524
| 0.790087
| 0
| 0.016148
| 0.323625
| 38,730
| 711
| 121
| 54.472574
| 0.82566
| 0.042577
| 0
| 0.75367
| 0
| 0
| 0.006371
| 0.001458
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066884
| false
| 0.001631
| 0.011419
| 0.006525
| 0.146819
| 0.008157
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
99ac908e76f3e53229300ba26ff6e55f50168e5d
| 17,397
|
py
|
Python
|
tests/test_genoquery.py
|
ethanwtai/ukbrest
|
c7303e57659ed787f002d2a53a54ca3a3f631a9b
|
[
"MIT"
] | 34
|
2018-04-17T03:02:38.000Z
|
2022-03-22T18:38:46.000Z
|
tests/test_genoquery.py
|
ethanwtai/ukbrest
|
c7303e57659ed787f002d2a53a54ca3a3f631a9b
|
[
"MIT"
] | 7
|
2019-08-07T10:46:34.000Z
|
2021-06-23T13:30:59.000Z
|
tests/test_genoquery.py
|
ethanwtai/ukbrest
|
c7303e57659ed787f002d2a53a54ca3a3f631a9b
|
[
"MIT"
] | 16
|
2018-08-26T08:08:05.000Z
|
2021-09-08T22:30:06.000Z
|
import os
import unittest
import shutil
from os.path import isfile, isdir
from ukbrest.common.utils.external import qctool
from tests.utils import get_repository_path
from ukbrest.common.genoquery import GenoQuery
class UKBQueryTest(unittest.TestCase):
    """Integration tests for GenoQuery against the bundled 'example01' BGEN fixtures.

    Each test runs a query, decodes the resulting BGEN file with qctool, and
    asserts rsids, alleles, spot-checked genotype probabilities, and positions.
    The fixture holds 300 samples, so qctool output has 6 + 300 * 3 columns.
    """

    def test_query_incl_range_lower_and_upper_limits_at_beginning(self):
        """Inclusive chr1 range covering the first three variants of the file."""
        # prepare
        genoq = GenoQuery(get_repository_path('example01'))
        # run
        bgen_file = genoq.get_incl_range(chr=1, start=100, stop=276)
        # validate
        assert bgen_file is not None
        assert isfile(bgen_file)
        results = qctool(bgen_file)
        assert results is not None
        assert hasattr(results, 'shape')
        assert hasattr(results, 'columns')
        assert results.shape[1] == 6 + 300 * 3
        assert results.shape[0] == 3
        rsid_values = results['rsid'].unique()
        assert len(rsid_values) == 3
        assert results.loc[0, 'rsid'] == 'rs1'
        assert results.loc[1, 'rsid'] == 'rs2'
        assert results.loc[2, 'rsid'] == 'rs3'
        assert results.loc[0, 'allele1'] == 'G'
        assert results.loc[0, 'allele2'] == 'A'
        assert results.loc[1, 'allele1'] == 'G'
        assert results.loc[1, 'allele2'] == 'C'
        assert results.loc[2, 'allele1'] == 'C'
        assert results.loc[2, 'allele2'] == 'A'
        assert results.loc[0, '1.aa'] == 0.7491
        assert results.loc[0, '1.ab'] == 0.0133
        assert results.loc[0, '1.bb'] == 0.2376
        assert results.loc[1, '2.aa'] == 0.8654
        assert results.loc[1, '2.ab'] == 0.1041
        assert results.loc[1, '2.bb'] == 0.0306
        assert results.loc[2, '300.aa'] == 0.0828
        assert results.loc[2, '300.ab'] == 0.7752
        assert results.loc[2, '300.bb'] == 0.1421
        pos_values = results['pos'].unique()
        assert len(pos_values) == 3
        assert results.loc[0, 'pos'] == 100
        assert results.loc[1, 'pos'] == 181
        assert results.loc[2, 'pos'] == 276

    def test_query_incl_range_lower_and_upper_limits_at_end(self):
        """Inclusive chr1 range covering the last five variants of the file."""
        # prepare
        genoq = GenoQuery(get_repository_path('example01'))
        # run
        bgen_file = genoq.get_incl_range(chr=1, start=18058, stop=18389)
        # validate
        assert bgen_file is not None
        assert isfile(bgen_file)
        results = qctool(bgen_file)
        assert results is not None
        assert hasattr(results, 'shape')
        assert hasattr(results, 'columns')
        assert results.shape[1] == 6 + 300 * 3
        assert results.shape[0] == 5
        rsid_values = results['rsid'].unique()
        assert len(rsid_values) == 5
        assert results.loc[0, 'rsid'] == 'rs246'
        assert results.loc[1, 'rsid'] == 'rs247'
        assert results.loc[2, 'rsid'] == 'rs248'
        assert results.loc[3, 'rsid'] == 'rs249'
        assert results.loc[4, 'rsid'] == 'rs250'
        assert results.loc[0, 'allele1'] == 'C'
        assert results.loc[0, 'allele2'] == 'A'
        assert results.loc[1, 'allele1'] == 'T'
        assert results.loc[1, 'allele2'] == 'C'
        assert results.loc[2, 'allele1'] == 'G'
        assert results.loc[2, 'allele2'] == 'C'
        assert results.loc[3, 'allele1'] == 'G'
        assert results.loc[3, 'allele2'] == 'A'
        assert results.loc[4, 'allele1'] == 'T'
        assert results.loc[4, 'allele2'] == 'C'
        assert results.loc[0, '1.aa'] == 0.0537
        assert results.loc[0, '1.ab'] == 0.9160
        assert results.loc[0, '1.bb'] == 0.0302
        assert results.loc[1, '2.aa'] == 0.0698
        assert results.loc[1, '2.ab'] == 0.9116
        assert results.loc[1, '2.bb'] == 0.0186
        assert results.loc[2, '300.aa'] == 0.0826
        assert results.loc[2, '300.ab'] == 0.0316
        assert results.loc[2, '300.bb'] == 0.8858
        assert results.loc[3, '299.aa'] == 0.7988
        assert results.loc[3, '299.ab'] == 0.1666
        assert results.loc[3, '299.bb'] == 0.0346
        assert results.loc[4, '150.aa'] == 0.0773
        assert results.loc[4, '150.ab'] == 0.8683
        assert results.loc[4, '150.bb'] == 0.0544
        pos_values = results['pos'].unique()
        assert len(pos_values) == 5
        assert results.loc[0, 'pos'] == 18058
        assert results.loc[1, 'pos'] == 18139
        assert results.loc[2, 'pos'] == 18211
        assert results.loc[3, 'pos'] == 18294
        assert results.loc[4, 'pos'] == 18389

    def test_query_incl_range_lower_limit_only(self):
        """Open-ended range: only `start` given; expects the same tail as the previous test."""
        # prepare
        genoq = GenoQuery(get_repository_path('example01'))
        # run
        bgen_file = genoq.get_incl_range(chr=1, start=18058)
        # validate
        assert bgen_file is not None
        assert isfile(bgen_file)
        results = qctool(bgen_file)
        assert results is not None
        assert hasattr(results, 'shape')
        assert hasattr(results, 'columns')
        assert results.shape[1] == 6 + 300 * 3
        assert results.shape[0] == 5
        rsid_values = results['rsid'].unique()
        assert len(rsid_values) == 5
        assert results.loc[0, 'rsid'] == 'rs246'
        assert results.loc[1, 'rsid'] == 'rs247'
        assert results.loc[2, 'rsid'] == 'rs248'
        assert results.loc[3, 'rsid'] == 'rs249'
        assert results.loc[4, 'rsid'] == 'rs250'
        assert results.loc[0, 'allele1'] == 'C'
        assert results.loc[0, 'allele2'] == 'A'
        assert results.loc[1, 'allele1'] == 'T'
        assert results.loc[1, 'allele2'] == 'C'
        assert results.loc[2, 'allele1'] == 'G'
        assert results.loc[2, 'allele2'] == 'C'
        assert results.loc[3, 'allele1'] == 'G'
        assert results.loc[3, 'allele2'] == 'A'
        assert results.loc[4, 'allele1'] == 'T'
        assert results.loc[4, 'allele2'] == 'C'
        assert results.loc[0, '1.aa'] == 0.0537
        assert results.loc[0, '1.ab'] == 0.9160
        assert results.loc[0, '1.bb'] == 0.0302
        assert results.loc[1, '2.aa'] == 0.0698
        assert results.loc[1, '2.ab'] == 0.9116
        assert results.loc[1, '2.bb'] == 0.0186
        assert results.loc[2, '300.aa'] == 0.0826
        assert results.loc[2, '300.ab'] == 0.0316
        assert results.loc[2, '300.bb'] == 0.8858
        assert results.loc[3, '299.aa'] == 0.7988
        assert results.loc[3, '299.ab'] == 0.1666
        assert results.loc[3, '299.bb'] == 0.0346
        assert results.loc[4, '150.aa'] == 0.0773
        assert results.loc[4, '150.ab'] == 0.8683
        assert results.loc[4, '150.bb'] == 0.0544
        pos_values = results['pos'].unique()
        assert len(pos_values) == 5
        assert results.loc[0, 'pos'] == 18058
        assert results.loc[1, 'pos'] == 18139
        assert results.loc[2, 'pos'] == 18211
        assert results.loc[3, 'pos'] == 18294
        assert results.loc[4, 'pos'] == 18389

    def test_query_incl_range_upper_limit_only(self):
        """Open-ended range: only `stop` given; expects the same head as the first test."""
        # prepare
        genoq = GenoQuery(get_repository_path('example01'))
        # run
        bgen_file = genoq.get_incl_range(chr=1, stop=276)
        # validate
        assert bgen_file is not None
        assert isfile(bgen_file)
        results = qctool(bgen_file)
        assert results is not None
        assert hasattr(results, 'shape')
        assert hasattr(results, 'columns')
        assert results.shape[1] == 6 + 300 * 3
        assert results.shape[0] == 3
        rsid_values = results['rsid'].unique()
        assert len(rsid_values) == 3
        assert results.loc[0, 'rsid'] == 'rs1'
        assert results.loc[1, 'rsid'] == 'rs2'
        assert results.loc[2, 'rsid'] == 'rs3'
        assert results.loc[0, 'allele1'] == 'G'
        assert results.loc[0, 'allele2'] == 'A'
        assert results.loc[1, 'allele1'] == 'G'
        assert results.loc[1, 'allele2'] == 'C'
        assert results.loc[2, 'allele1'] == 'C'
        assert results.loc[2, 'allele2'] == 'A'
        assert results.loc[0, '1.aa'] == 0.7491
        assert results.loc[0, '1.ab'] == 0.0133
        assert results.loc[0, '1.bb'] == 0.2376
        assert results.loc[1, '2.aa'] == 0.8654
        assert results.loc[1, '2.ab'] == 0.1041
        assert results.loc[1, '2.bb'] == 0.0306
        assert results.loc[2, '300.aa'] == 0.0828
        assert results.loc[2, '300.ab'] == 0.7752
        assert results.loc[2, '300.bb'] == 0.1421
        pos_values = results['pos'].unique()
        assert len(pos_values) == 3
        assert results.loc[0, 'pos'] == 100
        assert results.loc[1, 'pos'] == 181
        assert results.loc[2, 'pos'] == 276

    def test_query_incl_range_using_file(self):
        """Positions supplied via file; results must come back ordered by position."""
        # prepare
        genoq = GenoQuery(get_repository_path('example01'))
        # positions are not ordered in the file, but they should be returned ordered
        positions_file = get_repository_path('example01/positions01.txt')
        # run
        bgen_file = genoq.get_incl_range_from_file(2, positions_file)
        # validate
        assert bgen_file is not None
        assert isfile(bgen_file)
        results = qctool(bgen_file)
        assert results is not None
        assert hasattr(results, 'shape')
        assert hasattr(results, 'columns')
        assert results.shape[1] == 6 + 300 * 3
        assert results.shape[0] == 5
        rsid_values = results['rsid'].unique()
        assert len(rsid_values) == 5
        assert results.loc[0, 'rsid'] == 'rs2000003'
        assert results.loc[1, 'rsid'] == 'rs2000008'
        assert results.loc[2, 'rsid'] == 'rs2000094'
        assert results.loc[3, 'rsid'] == 'rs2000118'
        assert results.loc[4, 'rsid'] == 'rs2000149'
        assert results.loc[0, 'allele1'] == 'C'
        assert results.loc[0, 'allele2'] == 'G'
        assert results.loc[1, 'allele1'] == 'T'
        assert results.loc[1, 'allele2'] == 'A'
        assert results.loc[2, 'allele1'] == 'C'
        assert results.loc[2, 'allele2'] == 'G'
        assert results.loc[3, 'allele1'] == 'T'
        assert results.loc[3, 'allele2'] == 'C'
        assert results.loc[4, 'allele1'] == 'G'
        assert results.loc[4, 'allele2'] == 'T'
        assert results.loc[0, '1.aa'] == 0.7889
        assert results.loc[0, '1.ab'] == 0.1538
        assert results.loc[0, '1.bb'] == 0.0573
        assert results.loc[1, '2.aa'] == 0.8776
        assert results.loc[1, '2.ab'] == 0.0670
        assert results.loc[1, '2.bb'] == 0.0554
        assert results.loc[2, '3.aa'] == 0.0553
        assert results.loc[2, '3.ab'] == 0.0939
        assert results.loc[2, '3.bb'] == 0.8509
        assert results.loc[3, '1.aa'] == 0.1219
        assert results.loc[3, '1.ab'] == 0.8459
        assert results.loc[3, '1.bb'] == 0.0323
        assert results.loc[4, '2.aa'] == 0.0137
        assert results.loc[4, '2.ab'] == 0.0953
        assert results.loc[4, '2.bb'] == 0.8909
        pos_values = results['pos'].unique()
        assert len(pos_values) == 5
        assert results.loc[0, 'pos'] == 300
        assert results.loc[1, 'pos'] == 661
        assert results.loc[2, 'pos'] == 7181
        assert results.loc[3, 'pos'] == 8949
        assert results.loc[4, 'pos'] == 11226

    def test_query_incl_rsids_single(self):
        """Query a single rsid on chromosome 2."""
        # prepare
        genoq = GenoQuery(get_repository_path('example01'))
        # run
        bgen_file = genoq.get_incl_rsids(2, ['rs2000082'])
        # validate
        assert bgen_file is not None
        assert isfile(bgen_file)
        results = qctool(bgen_file)
        assert results is not None
        assert hasattr(results, 'shape')
        assert hasattr(results, 'columns')
        assert results.shape[1] == 6 + 300 * 3
        assert results.shape[0] == 1
        rsid_values = results['rsid'].unique()
        assert len(rsid_values) == 1
        assert results.loc[0, 'rsid'] == 'rs2000082'
        assert results.loc[0, 'allele1'] == 'A'
        assert results.loc[0, 'allele2'] == 'T'
        assert results.loc[0, '1.aa'] == 0.0016
        assert results.loc[0, '1.ab'] == 0.8613
        assert results.loc[0, '1.bb'] == 0.1371
        assert results.loc[0, '300.aa'] == 0.0234
        assert results.loc[0, '300.ab'] == 0.0148
        assert results.loc[0, '300.bb'] == 0.9618
        pos_values = results['pos'].unique()
        assert len(pos_values) == 1
        assert results.loc[0, 'pos'] == 6192

    def test_query_incl_rsids_multiple(self):
        """Query two rsids on chromosome 2."""
        # prepare
        genoq = GenoQuery(get_repository_path('example01'))
        # run
        bgen_file = genoq.get_incl_rsids(2, ['rs2000082', 'rs2000142'])
        # validate
        assert bgen_file is not None
        assert isfile(bgen_file)
        results = qctool(bgen_file)
        assert results is not None
        assert hasattr(results, 'shape')
        assert hasattr(results, 'columns')
        assert results.shape[1] == 6 + 300 * 3
        assert results.shape[0] == 2
        rsid_values = results['rsid'].unique()
        assert len(rsid_values) == 2
        assert results.loc[0, 'rsid'] == 'rs2000082'
        assert results.loc[1, 'rsid'] == 'rs2000142'
        assert results.loc[0, 'allele1'] == 'A'
        assert results.loc[0, 'allele2'] == 'T'
        assert results.loc[1, 'allele1'] == 'T'
        assert results.loc[1, 'allele2'] == 'G'
        assert results.loc[0, '1.aa'] == 0.0016
        assert results.loc[0, '1.ab'] == 0.8613
        assert results.loc[0, '1.bb'] == 0.1371
        assert results.loc[0, '300.aa'] == 0.0234
        assert results.loc[0, '300.ab'] == 0.0148
        assert results.loc[0, '300.bb'] == 0.9618
        assert results.loc[1, '1.aa'] == 0.9619
        assert results.loc[1, '1.ab'] == 0.0015
        assert results.loc[1, '1.bb'] == 0.0366
        assert results.loc[1, '300.aa'] == 0.0185
        assert results.loc[1, '300.ab'] == 0.1408
        assert results.loc[1, '300.bb'] == 0.8407
        pos_values = results['pos'].unique()
        assert len(pos_values) == 2
        assert results.loc[0, 'pos'] == 6192
        assert results.loc[1, 'pos'] == 10750

    def test_query_incl_rsids_using_file(self):
        """Rsids supplied via file; results must come back ordered."""
        # prepare
        genoq = GenoQuery(get_repository_path('example01'))
        # rsids are not ordered in the file, but they should be returned ordered
        rsids_file = get_repository_path('example01/rsids01.txt')
        # run
        bgen_file = genoq.get_incl_rsids(2, [rsids_file])
        # validate
        assert bgen_file is not None
        assert isfile(bgen_file)
        results = qctool(bgen_file)
        assert results is not None
        assert hasattr(results, 'shape')
        assert hasattr(results, 'columns')
        assert results.shape[1] == 6 + 300 * 3
        assert results.shape[0] == 5
        rsid_values = results['rsid'].unique()
        assert len(rsid_values) == 5
        assert results.loc[0, 'rsid'] == 'rs2000000'
        assert results.loc[1, 'rsid'] == 'rs2000020'
        assert results.loc[2, 'rsid'] == 'rs2000079'
        assert results.loc[3, 'rsid'] == 'rs2000138'
        assert results.loc[4, 'rsid'] == 'rs2000149'
        assert results.loc[0, 'allele1'] == 'A'
        assert results.loc[0, 'allele2'] == 'G'
        assert results.loc[1, 'allele1'] == 'G'
        assert results.loc[1, 'allele2'] == 'C'
        assert results.loc[2, 'allele1'] == 'C'
        assert results.loc[2, 'allele2'] == 'A'
        assert results.loc[3, 'allele1'] == 'A'
        assert results.loc[3, 'allele2'] == 'G'
        assert results.loc[4, 'allele1'] == 'G'
        assert results.loc[4, 'allele2'] == 'T'
        assert results.loc[0, '1.aa'] == 0.9440
        assert results.loc[0, '1.ab'] == 0.0298
        assert results.loc[0, '1.bb'] == 0.0262
        assert results.loc[1, '2.aa'] == 0.1534
        assert results.loc[1, '2.ab'] == 0.7249
        assert results.loc[1, '2.bb'] == 0.1218
        assert results.loc[2, '3.aa'] == 0.9357
        assert results.loc[2, '3.ab'] == 0.0047
        assert results.loc[2, '3.bb'] == 0.0596
        assert results.loc[3, '1.aa'] == 0.8246
        assert results.loc[3, '1.ab'] == 0.0686
        assert results.loc[3, '1.bb'] == 0.1068
        assert results.loc[4, '2.aa'] == 0.0137
        assert results.loc[4, '2.ab'] == 0.0953
        assert results.loc[4, '2.bb'] == 0.8909
        pos_values = results['pos'].unique()
        assert len(pos_values) == 5
        assert results.loc[0, 'pos'] == 100
        assert results.loc[1, 'pos'] == 1623
        assert results.loc[2, 'pos'] == 5925
        assert results.loc[3, 'pos'] == 10447
        assert results.loc[4, 'pos'] == 11226

    def test_query_incl_range_temp_directory(self):
        """A custom tmpdir is created and holds exactly the one generated file."""
        # prepare
        shutil.rmtree('/tmp/ukbrest_different/', ignore_errors=True)
        genoq = GenoQuery(get_repository_path('example01'), tmpdir='/tmp/ukbrest_different/')
        # run
        bgen_file = genoq.get_incl_range(chr=1, start=100, stop=276)
        # validate
        assert bgen_file is not None
        assert isfile(bgen_file)
        results = qctool(bgen_file)
        assert results is not None
        assert hasattr(results, 'shape')
        assert hasattr(results, 'columns')
        assert results.shape[1] == 6 + 300 * 3
        assert results.shape[0] == 3
        assert isdir('/tmp/ukbrest_different/')
        assert len(os.listdir('/tmp/ukbrest_different/')) == 1
# position that does not exist?
# rsids does not exist?
# exclude positions
# exclude rsids
| 33.584942
| 93
| 0.565385
| 2,345
| 17,397
| 4.120256
| 0.089126
| 0.321569
| 0.351066
| 0.109087
| 0.915338
| 0.856965
| 0.846926
| 0.795591
| 0.79259
| 0.766301
| 0
| 0.105986
| 0.275967
| 17,397
| 517
| 94
| 33.649903
| 0.661083
| 0.024027
| 0
| 0.747191
| 0
| 0
| 0.101033
| 0.008144
| 0
| 0
| 0
| 0
| 0.823034
| 1
| 0.025281
| false
| 0
| 0.019663
| 0
| 0.047753
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
41f9baea3488032ba404d0c97ee5c19e113fa713
| 105
|
py
|
Python
|
test/SIM_test_ip/Modified_data/next_level.py
|
gilbertguoze/trick
|
f0537efb0fa3cb5c0c84e36b60f055c1d1c60d21
|
[
"NASA-1.3"
] | 647
|
2015-05-07T16:08:16.000Z
|
2022-03-30T02:33:21.000Z
|
test/SIM_test_ip/Modified_data/next_level.py
|
gilbertguoze/trick
|
f0537efb0fa3cb5c0c84e36b60f055c1d1c60d21
|
[
"NASA-1.3"
] | 995
|
2015-04-30T19:44:31.000Z
|
2022-03-31T20:14:44.000Z
|
test/SIM_test_ip/Modified_data/next_level.py
|
gilbertguoze/trick
|
f0537efb0fa3cb5c0c84e36b60f055c1d1c60d21
|
[
"NASA-1.3"
] | 251
|
2015-05-15T09:24:34.000Z
|
2022-03-22T20:39:05.000Z
|
# Trick input-processor data file: seeds the sim's C++ pointer-type test arrays
# with string lists. Assumes `test` is the sim object provided by Trick's input
# processor — TODO confirm against the SIM_test_ip S_define.
test.ip.c_pointer_types.cpp[0][0][0] = ["Cat" , "Dog"]
test.ip.c_pointer_types.cpp[0][0][1] = ["Horse"]
| 26.25
| 54
| 0.619048
| 21
| 105
| 2.904762
| 0.52381
| 0.098361
| 0.229508
| 0.459016
| 0.786885
| 0.786885
| 0.786885
| 0.786885
| 0
| 0
| 0
| 0.0625
| 0.085714
| 105
| 3
| 55
| 35
| 0.572917
| 0
| 0
| 0
| 0
| 0
| 0.105769
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5141a249b4278c073e2e295dc75bc52fecdf5cd7
| 14,120
|
py
|
Python
|
src/lepus/migrations/0003_auto_20160921_1143.py
|
Cpaw/lepus-api
|
71f68881d5fdcc47d31e2ecab41b24582cb25ebe
|
[
"MIT"
] | null | null | null |
src/lepus/migrations/0003_auto_20160921_1143.py
|
Cpaw/lepus-api
|
71f68881d5fdcc47d31e2ecab41b24582cb25ebe
|
[
"MIT"
] | null | null | null |
src/lepus/migrations/0003_auto_20160921_1143.py
|
Cpaw/lepus-api
|
71f68881d5fdcc47d31e2ecab41b24582cb25ebe
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
    """Auto-generated schema migration for the `lepus` app.

    Sets the default ordering of `Question` and re-declares the fields of the
    existing models. The `verbose_name` values are bytes literals that appear
    to be UTF-8-encoded Japanese labels (e.g. b'\\xe8\\xa7\\xa3\\xe7\\xad\\x94'
    decodes to "解答" / "answer") — kept as bytes for Python 2 compatibility.
    No columns are added or removed; only field options/labels change.
    """

    # Must be applied after the previous migration in this app's chain.
    dependencies = [
        ('lepus', '0002_auto_20150913_1811'),
    ]

    operations = [
        # Questions are listed by category, then explicit ordering, then id.
        migrations.AlterModelOptions(
            name='question',
            options={'ordering': ('category', 'ordering', 'id')},
        ),
        migrations.AlterField(
            model_name='answer',
            name='answer',
            field=models.CharField(max_length=256, verbose_name=b'\xe8\xa7\xa3\xe7\xad\x94'),
        ),
        migrations.AlterField(
            model_name='answer',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, verbose_name=b'\xe4\xbd\x9c\xe6\x88\x90\xe6\x97\xa5\xe6\x99\x82'),
        ),
        migrations.AlterField(
            model_name='answer',
            name='question',
            field=models.ForeignKey(verbose_name=b'\xe5\x95\x8f\xe9\xa1\x8c', to='lepus.Question'),
        ),
        migrations.AlterField(
            model_name='answer',
            name='team',
            field=models.ForeignKey(verbose_name=b'\xe3\x83\x81\xe3\x83\xbc\xe3\x83\xa0', to='lepus.Team'),
        ),
        migrations.AlterField(
            model_name='answer',
            name='updated_at',
            field=models.DateTimeField(auto_now=True, verbose_name=b'\xe6\x9c\x80\xe7\xb5\x82\xe6\x9b\xb4\xe6\x96\xb0\xe6\x97\xa5\xe6\x99\x82'),
        ),
        migrations.AlterField(
            model_name='answer',
            name='user',
            field=models.ForeignKey(verbose_name=b'\xe3\x83\xa6\xe3\x83\xbc\xe3\x82\xb6\xe3\x83\xbc', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='attackpoint',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, verbose_name=b'\xe4\xbd\x9c\xe6\x88\x90\xe6\x97\xa5\xe6\x99\x82'),
        ),
        migrations.AlterField(
            model_name='attackpoint',
            name='point',
            field=models.IntegerField(verbose_name=b'\xe5\xbe\x97\xe7\x82\xb9'),
        ),
        migrations.AlterField(
            model_name='attackpoint',
            name='question',
            field=models.ForeignKey(verbose_name=b'\xe5\x95\x8f\xe9\xa1\x8c', to='lepus.Question'),
        ),
        migrations.AlterField(
            model_name='attackpoint',
            name='team',
            field=models.ForeignKey(verbose_name=b'\xe3\x83\x81\xe3\x83\xbc\xe3\x83\xa0', to='lepus.Team'),
        ),
        migrations.AlterField(
            model_name='attackpoint',
            name='token',
            field=models.CharField(unique=True, max_length=256, verbose_name=b'\xe3\x83\x88\xe3\x83\xbc\xe3\x82\xaf\xe3\x83\xb3'),
        ),
        migrations.AlterField(
            model_name='attackpoint',
            name='updated_at',
            field=models.DateTimeField(auto_now=True, verbose_name=b'\xe6\x9c\x80\xe7\xb5\x82\xe6\x9b\xb4\xe6\x96\xb0\xe6\x97\xa5\xe6\x99\x82'),
        ),
        migrations.AlterField(
            model_name='attackpoint',
            name='user',
            field=models.ForeignKey(verbose_name=b'\xe3\x83\xa6\xe3\x83\xbc\xe3\x82\xb6\xe3\x83\xbc', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='category',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, verbose_name=b'\xe4\xbd\x9c\xe6\x88\x90\xe6\x97\xa5\xe6\x99\x82'),
        ),
        migrations.AlterField(
            model_name='category',
            name='name',
            field=models.CharField(unique=True, max_length=50, verbose_name=b'\xe3\x82\xab\xe3\x83\x86\xe3\x82\xb4\xe3\x83\xaa\xe5\x90\x8d'),
        ),
        migrations.AlterField(
            model_name='category',
            name='ordering',
            field=models.IntegerField(default=100, verbose_name=b'\xe8\xa1\xa8\xe7\xa4\xba\xe9\xa0\x86\xe5\xba\x8f'),
        ),
        migrations.AlterField(
            model_name='category',
            name='updated_at',
            field=models.DateTimeField(auto_now=True, verbose_name=b'\xe6\x9c\x80\xe7\xb5\x82\xe6\x9b\xb4\xe6\x96\xb0\xe6\x97\xa5\xe6\x99\x82'),
        ),
        migrations.AlterField(
            model_name='config',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, verbose_name=b'\xe4\xbd\x9c\xe6\x88\x90\xe6\x97\xa5\xe6\x99\x82'),
        ),
        migrations.AlterField(
            model_name='config',
            name='key',
            field=models.CharField(unique=True, max_length=256, verbose_name=b'\xe8\xa8\xad\xe5\xae\x9a\xe9\xa0\x85\xe7\x9b\xae'),
        ),
        migrations.AlterField(
            model_name='config',
            name='updated_at',
            field=models.DateTimeField(auto_now=True, verbose_name=b'\xe6\x9c\x80\xe7\xb5\x82\xe6\x9b\xb4\xe6\x96\xb0\xe6\x97\xa5\xe6\x99\x82'),
        ),
        migrations.AlterField(
            model_name='config',
            name='value_str',
            field=models.TextField(verbose_name=b'\xe3\x82\xb7\xe3\x83\xaa\xe3\x82\xa2\xe3\x83\xa9\xe3\x82\xa4\xe3\x82\xba\xe3\x81\x95\xe3\x82\x8c\xe3\x81\x9f\xe5\x80\xa4'),
        ),
        migrations.AlterField(
            model_name='file',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, verbose_name=b'\xe4\xbd\x9c\xe6\x88\x90\xe6\x97\xa5\xe6\x99\x82'),
        ),
        migrations.AlterField(
            model_name='file',
            name='file',
            field=models.FileField(upload_to=b'question/', max_length=256, verbose_name=b'\xe3\x83\x95\xe3\x82\xa1\xe3\x82\xa4\xe3\x83\xab'),
        ),
        migrations.AlterField(
            model_name='file',
            name='is_public',
            field=models.BooleanField(default=True, verbose_name=b'\xe5\x85\xac\xe9\x96\x8b\xe3\x81\x99\xe3\x82\x8b\xe3\x81\x8b'),
        ),
        migrations.AlterField(
            model_name='file',
            name='name',
            field=models.CharField(max_length=256, verbose_name=b'\xe3\x83\x95\xe3\x82\xa1\xe3\x82\xa4\xe3\x83\xab\xe5\x90\x8d'),
        ),
        migrations.AlterField(
            model_name='file',
            name='question',
            field=models.ForeignKey(verbose_name=b'\xe5\x95\x8f\xe9\xa1\x8c', to='lepus.Question'),
        ),
        migrations.AlterField(
            model_name='file',
            name='updated_at',
            field=models.DateTimeField(auto_now=True, verbose_name=b'\xe6\x9c\x80\xe7\xb5\x82\xe6\x9b\xb4\xe6\x96\xb0\xe6\x97\xa5\xe6\x99\x82'),
        ),
        migrations.AlterField(
            model_name='flag',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, verbose_name=b'\xe4\xbd\x9c\xe6\x88\x90\xe6\x97\xa5\xe6\x99\x82'),
        ),
        migrations.AlterField(
            model_name='flag',
            name='point',
            field=models.IntegerField(verbose_name=b'\xe5\xbe\x97\xe7\x82\xb9'),
        ),
        migrations.AlterField(
            model_name='flag',
            name='question',
            field=models.ForeignKey(verbose_name=b'\xe5\x95\x8f\xe9\xa1\x8c', to='lepus.Question'),
        ),
        migrations.AlterField(
            model_name='flag',
            name='updated_at',
            field=models.DateTimeField(auto_now=True, verbose_name=b'\xe6\x9c\x80\xe7\xb5\x82\xe6\x9b\xb4\xe6\x96\xb0\xe6\x97\xa5\xe6\x99\x82'),
        ),
        migrations.AlterField(
            model_name='notice',
            name='body',
            field=models.TextField(verbose_name=b'\xe6\x9c\xac\xe6\x96\x87'),
        ),
        migrations.AlterField(
            model_name='notice',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, verbose_name=b'\xe4\xbd\x9c\xe6\x88\x90\xe6\x97\xa5\xe6\x99\x82'),
        ),
        migrations.AlterField(
            model_name='notice',
            name='is_public',
            field=models.BooleanField(default=False, verbose_name=b'\xe5\x85\xac\xe9\x96\x8b\xe3\x81\xab\xe3\x81\x99\xe3\x82\x8b\xe3\x81\x8b'),
        ),
        migrations.AlterField(
            model_name='notice',
            name='title',
            field=models.CharField(max_length=80, verbose_name=b'\xe3\x82\xbf\xe3\x82\xa4\xe3\x83\x88\xe3\x83\xab'),
        ),
        migrations.AlterField(
            model_name='notice',
            name='updated_at',
            field=models.DateTimeField(auto_now=True, verbose_name=b'\xe6\x9c\x80\xe7\xb5\x82\xe6\x9b\xb4\xe6\x96\xb0\xe6\x97\xa5\xe6\x99\x82'),
        ),
        migrations.AlterField(
            model_name='question',
            name='category',
            field=models.ForeignKey(verbose_name=b'\xe3\x82\xab\xe3\x83\x86\xe3\x82\xb4\xe3\x83\xaa', to='lepus.Category'),
        ),
        migrations.AlterField(
            model_name='question',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, verbose_name=b'\xe4\xbd\x9c\xe6\x88\x90\xe6\x97\xa5\xe6\x99\x82'),
        ),
        migrations.AlterField(
            model_name='question',
            name='is_public',
            field=models.BooleanField(default=False, verbose_name=b'\xe5\x85\xac\xe9\x96\x8b\xe3\x81\xab\xe3\x81\x99\xe3\x82\x8b\xe3\x81\x8b'),
        ),
        migrations.AlterField(
            model_name='question',
            name='max_answers',
            field=models.IntegerField(null=True, verbose_name=b'\xe6\x9c\x80\xe5\xa4\xa7\xe5\x9b\x9e\xe7\xad\x94\xe8\x80\x85\xe6\x95\xb0', blank=True),
        ),
        migrations.AlterField(
            model_name='question',
            name='max_failure',
            field=models.IntegerField(null=True, verbose_name=b'\xe6\x9c\x80\xe5\xa4\xa7\xe5\x9b\x9e\xe7\xad\x94\xe6\x95\xb0', blank=True),
        ),
        migrations.AlterField(
            model_name='question',
            name='ordering',
            field=models.IntegerField(default=100, verbose_name=b'\xe8\xa1\xa8\xe7\xa4\xba\xe9\xa0\x86\xe5\xba\x8f'),
        ),
        migrations.AlterField(
            model_name='question',
            name='sentence',
            field=models.TextField(verbose_name=b'\xe5\x95\x8f\xe9\xa1\x8c\xe6\x96\x87'),
        ),
        migrations.AlterField(
            model_name='question',
            name='title',
            field=models.CharField(max_length=50, verbose_name=b'\xe3\x82\xbf\xe3\x82\xa4\xe3\x83\x88\xe3\x83\xab'),
        ),
        migrations.AlterField(
            model_name='question',
            name='updated_at',
            field=models.DateTimeField(auto_now=True, verbose_name=b'\xe6\x9c\x80\xe7\xb5\x82\xe6\x9b\xb4\xe6\x96\xb0\xe6\x97\xa5\xe6\x99\x82'),
        ),
        migrations.AlterField(
            model_name='team',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, verbose_name=b'\xe4\xbd\x9c\xe6\x88\x90\xe6\x97\xa5\xe6\x99\x82'),
        ),
        migrations.AlterField(
            model_name='team',
            name='last_score_time',
            field=models.DateTimeField(null=True, verbose_name=b'\xe6\x9c\x80\xe7\xb5\x82\xe5\xbe\x97\xe7\x82\xb9\xe6\x97\xa5\xe6\x99\x82', blank=True),
        ),
        migrations.AlterField(
            model_name='team',
            name='name',
            field=models.CharField(unique=True, max_length=32, verbose_name=b'\xe3\x83\x81\xe3\x83\xbc\xe3\x83\xa0\xe5\x90\x8d'),
        ),
        migrations.AlterField(
            model_name='team',
            name='password',
            field=models.CharField(max_length=128, verbose_name=b'\xe3\x83\x81\xe3\x83\xbc\xe3\x83\xa0\xe3\x83\x91\xe3\x82\xb9\xe3\x83\xaf\xe3\x83\xbc\xe3\x83\x89'),
        ),
        migrations.AlterField(
            model_name='team',
            name='updated_at',
            field=models.DateTimeField(auto_now=True, verbose_name=b'\xe6\x9c\x80\xe7\xb5\x82\xe6\x9b\xb4\xe6\x96\xb0\xe6\x97\xa5\xe6\x99\x82'),
        ),
        migrations.AlterField(
            model_name='user',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, verbose_name=b'\xe4\xbd\x9c\xe6\x88\x90\xe6\x97\xa5\xe6\x99\x82'),
        ),
        migrations.AlterField(
            model_name='user',
            name='last_score_time',
            field=models.DateTimeField(null=True, verbose_name=b'\xe6\x9c\x80\xe7\xb5\x82\xe5\xbe\x97\xe7\x82\xb9\xe6\x97\xa5\xe6\x99\x82', blank=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='seat',
            field=models.CharField(max_length=32, verbose_name=b'\xe5\xba\xa7\xe5\xb8\xad', blank=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='team',
            field=models.ForeignKey(verbose_name=b'\xe3\x83\x81\xe3\x83\xbc\xe3\x83\xa0', blank=True, to='lepus.Team', null=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='updated_at',
            field=models.DateTimeField(auto_now=True, verbose_name=b'\xe6\x9c\x80\xe7\xb5\x82\xe6\x9b\xb4\xe6\x96\xb0\xe6\x97\xa5\xe6\x99\x82'),
        ),
        migrations.AlterField(
            model_name='userconnection',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, verbose_name=b'\xe4\xbd\x9c\xe6\x88\x90\xe6\x97\xa5\xe6\x99\x82'),
        ),
        migrations.AlterField(
            model_name='userconnection',
            name='ip',
            field=models.GenericIPAddressField(verbose_name=b'IP\xe3\x82\xa2\xe3\x83\x89\xe3\x83\xac\xe3\x82\xb9'),
        ),
        migrations.AlterField(
            model_name='userconnection',
            name='updated_at',
            field=models.DateTimeField(auto_now=True, verbose_name=b'\xe6\x9c\x80\xe7\xb5\x82\xe6\x9b\xb4\xe6\x96\xb0\xe6\x97\xa5\xe6\x99\x82'),
        ),
        migrations.AlterField(
            model_name='userconnection',
            name='user',
            field=models.ForeignKey(verbose_name=b'\xe3\x83\xa6\xe3\x83\xbc\xe3\x82\xb6\xe3\x83\xbc', to=settings.AUTH_USER_MODEL),
        ),
    ]
| 44.825397
| 173
| 0.59568
| 1,812
| 14,120
| 4.52649
| 0.084437
| 0.143867
| 0.179834
| 0.208608
| 0.919044
| 0.903438
| 0.830773
| 0.780785
| 0.772373
| 0.749451
| 0
| 0.098526
| 0.250283
| 14,120
| 314
| 174
| 44.968153
| 0.676271
| 0.001487
| 0
| 0.857143
| 0
| 0.149351
| 0.295382
| 0.223097
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.003247
| 0.00974
| 0
| 0.019481
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
515d28619e465b1ea6835dd5a145548797b37368
| 5,665
|
py
|
Python
|
tests/test_models.py
|
ksangeeta2429/Edgel3
|
b05892cf7c5be079639b8cccd51ead93fe1eae1a
|
[
"MIT"
] | 3
|
2020-02-14T21:56:09.000Z
|
2020-09-08T11:54:05.000Z
|
tests/test_models.py
|
ksangeeta2429/Edgel3
|
b05892cf7c5be079639b8cccd51ead93fe1eae1a
|
[
"MIT"
] | 4
|
2020-05-11T17:00:41.000Z
|
2021-02-02T22:38:58.000Z
|
tests/test_models.py
|
ksangeeta2429/Edgel3
|
b05892cf7c5be079639b8cccd51ead93fe1eae1a
|
[
"MIT"
] | 6
|
2020-03-20T13:54:02.000Z
|
2021-03-21T04:51:34.000Z
|
from edgel3.models import load_embedding_model, load_embedding_model_path
def test_load_embedding_model_path():
    """Check that load_embedding_model_path builds the expected filenames
    for SEA (UST-specialized embedding approximation) models and for
    fine-tuned / knowledge-distilled sparse L3 models."""
    def last_two(path):
        # Compare only the trailing "<dir>/<file>" portion of the path.
        return '/'.join(path.split('/')[-2:])

    # UST specialized embedding approximated L3 models
    sea_cases = [
        (512, 'ft', 'edgel3/edgel3_sea_ust_audio_emb_512.h5'),
        (512, 'kd', 'edgel3/edgel3_sea_ust_audio_emb_512.h5'),
        (256, 'ft', 'edgel3/edgel3_sea_ust_audio_emb_256.h5'),
        (128, 'ft', 'edgel3/edgel3_sea_ust_audio_emb_128.h5'),
        (64, 'ft', 'edgel3/edgel3_sea_ust_audio_emb_64.h5'),
    ]
    for emb_dim, mode, expected in sea_cases:
        assert last_two(load_embedding_model_path('sea', emb_dim, mode, 95.45)) == expected

    # Fine-tuned ('ft') and knowledge-distilled ('kd') sparse L3 models:
    # the sparsity level is embedded verbatim in the filename.
    sparsities = (53.5, 63.5, 72.3, 73.5, 81.0, 87.0, 90.5, 95.45)
    for mode in ('ft', 'kd'):
        for sparsity in sparsities:
            expected = 'edgel3/edgel3_{}_audio_sparsity_{}.h5'.format(mode, sparsity)
            assert last_two(load_embedding_model_path('sparse', 128, mode, sparsity)) == expected
def test_load_embedding_model():
    """Check that load_embedding_model returns models with the expected
    embedding (output) dimensionality."""
    # Fine-tuned and knowledge-distilled sparse L3 models always emit 512-d
    # embeddings regardless of the sparsity level.
    for mode in ('ft', 'kd'):
        for sparsity in (53.5, 63.5, 72.3, 87.0, 95.45):
            model = load_embedding_model('sparse', 128, mode, sparsity)
            assert model.output_shape[1] == 512

    # UST specialized embedding approximated L3 models: the output dimension
    # matches the requested embedding size.
    for emb_dim, mode in ((512, 'ft'), (256, 'ft'), (128, 'kd'), (64, 'kd')):
        model = load_embedding_model('sea', emb_dim, mode, 95.45)
        assert model.output_shape[1] == emb_dim
| 48.836207
| 103
| 0.699029
| 852
| 5,665
| 4.307512
| 0.064554
| 0.308992
| 0.318801
| 0.137875
| 0.968665
| 0.923978
| 0.904087
| 0.880926
| 0.867302
| 0.795095
| 0
| 0.087798
| 0.133451
| 5,665
| 115
| 104
| 49.26087
| 0.659809
| 0.059488
| 0
| 0.178082
| 0
| 0
| 0.208607
| 0.153167
| 0
| 0
| 0
| 0
| 0.479452
| 1
| 0.027397
| false
| 0
| 0.013699
| 0
| 0.041096
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
5abcfd6f474a2fb2beab2bdc9a616e4fff97d1c8
| 24,197
|
py
|
Python
|
cosmo-field/.ipynb_checkpoints/make_2Dfield-checkpoint.py
|
tlmakinen/imnn
|
b3f4f3d39ca8ea88a86bc3a4295ff4e15e7e715d
|
[
"MIT"
] | null | null | null |
cosmo-field/.ipynb_checkpoints/make_2Dfield-checkpoint.py
|
tlmakinen/imnn
|
b3f4f3d39ca8ea88a86bc3a4295ff4e15e7e715d
|
[
"MIT"
] | null | null | null |
cosmo-field/.ipynb_checkpoints/make_2Dfield-checkpoint.py
|
tlmakinen/imnn
|
b3f4f3d39ca8ea88a86bc3a4295ff4e15e7e715d
|
[
"MIT"
] | null | null | null |
import argparse
import sys
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from IMNN.utils import TFRecords
from IMNN.LFI.LFI import GaussianApproximation
from FyeldGenerator import generate_field
__version__ = "0.2a5"
__author__ = "Lucas Makinen"
# HERE θ_fid is [A,B], yielding power spectrum P(k) = A k^-B
class GenerateCosmoField():
    """Simulator of 2D Gaussian random fields with a power-law power spectrum
    P(k) = A * k^-B, where θ = [A, B] = [amplitude, power index].

    Produces seed-matched fiducial and finite-difference derivative
    simulations (θ_fid ± δθ/2 per parameter) for IMNN-style training,
    optionally with an additive Gaussian "foreground" field, and can save
    the results as numpy files and/or TFRecords.

    Bug fix vs. the original: ``generate_data(size="small")`` previously
    attempted item assignment on a tuple (``result[-2] = ...``), which raised
    ``TypeError``; the result is now assembled in a list and returned as a
    tuple, so all three ``size`` modes work.
    """

    def __init__(self, input_shape=(1, 128, 128), n_params=2, n_summaries=2,
                 n_s=1000, n_d=1000, n_d_small=100,
                 θ_fid=np.array([1.0, 0.5]), δθ=np.array([0.2, 0.1]),
                 θ_fg=None, training_seed=0, validation_seed=1):
        # input_shape is (channels, ny, nx); only the spatial dims feed the
        # field generator, the channel axis is added back by simulator().
        self.input_shape = input_shape
        self.n_params = n_params
        self.n_summaries = n_summaries
        self.n_s = n_s                      # number of fiducial simulations
        self.n_d = n_d                      # number of derivative simulations
        self.n_d_small = n_d_small          # reduced derivative-set size
        self.θ_fid = θ_fid                  # fiducial [amplitude, power]
        self.δθ = δθ                        # finite-difference step per parameter
        self.half_δθ = δθ / 2.
        self.training_seed = training_seed
        self.validation_seed = validation_seed
        self.θ_fg = θ_fg                    # foreground [amplitude, power], or None

    def get_fiducial(self, seed, data):
        """Return the fiducial simulation at index `seed`."""
        return data[seed]

    def get_derivative(self, seed, derivative, parameter, data):
        """Return one derivative simulation (sim index, ± side, parameter)."""
        return data[seed, derivative, parameter]

    def check_selection(self, size):
        """Exit the process unless `size` is "full", "all" or "small"."""
        if size not in ["full", "all", "small"]:
            print("size must be `full`, `all` or `small` describing, respectively "
                  "whether just `n_d=n_s` is returned, or `n_d=n_s` and `n_d_small` "
                  "is returned, or `n_d=n_d_small` is returned.")
            sys.exit()

    def check_ftype(self, ftype):
        """Exit the process unless `ftype` is "both", "numpy" or "tfrecords"."""
        if ftype not in ["both", "numpy", "tfrecords"]:
            print("size must be `both`, `numpy` or `tfrecords` describing, respectively "
                  "whether both `numpy` and `tfrecords` files are saved, or just either one.")
            sys.exit()

    def Pkgen(self, n, amp=1):
        """Return a power-law power spectrum function P(k) = amp * k**-n."""
        def Pk(k):
            return amp * np.power(k, -n)
        return Pk

    def distrib(self, shape):
        """Draw complex white noise a + i*b with a, b ~ N(0, 1)."""
        a = np.random.normal(loc=0, scale=1, size=shape)
        b = np.random.normal(loc=0, scale=1, size=shape)
        return a + 1j * b

    def simulator(self, parameters, θ_fg=None, seed=None, save_fg_copy=False,
                  fg_repeat=False, simulator_args=None):
        """Generate one field per row of `parameters` (each row is [A, B]).

        If `θ_fg` is given, a foreground field is generated per simulation and
        added to the cosmological field (or returned separately when
        `save_fg_copy` is True). Returns an array of shape
        (n_sims, 1, ny, nx) — the singleton axis is the channel dimension.
        `simulator_args` is accepted for interface compatibility but unused.
        """
        if seed is not None:
            np.random.seed(seed)
        if len(parameters.shape) == 1:
            parameters = parameters[np.newaxis, :]
        # If only varying the amplitude, pin the power index to 0.5.
        if self.n_params == 1:
            parameters = np.repeat(parameters, 2, axis=1)
            parameters[:, 1] = np.ones_like(parameters[:, 1]) * 0.5
        d = np.array([generate_field(self.distrib,
                                     self.Pkgen(parameters[i, 1], amp=parameters[i, 0]),
                                     (self.input_shape[1], self.input_shape[2]))
                      for i in range(parameters.shape[0])])
        if θ_fg is not None:
            if fg_repeat:
                # Broadcast a single foreground parameter point to all sims.
                θ_fg = np.repeat(θ_fg[np.newaxis, :], parameters.shape[0], axis=0)
            fg = np.array([generate_field(self.distrib,
                                          self.Pkgen(θ_fg[i, 1], amp=θ_fg[i, 0]),
                                          (self.input_shape[1], self.input_shape[2]))
                           for i in range(parameters.shape[0])])
            if save_fg_copy:
                # Return cosmo and foreground separately.
                return d, fg
            # Add cosmo and foreground.
            d += fg
            del fg
            return np.expand_dims(d, axis=1)
        # No foreground: return just the cosmological field.
        return np.expand_dims(d, axis=1)

    def _tiled(self, point, n):
        """Repeat one parameter point into an (n, n_params) array."""
        return np.repeat(np.asarray(point)[np.newaxis, :], n, axis=0)

    def _sim_pair(self, parameters, fg_parameters):
        """Run the same parameter set with the training and validation seeds."""
        kwargs = dict(parameters=parameters, θ_fg=fg_parameters,
                      simulator_args={"input_shape": self.input_shape})
        return (self.simulator(seed=self.training_seed, **kwargs),
                self.simulator(seed=self.validation_seed, **kwargs))

    def generate_data(self, size="full"):
        """Generate fiducial and derivative simulation sets.

        Returns a tuple (details, fiducial, validation_fiducial, derivative,
        validation_derivative[, derivative_small, validation_derivative_small]).
        Derivative arrays have shape (n_d, 2, n_params, *input_shape), with
        axis 1 indexing the -δθ/2 and +δθ/2 sides.
        """
        self.check_selection(size)
        details = dict(
            input_shape=self.input_shape,
            n_params=self.n_params,
            n_summaries=self.n_summaries,
            n_s=self.n_s,
            n_d=self.n_d,
            θ_fid=self.θ_fid,
            θ_fg=self.θ_fg,
            δθ=self.δθ)
        # If foregrounds are present, expand the foreground point to n_s rows.
        if self.θ_fg is not None:
            fg_parameters = np.repeat(self.θ_fg[np.newaxis, :], self.n_s, axis=0)
        else:
            fg_parameters = None
        # Fiducial simulations (training, validation).
        a_0, a_1 = self._sim_pair(self._tiled(self.θ_fid, self.n_s), fg_parameters)
        # FOR NOW: TWO parameters, varying amplitude and power by ±δθ/2.
        b_0, b_1 = self._sim_pair(  # -amp
            self._tiled([self.θ_fid[0] - self.half_δθ[0], self.θ_fid[1]], self.n_d),
            fg_parameters)
        c_0, c_1 = self._sim_pair(  # +amp
            self._tiled([self.θ_fid[0] + self.half_δθ[0], self.θ_fid[1]], self.n_d),
            fg_parameters)
        d_0, d_1 = self._sim_pair(  # -power
            self._tiled([self.θ_fid[0], self.θ_fid[1] - self.half_δθ[1]], self.n_d),
            fg_parameters)
        e_0, e_1 = self._sim_pair(  # +power
            self._tiled([self.θ_fid[0], self.θ_fid[1] + self.half_δθ[1]], self.n_d),
            fg_parameters)
        print('b0 shape : ', b_0.shape)
        # Stack to (n_d, 2 sides, n_params, channel, ny, nx).
        f_0 = np.stack((np.stack((b_0, c_0)),
                        np.stack((d_0, e_0)))
                       ).transpose(2, 1, 0, 3, 4, 5)
        f_1 = np.stack((np.stack((b_1, c_1)),
                        np.stack((d_1, e_1)))
                       ).transpose(2, 1, 0, 3, 4, 5)
        # Assemble as a list so the "small" branch can assign in place
        # (the original used a tuple here, which raised TypeError).
        result = [details, a_0, a_1, f_0, f_1]
        if size == "all":
            details["n_d_small"] = self.n_d_small
            result += [f_0[:self.n_d_small],
                       f_1[:self.n_d_small]]
        elif size == "small":
            details["n_d"] = self.n_d_small
            result[-2] = f_0[:self.n_d_small]
            result[-1] = f_1[:self.n_d_small]
        return tuple(result)

    def save(self, ftype="both", size="full", directory="data", record_size=0.01):
        """Generate the data and save it as numpy files and/or TFRecords."""
        self.check_ftype(ftype)
        result = self.generate_data(size=size)
        if (ftype == "both") or (ftype == "numpy"):
            np.savez("{}/details.npz".format(directory), result[0])
            np.save("{}/fiducial.npy".format(directory), result[1])
            np.save("{}/validation_fiducial.npy".format(directory), result[2])
            np.save("{}/derivative.npy".format(directory), result[3])
            np.save("{}/validation_derivative.npy".format(directory), result[4])
            if size == "all":
                np.save("{}/derivative_small.npy".format(directory), result[5])
                np.save("{}/validation_derivative_small.npy".format(directory), result[6])
        if (ftype == "both") or (ftype == "tfrecords"):
            writer = TFRecords.TFRecords(record_size=record_size)
            tf_directory = "{}/tfrecords".format(directory)
            writer.write_record(
                n_sims=result[0]["n_s"],
                get_simulation=lambda x: self.get_fiducial(x, result[1]),
                fiducial=True,
                directory=tf_directory)
            writer.write_record(
                n_sims=result[0]["n_s"],
                get_simulation=lambda x: self.get_fiducial(x, result[2]),
                fiducial=True,
                validation=True,
                directory=tf_directory)
            writer.write_record(
                n_sims=result[0]["n_d"],
                get_simulation=lambda x, y, z: self.get_derivative(x, y, z, result[3]),
                fiducial=False,
                n_params=result[0]["n_params"],
                directory=tf_directory)
            writer.write_record(
                n_sims=result[0]["n_d"],
                get_simulation=lambda x, y, z: self.get_derivative(x, y, z, result[4]),
                fiducial=False,
                n_params=result[0]["n_params"],
                validation=True,
                directory=tf_directory)
            if size == "all":
                writer.write_record(
                    n_sims=result[0]["n_d_small"],
                    get_simulation=lambda x, y, z: self.get_derivative(x, y, z, result[5]),
                    fiducial=False,
                    n_params=result[0]["n_params"],
                    directory=tf_directory,
                    filename="derivative_small")
                # NOTE(review): this validation record reuses the filename
                # "derivative_small" and does not pass validation=True — it may
                # overwrite the training record above. Behavior kept as-is;
                # confirm against the TFRecords API before changing.
                writer.write_record(
                    n_sims=result[0]["n_d_small"],
                    get_simulation=lambda x, y, z: self.get_derivative(x, y, z, result[6]),
                    fiducial=False,
                    n_params=result[0]["n_params"],
                    directory=tf_directory,
                    filename="derivative_small")

    def plot_data(self, data, pars=[0, 1], plot_fg=False, ax=None, label=None,
                  cmap='jet'):
        """Plot a generated field (or a cosmo/foreground pair) with imshow.

        When `plot_fg` is True, `data` is a (cosmo, foreground) pair and
        `pars` holds one [A, B] point per field; otherwise `data` is a single
        field and `pars` is its [A, B]. Returns (fig, ax) for further
        modification. `label` is accepted but unused.
        """
        if plot_fg:
            pars = np.squeeze(pars)
            labs = ['cosmo', 'foreground']
            cosmo, fg = data
            for i, d in enumerate([cosmo, fg]):
                fig = plt.figure()
                plt.imshow(np.squeeze(d), cmap=cmap)
                plt.colorbar(label=r'$\delta$')
                plt.title(r'%s field with $\theta_{\rm %s}=$(%.1f, %.1f)' % (labs[i], labs[i],
                          pars[i][0], pars[i][1]))
                plt.show()
        else:
            pars = np.squeeze(pars)
            fig = plt.figure()
            plt.imshow(np.squeeze(data), cmap=cmap)
            plt.colorbar(label=r'$\delta$')
            plt.title(r'Gaussian field with $\theta_{\rm cosmo}=$(%.1f, %.1f)' % (pars[0], pars[1]))
        return fig, ax  # for further modification
class GenerateCosmoFieldOneParam():
def __init__(self, input_shape=(1,128,128), n_params=1, n_summaries=1, n_s=1000, n_d=1000, n_d_small=100,
θ_fid=np.array([1.0]), δθ=np.array([0.1]), θ_fg=None, training_seed=0,
validation_seed=1):
self.input_shape = input_shape
self.n_params = n_params
self.n_summaries = n_summaries
self.n_s = n_s
self.n_d = n_d
self.n_d_small = n_d_small
self.θ_fid = θ_fid
self.δθ = δθ
self.half_δθ = δθ / 2.
self.training_seed = training_seed
self.validation_seed = validation_seed
self.θ_fg = θ_fg
def get_fiducial(self, seed, data):
return data[seed]
def get_derivative(self, seed, derivative, parameter, data):
return data[seed, derivative, parameter]
def check_selection(self, size):
if size not in ["full", "all", "small"]:
print("size must be `full`, `all` or `small` describing, respectively "
"whether just `n_d=n_s` is returned, or `n_d=n_s` and `n_d_small` "
"is returned, or `n_d=n_d_small` is returned.")
sys.exit()
def check_ftype(self, ftype):
if ftype not in ["both", "numpy", "tfrecords"]:
print("size must be `both`, `numpy` or `tfrecords` describing, respectively "
"whether both `numpy` and `tfrecords` files are saved, or just either one.")
sys.exit()
# Helper that generates power-law power spectrum
def Pkgen(self, n, amp=1):
def Pk(k):
return amp*np.power(k, -n)
return Pk
# Draw samples from a normal distribution
def distrib(self, shape):
a = np.random.normal(loc=0, scale=1, size=shape)
b = np.random.normal(loc=0, scale=1, size=shape)
return a + 1j * b
def simulator(self, parameters, θ_fg=None, seed=None, save_fg_copy=False,
fg_repeat=False,
simulator_args=None):
# if self.input_shape[0] // 3 != 0:
# raise AssertionError ("input shape must be divisible by coordinate dimensions !")
if seed is not None:
np.random.seed(seed)
if len(parameters.shape) == 1:
parameters = parameters[np.newaxis, :]
# if only looking at amplitude, fix power to 0.5
if self.n_params == 1:
parameters = np.repeat(parameters, 2, axis=1)
parameters[:, 1] = np.ones_like(parameters[:, 1])*0.5
d = np.array([generate_field(self.distrib, self.Pkgen(parameters[i,1], amp=parameters[i,0]), (self.input_shape[1], self.input_shape[2]))
for i in range(parameters.shape[0])])
if θ_fg is not None:
if fg_repeat:
θ_fg = np.repeat(
θ_fg[np.newaxis, :],
parameters.shape[0],
axis=0)
fg = np.array([generate_field(self.distrib, self.Pkgen(θ_fg[i,1],
amp=θ_fg[i,0]), (self.input_shape[1], self.input_shape[2]))
for i in range(parameters.shape[0])])
# return cosmo, fg separately
if save_fg_copy:
return d,fg
else:
# add cosmo and fg
d += fg; del fg
return np.expand_dims(d, axis=1)
# else return just cosmo
else:
d = np.expand_dims(d, axis=1)
return d
def generate_data(self, size="full"):
self.check_selection(size)
details = dict(
input_shape=self.input_shape,
n_params=self.n_params,
n_summaries=self.n_summaries,
n_s=self.n_s,
n_d=self.n_d,
θ_fid=self.θ_fid,
θ_fg = self.θ_fg,
δθ=self.δθ)
# if foregrounds present, expand params
if self.θ_fg is not None:
fg_parameters = np.repeat(
self.θ_fg[np.newaxis, :],
self.n_s,
axis=0)
else:
fg_parameters = None
# training base sims
a_0 = self.simulator(
parameters=np.repeat(
self.θ_fid[np.newaxis, :],
self.n_s,
axis=0),
θ_fg=fg_parameters,
seed=self.training_seed,
simulator_args={"input_shape": self.input_shape})
# validation base sims
a_1 = self.simulator(
parameters=np.repeat(
self.θ_fid[np.newaxis, :],
self.n_s,
axis=0),
θ_fg=fg_parameters,
seed=self.validation_seed,
simulator_args={"input_shape": self.input_shape})
# FOR NOW: ONE parameters=: ONLY vary amp
# training -amp
b_0 = self.simulator(
parameters=np.repeat(
np.array([
self.θ_fid[0] - self.half_δθ[0],
# self.θ_fid[1],
])[np.newaxis, :],
self.n_d,
axis=0),
θ_fg=fg_parameters,
seed=self.training_seed,
simulator_args={"input_shape": self.input_shape})
# validation -amp
b_1 = self.simulator(
parameters=np.repeat(
np.array([
self.θ_fid[0] - self.half_δθ[0],
# self.θ_fid[1],
])[np.newaxis, :],
self.n_d,
axis=0),
θ_fg=fg_parameters,
seed=self.validation_seed,
simulator_args={"input_shape": self.input_shape})
# training +amp
c_0 = self.simulator(
parameters=np.repeat(
np.array([
self.θ_fid[0] + self.half_δθ[0],
# self.θ_fid[1],
])[np.newaxis, :],
self.n_d,
axis=0),
θ_fg=fg_parameters,
seed=self.training_seed,
simulator_args={"input_shape": self.input_shape})
# validation +amp
c_1 = self.simulator(
parameters=np.repeat(
np.array([
self.θ_fid[0] + self.half_δθ[0],
# self.θ_fid[1],
])[np.newaxis, :],
self.n_d,
axis=0),
θ_fg=fg_parameters,
seed=self.validation_seed,
simulator_args={"input_shape": self.input_shape})
f_0 = np.expand_dims(np.stack((b_0, c_0)).transpose(1, 0, 2, 3, 4), axis=-3)
f_1 = np.expand_dims(np.stack((b_1, c_1)).transpose(1, 0, 2, 3, 4), axis=-3)
result = (details, a_0, a_1, f_0, f_1)
if size == "all":
details["n_d_small"] = self.n_d_small
result += (f_0[:self.n_d_small],
f_1[:self.n_d_small])
elif size == "small":
details["n_d"] = self.n_d_small
result[-2] = f_0[:self.n_d_small]
result[-1] = f_1[:self.n_d_small]
return result
def save(self, ftype="both", size="full", directory="data", record_size=0.01):
    """Generate simulations and persist them to disk.

    Parameters
    ----------
    ftype : str
        Output format: "numpy", "tfrecords" or "both" (validated by
        ``check_ftype``).
    size : str
        Forwarded to ``generate_data``; "all" additionally produces the
        reduced ``n_d_small`` derivative sets (result indices 5 and 6).
    directory : str
        Output directory; tfrecords are written to "<directory>/tfrecords".
    record_size : float
        Shard size forwarded to the TFRecords writer.
    """
    self.check_ftype(ftype)
    result = self.generate_data(size=size)
    if ftype in ("both", "numpy"):
        np.savez("{}/details.npz".format(directory), result[0])
        np.save("{}/fiducial.npy".format(directory), result[1])
        np.save("{}/validation_fiducial.npy".format(directory), result[2])
        np.save("{}/derivative.npy".format(directory), result[3])
        np.save("{}/validation_derivative.npy".format(directory), result[4])
        if size == "all":
            np.save("{}/derivative_small.npy".format(directory), result[5])
            np.save("{}/validation_derivative_small.npy".format(directory), result[6])
    if ftype in ("both", "tfrecords"):
        writer = TFRecords.TFRecords(record_size=record_size)
        # fiducial: training then validation
        writer.write_record(
            n_sims=result[0]["n_s"],
            get_simulation=lambda x: self.get_fiducial(x, result[1]),
            fiducial=True,
            directory="{}/tfrecords".format(directory))
        writer.write_record(
            n_sims=result[0]["n_s"],
            get_simulation=lambda x: self.get_fiducial(x, result[2]),
            fiducial=True,
            validation=True,
            directory="{}/tfrecords".format(directory))
        # derivatives: training then validation
        writer.write_record(
            n_sims=result[0]["n_d"],
            get_simulation=lambda x, y, z: self.get_derivative(x, y, z, result[3]),
            fiducial=False,
            n_params=result[0]["n_params"],
            directory="{}/tfrecords".format(directory))
        writer.write_record(
            n_sims=result[0]["n_d"],
            get_simulation=lambda x, y, z: self.get_derivative(x, y, z, result[4]),
            fiducial=False,
            n_params=result[0]["n_params"],
            validation=True,
            directory="{}/tfrecords".format(directory))
        if size == "all":
            writer.write_record(
                n_sims=result[0]["n_d_small"],
                get_simulation=lambda x, y, z: self.get_derivative(x, y, z, result[5]),
                fiducial=False,
                n_params=result[0]["n_params"],
                directory="{}/tfrecords".format(directory),
                filename="derivative_small")
            # BUG FIX: this record holds the *validation* small-derivative
            # set (result[6]) but previously omitted validation=True, so it
            # wrote over the training "derivative_small" record above
            # (the numpy branch correctly distinguishes the two files).
            writer.write_record(
                n_sims=result[0]["n_d_small"],
                get_simulation=lambda x, y, z: self.get_derivative(x, y, z, result[6]),
                fiducial=False,
                n_params=result[0]["n_params"],
                validation=True,
                directory="{}/tfrecords".format(directory),
                filename="derivative_small")
def plot_data(self, data, pars=(0, 1), plot_fg=False, ax=None, label=None,
              cmap='jet'):
    """Display simulated field(s) as images with matplotlib.

    Parameters
    ----------
    data : array, or (cosmo, fg) pair when ``plot_fg`` is True
        Field(s) to show; squeezed before plotting.
    pars : sequence
        Parameter values quoted in the plot title. When ``plot_fg`` is
        True this is indexed as ``pars[i][0], pars[i][1]`` per component.
        Default is now an immutable tuple (the old mutable-list default
        was an anti-pattern, although it was never mutated).
    plot_fg : bool
        If True, plot the cosmology and foreground fields separately.
    ax : matplotlib axis or None
        Not used for drawing; returned unchanged so callers can modify it.
    label : unused
        Kept for interface compatibility.
    cmap : str
        Colormap name passed to ``imshow``.

    Returns
    -------
    (fig, ax)
        The (last created) figure and the untouched ``ax`` argument.
    """
    if plot_fg:
        pars = np.squeeze(pars)
        labs = ['cosmo', 'foreground']
        cosmo, fg = data
        for i, d in enumerate([cosmo, fg]):
            fig = plt.figure()
            plt.imshow(np.squeeze(d), cmap=cmap)
            plt.colorbar(label=r'$\delta$')
            plt.title(r'%s field with $\theta_{\rm %s}=$(%.1f, %.1f)' % (labs[i], labs[i],
                      pars[i][0], pars[i][1]))
            plt.show()
    else:
        pars = np.squeeze(pars)
        fig = plt.figure()
        plt.imshow(np.squeeze(data), cmap=cmap)
        plt.colorbar(label=r'$\delta$')
        plt.title(r'Gaussian field with $\theta_{\rm cosmo}=$(%.1f, %.1f)' % (pars[0], pars[1]))
    return fig, ax  # for further modification
| 37.631415
| 145
| 0.503823
| 2,953
| 24,197
| 3.942093
| 0.071114
| 0.010652
| 0.038485
| 0.029379
| 0.958681
| 0.957478
| 0.951636
| 0.949059
| 0.945795
| 0.945795
| 0
| 0.02186
| 0.370459
| 24,197
| 642
| 146
| 37.690031
| 0.742336
| 0.052445
| 0
| 0.947776
| 0
| 0
| 0.084295
| 0.009701
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046422
| false
| 0
| 0.015474
| 0.011605
| 0.104449
| 0.009671
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
852a0a53e0dbcc3d7ecbe6d5d7fcc5f1a5358859
| 2,180
|
py
|
Python
|
netutils_linux_monitoring/softnet_stat_test.py
|
AlexeyAB/netutils-linux
|
f97a919ecd765c50c364415ba43eeb09e8e829ed
|
[
"MIT"
] | 1
|
2019-02-09T23:37:41.000Z
|
2019-02-09T23:37:41.000Z
|
netutils_linux_monitoring/softnet_stat_test.py
|
AlexeyAB/netutils-linux
|
f97a919ecd765c50c364415ba43eeb09e8e829ed
|
[
"MIT"
] | null | null | null |
netutils_linux_monitoring/softnet_stat_test.py
|
AlexeyAB/netutils-linux
|
f97a919ecd765c50c364415ba43eeb09e8e829ed
|
[
"MIT"
] | 1
|
2020-05-28T07:47:20.000Z
|
2020-05-28T07:47:20.000Z
|
#!/usr/bin/env python
import unittest
from netutils_linux_monitoring.softnet_stat import SoftnetStat
class SoftnetStatTests(unittest.TestCase):
    """Unit tests for SoftnetStat snapshot parsing and subtraction."""

    # Two consecutive /proc/net/softnet_stat snapshots, one row per CPU.
    first = """
9d3cbd5e 00000000 0000004d 00000000 00000000 00000000 00000000 00000000 00000000 00000000
301350a8 00000000 00000025 00000000 00000000 00000000 00000000 00000000 00000000 00000000
2102d7a3 00000000 00000021 00000000 00000000 00000000 00000000 00000000 00000000 00000000
1d208d3b 00000000 00000021 00000000 00000000 00000000 00000000 00000000 00000000 00000000
6ba194e0 00000000 0000002b 00000000 00000000 00000000 00000000 00000000 00000000 00000000
25ef7e5f 00000000 0000001f 00000000 00000000 00000000 00000000 00000000 00000000 00000000
178ea501 00000000 0000001e 00000000 00000000 00000000 00000000 00000000 00000000 00000000
16882427 00000000 00000029 00000000 00000000 00000000 00000000 00000000 00000000 00000000
"""
    second = """
9d3cebfe 00000000 0000004d 00000000 00000000 00000000 00000000 00000000 00000000 00000000
30135354 00000000 00000025 00000000 00000000 00000000 00000000 00000000 00000000 00000000
2102d995 00000000 00000021 00000000 00000000 00000000 00000000 00000000 00000000 00000000
1d208e70 00000000 00000021 00000000 00000000 00000000 00000000 00000000 00000000 00000000
6ba1984a 00000000 0000002b 00000000 00000000 00000000 00000000 00000000 00000000 00000000
25ef7f6f 00000000 0000001f 00000000 00000000 00000000 00000000 00000000 00000000 00000000
178ed754 00000000 0000001e 00000000 00000000 00000000 00000000 00000000 00000000 00000000
168824ff 00000000 00000029 00000000 00000000 00000000 00000000 00000000 00000000 00000000
"""

    def test_delta(self):
        """Subtracting two per-CPU snapshots yields the expected deltas."""
        def parse(text):
            # One SoftnetStat per line; CPU id is the line index.
            return [SoftnetStat().parse_string(row, cpu)
                    for cpu, row in enumerate(text.strip().split('\n'))]

        before = parse(self.first)
        after = parse(self.second)
        expected = SoftnetStat().parse_list([0, 11936, 0, 0, 0, 0])
        self.assertEqual(after[0] - before[0], expected)
# Allow running this test file directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| 53.170732
| 116
| 0.78578
| 242
| 2,180
| 6.983471
| 0.268595
| 0.908876
| 1.136095
| 1.211834
| 0.743195
| 0.743195
| 0.743195
| 0.743195
| 0.743195
| 0.061538
| 0
| 0.685022
| 0.166972
| 2,180
| 40
| 117
| 54.5
| 0.245595
| 0.009174
| 0
| 0.0625
| 0
| 0
| 0.706809
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 1
| 0.03125
| false
| 0
| 0.0625
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
a401d37a3904e37f2bb54984a0e752e075c0f428
| 133
|
py
|
Python
|
allennlp_models/lm/util/beam_search_generators/__init__.py
|
shunk031/allennlp-models
|
d37c5fadeef9326808a84dda0bcfd210a078d6b1
|
[
"Apache-2.0"
] | 402
|
2020-03-11T22:58:35.000Z
|
2022-03-29T09:05:27.000Z
|
allennlp_models/lm/util/beam_search_generators/__init__.py
|
staceywhitmore-inl/allennlp-models
|
1e89d5e51cb45f3e77a48d4983bf980088334fac
|
[
"Apache-2.0"
] | 116
|
2020-03-11T01:26:57.000Z
|
2022-03-25T13:03:56.000Z
|
allennlp_models/lm/util/beam_search_generators/__init__.py
|
staceywhitmore-inl/allennlp-models
|
1e89d5e51cb45f3e77a48d4983bf980088334fac
|
[
"Apache-2.0"
] | 140
|
2020-03-11T00:51:35.000Z
|
2022-03-29T09:05:36.000Z
|
from .beam_search_generator import BeamSearchGenerator
from .transformer_beam_search_generator import TransformerBeamSearchGenerator
| 44.333333
| 77
| 0.924812
| 13
| 133
| 9.076923
| 0.615385
| 0.169492
| 0.322034
| 0.423729
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06015
| 133
| 2
| 78
| 66.5
| 0.944
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
cf9c8d6cacd23591b18e87d6426df08973b59679
| 5,907
|
py
|
Python
|
test/test_tasks_api.py
|
cvent/octopus-deploy-api-client
|
0e03e842e1beb29b132776aee077df570b88366a
|
[
"Apache-2.0"
] | null | null | null |
test/test_tasks_api.py
|
cvent/octopus-deploy-api-client
|
0e03e842e1beb29b132776aee077df570b88366a
|
[
"Apache-2.0"
] | null | null | null |
test/test_tasks_api.py
|
cvent/octopus-deploy-api-client
|
0e03e842e1beb29b132776aee077df570b88366a
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Octopus Server API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2019.6.7+Branch.tags-2019.6.7.Sha.aa18dc6809953218c66f57eff7d26481d9b23d6a
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import octopus_deploy_swagger_client
from octopus_deploy_client.tasks_api import TasksApi # noqa: E501
from octopus_deploy_swagger_client.rest import ApiException
class TestTasksApi(unittest.TestCase):
"""TasksApi unit test stubs"""
def setUp(self):
self.api = octopus_deploy_client.tasks_api.TasksApi() # noqa: E501
def tearDown(self):
pass
def test_create_response_descriptor_server_tasks_server_task_task_resource(self):
"""Test case for create_response_descriptor_server_tasks_server_task_task_resource
Create a TaskResource # noqa: E501
"""
pass
def test_create_response_descriptor_server_tasks_server_task_task_resource_spaces(self):
"""Test case for create_response_descriptor_server_tasks_server_task_task_resource_spaces
Create a TaskResource # noqa: E501
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_task_cancel_responder(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_task_cancel_responder
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_task_cancel_responder_spaces(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_task_cancel_responder_spaces
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_task_details_responder(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_task_details_responder
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_task_details_responder_spaces(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_task_details_responder_spaces
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_task_raw_responder(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_task_raw_responder
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_task_raw_responder_spaces(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_task_raw_responder_spaces
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_task_rerun_responder(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_task_rerun_responder
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_task_rerun_responder_spaces(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_task_rerun_responder_spaces
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_task_state_responder(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_task_state_responder
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_task_state_responder_spaces(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_task_state_responder_spaces
"""
pass
def test_custom_query_response_descriptor_octopus_server_web_api_actions_list_task_types_responder(self):
"""Test case for custom_query_response_descriptor_octopus_server_web_api_actions_list_task_types_responder
"""
pass
def test_custom_query_response_descriptor_octopus_server_web_api_actions_list_task_types_responder_spaces(self):
"""Test case for custom_query_response_descriptor_octopus_server_web_api_actions_list_task_types_responder_spaces
"""
pass
def test_custom_query_response_descriptor_octopus_server_web_api_actions_list_tasks_responder(self):
"""Test case for custom_query_response_descriptor_octopus_server_web_api_actions_list_tasks_responder
"""
pass
def test_custom_query_response_descriptor_octopus_server_web_api_actions_list_tasks_responder_spaces(self):
"""Test case for custom_query_response_descriptor_octopus_server_web_api_actions_list_tasks_responder_spaces
"""
pass
def test_custom_query_response_descriptor_octopus_server_web_api_actions_task_queued_behind_responder(self):
"""Test case for custom_query_response_descriptor_octopus_server_web_api_actions_task_queued_behind_responder
"""
pass
def test_custom_query_response_descriptor_octopus_server_web_api_actions_task_queued_behind_responder_spaces(self):
"""Test case for custom_query_response_descriptor_octopus_server_web_api_actions_task_queued_behind_responder_spaces
"""
pass
def test_load_response_descriptor_server_tasks_server_task_task_resource(self):
"""Test case for load_response_descriptor_server_tasks_server_task_task_resource
Get a TaskResource by ID # noqa: E501
"""
pass
def test_load_response_descriptor_server_tasks_server_task_task_resource_spaces(self):
"""Test case for load_response_descriptor_server_tasks_server_task_task_resource_spaces
Get a TaskResource by ID # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| 37.150943
| 124
| 0.792619
| 753
| 5,907
| 5.568393
| 0.108898
| 0.171715
| 0.190794
| 0.236585
| 0.891009
| 0.87813
| 0.87813
| 0.860005
| 0.832817
| 0.832817
| 0
| 0.012129
| 0.162519
| 5,907
| 158
| 125
| 37.386076
| 0.835456
| 0.454207
| 0
| 0.403846
| 1
| 0
| 0.002648
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.423077
| false
| 0.403846
| 0.096154
| 0
| 0.538462
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 12
|
5c81ff575a85dd75b6fae1367a858461644db3c3
| 10,919
|
py
|
Python
|
optking/tests/test_deriv_transforms.py
|
psi-rking/optking
|
6f113db58e733b6a56929a2b890f9dae0092995c
|
[
"BSD-3-Clause"
] | 12
|
2018-02-06T22:02:12.000Z
|
2022-01-06T09:26:44.000Z
|
optking/tests/test_deriv_transforms.py
|
psi-rking/optking
|
6f113db58e733b6a56929a2b890f9dae0092995c
|
[
"BSD-3-Clause"
] | 51
|
2017-11-22T16:00:02.000Z
|
2021-12-23T20:49:56.000Z
|
optking/tests/test_deriv_transforms.py
|
psi-rking/optking
|
6f113db58e733b6a56929a2b890f9dae0092995c
|
[
"BSD-3-Clause"
] | 9
|
2017-11-21T19:55:46.000Z
|
2022-02-28T06:09:11.000Z
|
#! Test gradient and Hessian transformations
import psi4
import qcelemental as qcel
import numpy as np
import optking
from optking import bend, stre, tors
#psi4.core.set_output_file('psi-output.dat')
def test_stationary_forces_h2o():
    """Gradient round trip CART -> internal -> CART for H2O at a stationary point.

    Builds an optking molecular system with explicit internal coordinates,
    computes an HF/cc-pVDZ Cartesian gradient with psi4, transforms it to
    internals and back, and checks the round trip to 10 decimal places,
    both unweighted and with u=1/mass weighting.
    (Commented-out debug printing removed.)
    """
    mol = psi4.geometry("""
0 1
O 0.0000000000 -0.0000000000 0.0025968676
H 0.0000000000 -0.7487897072 0.5811909492
H -0.0000000000 0.7487897072 0.5811909492
unit Angstrom
""")
    psi4_options = {
        "basis": "cc-pvdz",
        "scf_type": "pk"
    }
    psi4.set_options(psi4_options)
    mol.update_geometry()
    Natom = mol.natom()
    xyz = mol.geometry().to_array()
    # Make an optking molecule manually, so we control the specific
    # internal coordinates used.
    coords = [
        stre.Stre(0, 1),  # from-zero indexed atoms in geometry
        stre.Stre(0, 2),
        bend.Bend(0, 1, 2),
    ]
    Z = [mol.Z(i) for i in range(0, Natom)]
    masses = [mol.mass(i) for i in range(0, Natom)]
    f1 = optking.frag.Frag(Z, xyz, masses, intcos=coords, frozen=False)
    OptMol = optking.molsys.Molsys([f1])
    grad_x = psi4.gradient("hf")  # returns an (N,3) psi4 matrix
    grad_x = grad_x.to_array()
    grad_q = OptMol.gradient_to_internals(grad_x.flatten(), useMasses=False)  # ndarray
    grad_x2 = OptMol.gradient_to_cartesians(grad_q).reshape(Natom, 3)
    grad_x2 = psi4.core.Matrix.from_array(grad_x2)
    assert psi4.compare_values(grad_x, grad_x2, 10, "Diff grad. CART->int->CART")
    grad_q = OptMol.gradient_to_internals(grad_x.flatten(), useMasses=True)
    grad_x2 = OptMol.gradient_to_cartesians(grad_q).reshape(Natom, 3)
    grad_x2 = psi4.core.Matrix.from_array(grad_x2)
    assert psi4.compare_values(grad_x, grad_x2, 10, "Diff grad. CART->int->CART with u=1/mass")
def test_stationary_hessian_h2o():
    """Hessian round trip CART -> internal -> CART for H2O at a stationary point.

    At a stationary point no gradient correction is needed; the Cartesian
    Hessian recovered from the internal-coordinate Hessian must match the
    original to 7 decimal places, with and without u=1/mass weighting.
    (Commented-out debug printing removed.)
    """
    mol = psi4.geometry("""
0 1
O 0.0000000000 -0.0000000000 0.0025968676
H 0.0000000000 -0.7487897072 0.5811909492
H -0.0000000000 0.7487897072 0.5811909492
unit Angstrom
""")
    psi4_options = {
        "basis": "cc-pvdz",
        "scf_type": "pk"
    }
    psi4.set_options(psi4_options)
    mol.update_geometry()
    Natom = mol.natom()
    xyz = mol.geometry().to_array()
    coords = [
        stre.Stre(0, 1),
        stre.Stre(0, 2),
        bend.Bend(0, 1, 2),
    ]
    Z = [mol.Z(i) for i in range(0, Natom)]
    masses = [mol.mass(i) for i in range(0, Natom)]
    f1 = optking.frag.Frag(Z, xyz, masses, intcos=coords, frozen=False)
    OptMol = optking.molsys.Molsys([f1])
    # Compute the Cartesian Hessian with psi4.
    H_xy = psi4.hessian("hf")  # returns a (3N,3N) psi4 matrix
    # Transform to internals and back with optking.
    H_q = OptMol.hessian_to_internals(H_xy.to_array())  # returns ndarray
    H_xy2 = OptMol.hessian_to_cartesians(H_q)  # returns ndarray
    assert psi4.compare_values(H_xy, H_xy2, 7, "Diff hessian CART->int->CART")
    H_q = OptMol.hessian_to_internals(H_xy.to_array(), useMasses=True)
    H_xy2 = OptMol.hessian_to_cartesians(H_q)
    assert psi4.compare_values(H_xy, H_xy2, 7, "Diff hessian CART->int->CART with u=1/mass")
def test_nonstationary_forces_h2o():
    """Gradient CART->internal->CART round trip for a distorted (non-stationary) H2O."""
    mol = psi4.geometry("""
0 1
O 0.0 -0.00 0.00
H 0.0 -0.75 0.58
H 0.0 0.75 0.58
unit Angstrom
""")
    psi4.set_options({"basis": "cc-pvdz", "scf_type": "pk"})
    mol.update_geometry()
    natoms = mol.natom()
    geom = mol.geometry().to_array()
    internals = [stre.Stre(0, 1), stre.Stre(0, 2), bend.Bend(0, 1, 2)]
    atomic_numbers = [mol.Z(i) for i in range(natoms)]
    atom_masses = [mol.mass(i) for i in range(natoms)]
    fragment = optking.frag.Frag(atomic_numbers, geom, atom_masses,
                                 intcos=internals, frozen=False)
    system = optking.molsys.Molsys([fragment])
    cart_grad = psi4.gradient("hf").to_array()
    # Round-trip the gradient twice: plain, then with u=1/mass weighting.
    for weighted in (False, True):
        internal_grad = system.gradient_to_internals(cart_grad.flatten(),
                                                     useMasses=weighted)
        rebuilt = psi4.core.Matrix.from_array(
            system.gradient_to_cartesians(internal_grad).reshape(natoms, 3))
        tag = "Diff grad. CART->int->CART" + (" with u=1/mass" if weighted else "")
        assert psi4.compare_values(cart_grad, rebuilt, 10, tag)
def test_nonstationary_hessian_h2o():
    """Hessian CART->internal->CART round trip for a distorted H2O geometry.

    Off a stationary point the gradient enters the transformation, so it is
    supplied alongside the Hessian in both directions.
    """
    mol = psi4.geometry("""
0 1
O 0.0 -0.00 0.00
H 0.0 -0.75 0.58
H 0.0 0.75 0.58
unit Angstrom
""")
    psi4.set_options({"basis": "cc-pvdz", "scf_type": "pk"})
    mol.update_geometry()
    natoms = mol.natom()
    geom = mol.geometry().to_array()
    internals = [stre.Stre(0, 1), stre.Stre(0, 2), bend.Bend(0, 1, 2)]
    atomic_numbers = [mol.Z(i) for i in range(natoms)]
    atom_masses = [mol.mass(i) for i in range(natoms)]
    system = optking.molsys.Molsys(
        [optking.frag.Frag(atomic_numbers, geom, atom_masses,
                           intcos=internals, frozen=False)])
    cart_grad = psi4.gradient("hf").to_array().flatten()
    cart_hess = psi4.hessian("hf")
    for weighted, tag in ((False, "Diff hessian CART->int->CART"),
                          (True, "Diff hessian CART->int->CART with u=1/masses")):
        q_hess = system.hessian_to_internals(cart_hess.to_array(), cart_grad,
                                             useMasses=weighted)
        q_grad = system.gradient_to_internals(cart_grad, useMasses=weighted)
        rebuilt = system.hessian_to_cartesians(q_hess, q_grad)
        assert psi4.compare_values(cart_hess, rebuilt, 8, tag)
def test_stationary_forces_hooh():
    """Gradient CART->internal->CART round trip for HOOH at a stationary point."""
    mol = psi4.geometry("""
0 1
H 0.9047154509 0.7748902860 0.4679224940
O 0.1020360382 0.6887430144 -0.0294829672
O -0.1020360382 -0.6887430144 -0.0294829672
H -0.9047154509 -0.7748902860 0.4679224940
""")
    mol.update_geometry()
    psi4.set_options({"basis": "cc-pvdz", "scf_type": "pk"})
    geom = mol.geometry().to_array()
    natoms = mol.natom()
    # Two stretches, two bends, and the H-O-O-H torsion.
    internals = [
        stre.Stre(0, 1),
        stre.Stre(2, 3),
        bend.Bend(0, 1, 2),
        bend.Bend(1, 2, 3),
        tors.Tors(0, 1, 2, 3),
    ]
    atomic_numbers = [mol.Z(i) for i in range(natoms)]
    atom_masses = [mol.mass(i) for i in range(natoms)]
    system = optking.molsys.Molsys(
        [optking.frag.Frag(atomic_numbers, geom, atom_masses,
                           intcos=internals, frozen=False)])
    cart_grad = psi4.gradient("hf").to_array()
    for weighted in (False, True):
        q_grad = system.gradient_to_internals(cart_grad.flatten(),
                                              useMasses=weighted)
        rebuilt = system.gradient_to_cartesians(q_grad).reshape(natoms, 3)
        tag = "Diff grad. CART->int->CART" + (" with u=1/mass" if weighted else "")
        assert psi4.compare_values(cart_grad, rebuilt, 8, tag)
def test_stationary_hessian_hooh():
    """Internal-coordinate Hessian consistency (cart->INT->cart->INT) for HOOH."""
    mol = psi4.geometry("""
0 1
H 0.9047154509 0.7748902860 0.4679224940
O 0.1020360382 0.6887430144 -0.0294829672
O -0.1020360382 -0.6887430144 -0.0294829672
H -0.9047154509 -0.7748902860 0.4679224940
""")
    mol.update_geometry()
    natoms = mol.natom()
    psi4.set_options({"basis": "cc-pvdz", "scf_type": "pk"})
    geom = mol.geometry().to_array()
    internals = [
        stre.Stre(0, 1),
        stre.Stre(2, 3),
        bend.Bend(0, 1, 2),
        bend.Bend(1, 2, 3),
        tors.Tors(0, 1, 2, 3),
    ]
    atomic_numbers = [mol.Z(i) for i in range(natoms)]
    atom_masses = [mol.mass(i) for i in range(natoms)]
    system = optking.molsys.Molsys(
        [optking.frag.Frag(atomic_numbers, geom, atom_masses,
                           intcos=internals, frozen=False)])
    cart_hess = psi4.hessian("hf").to_array()
    q_hess = system.hessian_to_internals(cart_hess, useMasses=False)
    rebuilt_cart = system.hessian_to_cartesians(q_hess)
    q_hess_again = system.hessian_to_internals(rebuilt_cart, useMasses=False)
    assert psi4.compare_values(q_hess, q_hess_again, 7, "Diff hessian cart->INT->cart->INT")
    # The Cartesian Hessians themselves do not agree due to implicit
    # rotations, so only the internal-coordinate Hessians are compared.
def test_nonstationary_hessian_hooh():
    """Internal-coordinate Hessian consistency for a distorted HOOH geometry.

    Off a stationary point the gradient is supplied to the Hessian
    transformations in both directions.
    """
    mol = psi4.geometry("""
0 1
H 0.90 0.77 0.46
O 0.10 0.68 -0.02
O -0.10 -0.68 -0.02
H -0.90 -0.77 0.46
""")
    mol.update_geometry()
    natoms = mol.natom()
    psi4.set_options({"basis": "cc-pvdz", "scf_type": "pk"})
    geom = mol.geometry().to_array()
    internals = [
        stre.Stre(0, 1),
        stre.Stre(2, 3),
        bend.Bend(0, 1, 2),
        bend.Bend(1, 2, 3),
        tors.Tors(0, 1, 2, 3),
    ]
    atomic_numbers = [mol.Z(i) for i in range(natoms)]
    atom_masses = [mol.mass(i) for i in range(natoms)]
    system = optking.molsys.Molsys(
        [optking.frag.Frag(atomic_numbers, geom, atom_masses,
                           intcos=internals, frozen=False)])
    cart_grad = psi4.gradient("hf").to_array().flatten()
    cart_hess = psi4.hessian("hf").to_array()
    q_grad = system.gradient_to_internals(cart_grad)
    q_hess = system.hessian_to_internals(cart_hess, cart_grad)
    rebuilt_cart = system.hessian_to_cartesians(q_hess, q_grad)
    q_hess_again = system.hessian_to_internals(rebuilt_cart, cart_grad)
    assert psi4.compare_values(q_hess, q_hess_again, 7, "Diff hessian cart->INT->cart->INT")
    # The Cartesian Hessians themselves do not agree due to implicit
    # rotations, so only the internal-coordinate Hessians are compared.
| 34.121875
| 95
| 0.632567
| 1,678
| 10,919
| 3.942789
| 0.091776
| 0.022672
| 0.036276
| 0.014813
| 0.862455
| 0.838271
| 0.83283
| 0.823609
| 0.80925
| 0.7698
| 0
| 0.096061
| 0.225845
| 10,919
| 319
| 96
| 34.22884
| 0.68662
| 0.14342
| 0
| 0.792531
| 0
| 0
| 0.200279
| 0
| 0
| 0
| 0
| 0
| 0.049793
| 1
| 0.029046
| false
| 0
| 0.020747
| 0
| 0.049793
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7a61352992a693f353306f117c51b40fd97f3259
| 131
|
py
|
Python
|
Parsers/interface.py
|
levozavr/finance-ml
|
efc23e664821861fa03bb91fc94448b6fb14636e
|
[
"Apache-2.0"
] | null | null | null |
Parsers/interface.py
|
levozavr/finance-ml
|
efc23e664821861fa03bb91fc94448b6fb14636e
|
[
"Apache-2.0"
] | 10
|
2018-10-02T07:29:32.000Z
|
2018-12-22T16:02:31.000Z
|
Parsers/interface.py
|
levozavr/finance-ml
|
efc23e664821861fa03bb91fc94448b6fb14636e
|
[
"Apache-2.0"
] | null | null | null |
class ParserInterface:
    """Interface every data-source parser must implement.

    Concrete parsers override all three methods; the base implementations
    are deliberate no-ops (returning None) so the class also serves as a
    null parser. (Removed the redundant empty parentheses in the class
    statement.)
    """

    def open(self):
        """Open the underlying data source. Override in subclasses."""
        pass

    def get_data(self):
        """Return the parsed data. Override in subclasses."""
        pass

    def close(self):
        """Release any held resources. Override in subclasses."""
        pass
| 13.1
| 24
| 0.534351
| 15
| 131
| 4.6
| 0.6
| 0.347826
| 0.318841
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.374046
| 131
| 9
| 25
| 14.555556
| 0.841463
| 0
| 0
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0.428571
| 0
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
7a72e186b5a16b56b6ed0e2af9b948f1d3aa32f9
| 135
|
py
|
Python
|
tests/test_cipher_nkd2120.py
|
QMSS-G5072-2020/cipher_longuetmarx_nicolas
|
68e5c1c462f9207cb120b7de8d93934f3c92df56
|
[
"MIT"
] | null | null | null |
tests/test_cipher_nkd2120.py
|
QMSS-G5072-2020/cipher_longuetmarx_nicolas
|
68e5c1c462f9207cb120b7de8d93934f3c92df56
|
[
"MIT"
] | null | null | null |
tests/test_cipher_nkd2120.py
|
QMSS-G5072-2020/cipher_longuetmarx_nicolas
|
68e5c1c462f9207cb120b7de8d93934f3c92df56
|
[
"MIT"
] | null | null | null |
from cipher_nkd2120 import __version__
from cipher_nkd2120 import cipher_nkd2120
def test_version():
    # Pin the package version exported by cipher_nkd2120's __init__.
    assert __version__ == '3.11'
| 22.5
| 41
| 0.8
| 18
| 135
| 5.333333
| 0.555556
| 0.40625
| 0.354167
| 0.479167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12931
| 0.140741
| 135
| 5
| 42
| 27
| 0.698276
| 0
| 0
| 0
| 0
| 0
| 0.02963
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
8fb5ce47b7a5ee75f60ec2ad7a4ec5dffa020236
| 349
|
py
|
Python
|
tests/internal/instance_type/test_instance_type_r6_auto.py
|
frolovv/aws.ec2.compare
|
582805823492f833d65c0441c4a14dce697c12aa
|
[
"Apache-2.0"
] | null | null | null |
tests/internal/instance_type/test_instance_type_r6_auto.py
|
frolovv/aws.ec2.compare
|
582805823492f833d65c0441c4a14dce697c12aa
|
[
"Apache-2.0"
] | null | null | null |
tests/internal/instance_type/test_instance_type_r6_auto.py
|
frolovv/aws.ec2.compare
|
582805823492f833d65c0441c4a14dce697c12aa
|
[
"Apache-2.0"
] | 1
|
2021-12-15T11:58:22.000Z
|
2021-12-15T11:58:22.000Z
|
# Testing module instance_type.r6
import pytest
import ec2_compare.internal.instance_type.r6
def test_get_internal_data_instance_type_r6_get_instances_list():
    # The generated r6 data module must expose a non-empty instance list.
    assert len(ec2_compare.internal.instance_type.r6.get_instances_list()) > 0
def test_get_internal_data_instance_type_r6_get():
    # `get` is module-level data (not called here); it must be non-empty too.
    assert len(ec2_compare.internal.instance_type.r6.get) > 0
| 34.9
| 76
| 0.848138
| 56
| 349
| 4.839286
| 0.339286
| 0.265683
| 0.309963
| 0.250923
| 0.826568
| 0.826568
| 0.612546
| 0.612546
| 0.612546
| 0
| 0
| 0.034056
| 0.074499
| 349
| 9
| 77
| 38.777778
| 0.804954
| 0.088825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 10
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.