hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
51b68ff9d4c3d33140d8c2991ab00d674fa5b187
| 10,482
|
py
|
Python
|
app/tests/api/test_users.py
|
damnever/2L
|
35dab73106e5879155a647b31c81ae5dea18b89d
|
[
"BSD-3-Clause"
] | 1
|
2019-03-11T12:16:30.000Z
|
2019-03-11T12:16:30.000Z
|
app/tests/api/test_users.py
|
damnever/2L
|
35dab73106e5879155a647b31c81ae5dea18b89d
|
[
"BSD-3-Clause"
] | 1
|
2017-04-16T02:11:58.000Z
|
2017-04-16T02:11:58.000Z
|
app/tests/api/test_users.py
|
damnever/2L
|
35dab73106e5879155a647b31c81ae5dea18b89d
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import json
import mox
from app.models import User, Following, Blocked
from app.settings import Level
from app.base.handlers import APIHandler
from app.tests.base import BaseTestCase
DATA = {
'username': 'godfather',
'password': '1donotknow',
'email': 'god@father.com',
'introduce': 'I am God Father',
'location': 'ChangSha',
'wiki': ('## God\n You do not know God.\n'
'## Father\n Yes, I am a father, my son is God'),
'blog': 'http://damnever.github.io/',
'github': 'https://github.com/Damnever',
'google': 'https://plus.google.com/u/0/112029405049071050730',
'weibo': 'http://weibo.com/u/1977691952',
'twitter': 'https://twitter.com/WolfDxc',
}
class UsersTests(BaseTestCase):
_path = '/api/users/{0}'
def setUp(self):
super(UsersTests, self).setUp()
self._data = DATA
User.create(**self._data)
def test_get_user_info(self):
r = self.get(self._path.format(self._data['username']))
data = self._data
data.update({
'status': 1,
'id': 1,
'avatar': '',
'gold': Level['Gold']['Register'],
})
self.assertEqual(r.code, 200)
self.assertDictEqual(json.loads(r.body), data)
def test_username_not_exists_error(self):
r = self.get(self._path.format('notexists'))
self.assertEqual(r.code, 200)
self.assertDictEqual(
json.loads(r.body),
{'status': 0, 'reason': 'Username does not exists'},
)
class UserTests(BaseTestCase):
_path = '/api/user'
def setUp(self):
super(UsersTests, self).setUp()
self._mox = mox.Mox()
self._me = DATA
User.create(**self._me)
self._mox.StubOutWithMock(APIHandler,
'get_current_user', use_mock_anything=True)
APIHandler.get_current_user().AndReturn(self._me['username'])
self._mox.ReplayAll()
def tearDown(self):
super(UsersTests, self).tearDown()
self._mox.UnsetStubs()
def test_get_self_info(self):
r = self.get(self._path)
data = self._me
data.update({
'status': 1,
'id': 1,
'avatar': '',
'gold': Level['Gold']['Register'],
'following': 0,
'blocked': 0,
'following_url': '/api/user/following',
'blocked_url': '/api/user/blocked',
})
self.assertEqual(r.code, 200)
self.assertDictEqual(json.loads(r.body), data)
self._mox.VerifyAll()
def test_update_self_info(self):
data = {
'introduce': 'GodFather is me',
'location': 'ChangSha, China',
}
r = self.patch(self._path, body=data)
body = self._other
body.update(data)
body.update({
'status': 1,
'id': 1,
'avatar': '',
'gold': Level['Gold']['Register'],
'following': 0,
'blocked': 0,
'following_url': '/api/user/following',
'blocked_url': '/api/user/blocked',
})
self.assertEqual(r.code, 200)
self.assertDictEqual(json.loads(r.body), body)
self._mox.VerifyAll()
class FollowingTests(BaseTestCase):
_path = '/api/user/following'
def setUp(self):
super(UsersTests, self).setUp()
self._mox = mox.Mox()
self._other = DATA
self._me = {
'username': 'root',
'password': 'utellme',
'email': 'root@os.com',
}
User.create(**self._me)
User.create(**self._other)
Following.create(self._me['username'], self._other['username'])
self._mox.StubOutWithMock(APIHandler,
'get_current_user', use_mock_anything=True)
APIHandler.get_current_user().AndReturn(self._me['username'])
self._mox.ReplayAll()
def tearDown(self):
super(UsersTests, self).tearDown()
self._mox.UnsetStubs()
def get_followings(self):
body = self._other
body.update({
'status': 1,
'id': 1,
'avatar': '',
'gold': Level['Gold']['Register'],
'following': 0,
'blocked': 0,
'following_url': '/api/user/following',
'blocked_url': '/api/user/blocked',
})
r = self.get(self._path)
self.assertEqual(r.code, 200)
self.assertDictEqual(json.loads(r.body), body)
self._mox.VerifyAll()
class FollowOneTests(BaseTestCase):
_path = '/api/user/follow/{0}'
def setUp(self):
super(UsersTests, self).setUp()
self._mox = mox.Mox()
self._other = DATA
self._me = {
'username': 'root',
'password': 'utellme',
'email': 'root@os.com',
}
User.create(**self._me)
User.create(**self._other)
self._mox.StubOutWithMock(APIHandler,
'get_current_user', use_mock_anything=True)
APIHandler.get_current_user().AndReturn(self._me['username'])
self._mox.ReplayAll()
def tearDown(self):
super(UsersTests, self).tearDown()
self._mox.UnsetStubs()
def test_follow_one(self):
r = self.post(self._path.format(self._other['username']))
self.assertEqual(r.code, 200)
self.assertDictEqual(json.loads(r.body), {'status': 1})
exists = Following.get_by_user_following(
self._me['username'], self._other['username'])
self.assertIsNotNone(exists)
self._mox.VerifyAll()
class UnfollowOneTests(BaseTestCase):
_path = '/api/user/follow/{0}'
def setUp(self):
super(UsersTests, self).setUp()
self._mox = mox.Mox()
self._other = DATA
self._me = {
'username': 'root',
'password': 'utellme',
'email': 'root@os.com',
}
User.create(**self._me)
User.create(**self._other)
Following.create(self._me['username'], self._other['username'])
self._mox.StubOutWithMock(APIHandler,
'get_current_user', use_mock_anything=True)
APIHandler.get_current_user().AndReturn(self._me['username'])
self._mox.ReplayAll()
def tearDown(self):
super(UsersTests, self).tearDown()
self._mox.UnsetStubs()
def test_unfollow_one(self):
r = self.delete(self._path.format(self._other['username']))
self.assertEqual(r.code, 200)
self.assertDictEqual(json.loads(r.body), {'status': 1})
exists = Following.get_by_user_following(
self._me['username'], self._other['username'])
self.assertIsNone(exists)
self._mox.VerifyAll()
class BlockedTests(BaseTestCase):
_path = '/api/user/blocked'
def setUp(self):
super(UsersTests, self).setUp()
self._mox = mox.Mox()
self._other = DATA
self._me = {
'username': 'root',
'password': 'utellme',
'email': 'root@os.com',
}
User.create(**self._me)
User.create(**self._other)
Blocked.create(self._me['username'], self._other['username'])
self._mox.StubOutWithMock(APIHandler,
'get_current_user', use_mock_anything=True)
APIHandler.get_current_user().AndReturn(self._me['username'])
self._mox.ReplayAll()
def tearDown(self):
super(UsersTests, self).tearDown()
self._mox.UnsetStubs()
def get_followings(self):
body = self._other
body.update({
'status': 1,
'id': 1,
'avatar': '',
'gold': Level['Gold']['Register'],
'following': 0,
'blocked': 0,
'following_url': '/api/user/following',
'blocked_url': '/api/user/blocked',
})
r = self.get(self._path)
self.assertEqual(r.code, 200)
self.assertDictEqual(json.loads(r.body), body)
self._mox.VerifyAll()
class BlockOneTests(BaseTestCase):
_path = '/api/user/block/{0}'
def setUp(self):
super(UsersTests, self).setUp()
self._mox = mox.Mox()
self._other = DATA
self._me = {
'username': 'root',
'password': 'utellme',
'email': 'root@os.com',
}
User.create(**self._me)
User.create(**self._other)
self._mox.StubOutWithMock(APIHandler,
'get_current_user', use_mock_anything=True)
APIHandler.get_current_user().AndReturn(self._me['username'])
self._mox.ReplayAll()
def tearDown(self):
super(UsersTests, self).tearDown()
self._mox.UnsetStubs()
def test_follow_one(self):
r = self.post(self._path.format(self._other['username']))
self.assertEqual(r.code, 200)
self.assertDictEqual(json.loads(r.body), {'status': 1})
exists = Blocked.get_by_user_blocked(
self._me['username'], self._other['username'])
self.assertIsNotNone(exists)
self._mox.VerifyAll()
class UnblockOneTests(BaseTestCase):
_path = '/api/user/unblock/{0}'
def setUp(self):
super(UsersTests, self).setUp()
self._mox = mox.Mox()
self._other = DATA
self._me = {
'username': 'root',
'password': 'utellme',
'email': 'root@os.com',
}
User.create(**self._me)
User.create(**self._other)
Blocked.create(self._me['username'], self._other['username'])
self._mox.StubOutWithMock(APIHandler,
'get_current_user', use_mock_anything=True)
APIHandler.get_current_user().AndReturn(self._me['username'])
self._mox.ReplayAll()
def tearDown(self):
super(UsersTests, self).tearDown()
self._mox.UnsetStubs()
def test_unblock_one(self):
r = self.delete(self._path.format(self._other['username']))
self.assertEqual(r.code, 200)
self.assertDictEqual(json.loads(r.body), {'status': 1})
exists = Blocked.get_by_user_blocked(
self._me['username'], self._other['username'])
self.assertIsNone(exists)
self._mox.VerifyAll()
| 30.739003
| 77
| 0.560103
| 1,121
| 10,482
| 5.053524
| 0.119536
| 0.044484
| 0.051898
| 0.0609
| 0.805296
| 0.800353
| 0.800353
| 0.786231
| 0.786231
| 0.778994
| 0
| 0.012371
| 0.290498
| 10,482
| 340
| 78
| 30.829412
| 0.749361
| 0.002003
| 0
| 0.774648
| 0
| 0
| 0.159002
| 0.002008
| 0
| 0
| 0
| 0
| 0.084507
| 1
| 0.088028
| false
| 0.024648
| 0.024648
| 0
| 0.169014
| 0.003521
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
51eebea0dcf4734745c9c84d9e504bd433b86437
| 7,345
|
py
|
Python
|
tests/template2html/basic_creation_raises.py
|
derigible/project-builder
|
25e4f537b97b52c54d3a918c4414950b1123df82
|
[
"BSD-3-Clause"
] | null | null | null |
tests/template2html/basic_creation_raises.py
|
derigible/project-builder
|
25e4f537b97b52c54d3a918c4414950b1123df82
|
[
"BSD-3-Clause"
] | null | null | null |
tests/template2html/basic_creation_raises.py
|
derigible/project-builder
|
25e4f537b97b52c54d3a918c4414950b1123df82
|
[
"BSD-3-Clause"
] | null | null | null |
'''
Created on Feb 8, 2015
@author: derigible
'''
import os
from tests.base_test import BaseTest as base
from templater import testdata
from templater.structure import Template
from string import Template as stemp
from templater.parser import approved_block_types
class Template2HtmlTest(base):
'''
Tests the functionality of the templater module.
'''
test_dir = __test_data_dir__ = testdata.__path__[0]
working_templates = os.path.join(test_dir, "templates")
nonworking_templates = os.path.join(test_dir, "badtemplates")
allowed_additional_words = approved_block_types
not_allowed_additional_words = ("the", "keep", "nested", "inhert", "extend", "extends")
header = '''<meta charset="ISO-8859-1">
<title>Wilkins</title>
<link href="https://edge.fscdn.org/assets/docs/fs_logo_favicon.ico" rel="icon" type="image/x-icon" />
<!-- <link href="http://netdna.bootstrapcdn.com/twitter-bootstrap/2.3.1/css/bootstrap.min.css" rel="stylesheet" media="screen"> -->
<link href="/static/css/bootstrap.min.css" rel="stylesheet" media="screen">
<!-- <link href='https://edge.fscdn.org/assets/css/responsive-166fbb8fd4a3f5207a500bdf6c2d9186.css' rel='stylesheet' media='screen'> -->
<link href='/static/css/responsive-166fbb8fd4a3f5207a500bdf6c2d9186.css' rel='stylesheet' media='screen'>
<!-- <link href='https://edge.fscdn.org/assets/css/layout/theme-engage-8e8aed919ce18a2f4b2a470bfc58b928.css' rel='stylesheet' media='screen'> -->
<link href='/static/css/theme-engage-8e8aed919ce18a2f4b2a470bfc58b928.css' rel='stylesheet' media='screen'>
<style type="text/css">
#global-engage-header !css padding-top: 25px;}
h1 !css
padding-top: .5em;
padding-bottom: 0;
margin-bottom: 0;
}
h2 !css
font-size: 1.5em;
padding-bottom: 0;
margin-bottom: 0;
}
</style>'''
def test_endblock_without_startblock_found_raises_error(self):
html = stemp('''<!DOCTYPE html>
<html>
{% block header %}
<head>
$var
</head>
{% endblock heade %}
<body>
</body>
</html>''').substitute(var = self.header)
template = Template(html, self.working_templates)
with self.assertRaises(KeyError):
template.sections
def test_startblock_without_endblock_found_raises_error(self):
html = stemp('''<!DOCTYPE html>
<html>
{% block header %}
<head>
$var
</head>
<body>
</body>
</html>''').substitute(var = self.header)
template = Template(html, self.working_templates)
with self.assertRaises(KeyError):
template.sections
def test_startblock_missing_first_modulo_found_raises_error(self):
html = stemp('''<!DOCTYPE html>
<html>
{ block header %}
<head>
$var
</head>
{% endblock header %}
<body>
</body>
</html>''').substitute(var = self.header)
template = Template(html, self.working_templates)
with self.assertRaises(SyntaxError):
template.sections
def test_startblock_missing_second_modulo_found_raises_error(self):
html = stemp('''<!DOCTYPE html>
<html>
{% block header }
<head>
$var
</head>
{% endblock header %}
<body>
</body>
</html>''').substitute(var = self.header)
template = Template(html, self.working_templates)
with self.assertRaises(SyntaxError):
template.sections
def test_startblock_block_spelled_wrong_found_raises_error(self):
html = stemp('''<!DOCTYPE html>
<html>
{% blok header %}
<head>
$var
</head>
{% endblock header %}
<body>
</body>
</html>''').substitute(var = self.header)
template = Template(html, self.working_templates)
with self.assertRaises(SyntaxError):
template.sections
def test_startblock_missing_starting_brace_found_raises_error(self):
html = stemp('''<!DOCTYPE html>
<html>
% block header %}
<head>
$var
</head>
{% endblock header %}
<body>
</body>
</html>''').substitute(var = self.header)
template = Template(html, self.working_templates)
with self.assertRaises(SyntaxError):
template.sections
def test_startblock_missing_ending_brace_found_raises_error(self):
html = stemp('''<!DOCTYPE html>
<html>
{% block header %
<head>
$var
</head>
{% endblock header %}
<body>
</body>
</html>''').substitute(var = self.header)
template = Template(html, self.working_templates)
with self.assertRaises(SyntaxError):
template.sections
def test_endblock_missing_first_modulo_found_raises_error(self):
html = stemp('''<!DOCTYPE html>
<html>
{% block header %}
<head>
$var
</head>
{ endblock header %}
<body>
</body>
</html>''').substitute(var = self.header)
template = Template(html, self.working_templates)
with self.assertRaises(SyntaxError):
template.sections
def test_endblock_missing_second_modulo_found_raises_error(self):
html = stemp('''<!DOCTYPE html>
<html>
{% block header %}
<head>
$var
</head>
{% endblock header }
<body>
</body>
</html>''').substitute(var = self.header)
template = Template(html, self.working_templates)
with self.assertRaises(SyntaxError):
template.sections
def test_endblock_block_spelled_wrong_found_raises_error(self):
html = stemp('''<!DOCTYPE html>
<html>
{% block header %}
<head>
$var
</head>
{% endlock header %}
<body>
</body>
</html>''').substitute(var = self.header)
template = Template(html, self.working_templates)
with self.assertRaises(SyntaxError):
template.sections
def test_endblock_missing_starting_brace_found_raises_error(self):
html = stemp('''<!DOCTYPE html>
<html>
{% block header %}
<head>
$var
</head>
% endblock header %}
<body>
</body>
</html>''').substitute(var = self.header)
template = Template(html, self.working_templates)
with self.assertRaises(SyntaxError):
template.sections
def test_endblock_missing_ending_brace_found_raises_error(self):
html = stemp('''<!DOCTYPE html>
<html>
{% block header %}
<head>
$var
</head>
{% endblock header %
<body>
</body>
</html>''').substitute(var = self.header)
template = Template(html, self.working_templates)
with self.assertRaises(SyntaxError):
template.sections
def test_same_section_key_defined_twice_raises_error(self):
html = stemp('''<!DOCTYPE html>
<html>
{% block header %}
<head>
$var
</head>
{% endblock header %}
{% block header %}
<body>
</body>
</html>''').substitute(var = self.header)
template = Template(html, self.working_templates)
with self.assertRaises(KeyError):
template.sections
| 30.732218
| 153
| 0.608577
| 767
| 7,345
| 5.659713
| 0.186441
| 0.051601
| 0.044921
| 0.056899
| 0.823313
| 0.823313
| 0.811334
| 0.790371
| 0.76434
| 0.75144
| 0
| 0.018349
| 0.257999
| 7,345
| 239
| 154
| 30.732218
| 0.778165
| 0.012389
| 0
| 0.75
| 0
| 0.032407
| 0.42865
| 0.032356
| 0
| 0
| 0
| 0
| 0.060185
| 1
| 0.060185
| false
| 0
| 0.027778
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a40816cac9c77e74b3abc69212fe3097e1143bca
| 42
|
py
|
Python
|
autox/autox_competition/file_io/__init__.py
|
fanghy06/AutoX
|
0bad349ef1b047152e2608760fd5d197128be723
|
[
"Apache-2.0"
] | 499
|
2021-07-27T02:57:58.000Z
|
2022-03-28T12:08:27.000Z
|
autox/autox_competition/file_io/__init__.py
|
anonymouslycn/AutoX
|
ca1045c5919b4d97f1e4c2d8c235f63f6ec5af31
|
[
"Apache-2.0"
] | 9
|
2021-08-03T15:14:56.000Z
|
2022-03-11T07:06:06.000Z
|
autox/autox_competition/file_io/__init__.py
|
anonymouslycn/AutoX
|
ca1045c5919b4d97f1e4c2d8c235f63f6ec5af31
|
[
"Apache-2.0"
] | 87
|
2021-07-27T01:13:02.000Z
|
2022-03-29T02:14:09.000Z
|
from .read_data import read_data_from_path
| 42
| 42
| 0.904762
| 8
| 42
| 4.25
| 0.625
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 42
| 1
| 42
| 42
| 0.871795
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5cd3b0cf8fb11a1b5c2b75468bd7cb4d6e113a39
| 1,180
|
py
|
Python
|
code/args_v_kwargs.py
|
codingforentrepreneurs/questions
|
864d24c9e6a264c1f5e56d22e33ce11245b175ec
|
[
"MIT"
] | 17
|
2021-01-23T22:41:28.000Z
|
2022-03-16T19:11:39.000Z
|
code/args_v_kwargs.py
|
codingforentrepreneurs/questions
|
864d24c9e6a264c1f5e56d22e33ce11245b175ec
|
[
"MIT"
] | null | null | null |
code/args_v_kwargs.py
|
codingforentrepreneurs/questions
|
864d24c9e6a264c1f5e56d22e33ce11245b175ec
|
[
"MIT"
] | 9
|
2021-01-25T23:19:36.000Z
|
2022-03-09T15:38:42.000Z
|
# from https://www.youtube.com/watch?v=GdSJAZDsCZA
def my_func():
print("hello world")
my_func()
def my_func():
print("hello world")
my_func("abc")
def my_func(*args):
print("hello world", args)
my_func("abc", "abc", 123, "abc",)
def my_func(key=None, *args):
print("hello world", args)
my_func("abc", "abc", 123, "abc", key=123)
def my_func(*args, **kwargs):
print("hello world", args, kwargs)
my_func("abc", "abc", 123, "abc", key=123, abc=123)
def my_func(abc=None, *args, **kwargs):
print("hello world", args, kwargs)
my_func("abc", abc=123)
def my_func(abc=None, *args, **kwargs):
print("hello world", args, kwargs)
my_func(abc=123)
def my_func(abc=None, *args, **kwargs):
print("hello world", args, kwargs)
my_func(abc=123, "abc")
def my_func(arg_1, *args, **kwargs):
print("hello world", args, kwargs)
my_func(abc=123, "abc")
def my_func(*args, **kwargs):
print("hello world", args, kwargs)
my_func("abc", abc=123)
def my_random_django_view(request, **kwargs):
print(kwargs)
# Product.objects.get(id=kwargs.get('id'))
# path('my-product/:id')
my_random_django_view("request", id='some_id')
| 16.388889
| 51
| 0.641525
| 187
| 1,180
| 3.898396
| 0.171123
| 0.164609
| 0.148148
| 0.208505
| 0.835391
| 0.753086
| 0.740741
| 0.740741
| 0.63786
| 0.63786
| 0
| 0.03424
| 0.158475
| 1,180
| 71
| 52
| 16.619718
| 0.699899
| 0.094915
| 0
| 0.636364
| 0
| 0
| 0.156015
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7a2f7fab09a9874cb1335229bd94061181bfb642
| 193
|
py
|
Python
|
hokudai_furima/product/templatetags/show_product_list.py
|
TetsuFe/hokuma
|
b981a52b3bf8d7268bf791c5827bbe8af90afef6
|
[
"MIT"
] | 1
|
2021-02-13T03:51:42.000Z
|
2021-02-13T03:51:42.000Z
|
hokudai_furima/product/templatetags/show_product_list.py
|
TetsuFe/hokuma
|
b981a52b3bf8d7268bf791c5827bbe8af90afef6
|
[
"MIT"
] | null | null | null |
hokudai_furima/product/templatetags/show_product_list.py
|
TetsuFe/hokuma
|
b981a52b3bf8d7268bf791c5827bbe8af90afef6
|
[
"MIT"
] | 1
|
2021-09-18T09:25:48.000Z
|
2021-09-18T09:25:48.000Z
|
from django import template
register = template.Library()
@register.inclusion_tag('product/_product_list.html')
def show_product_list(product_list):
return {'product_list': product_list}
| 24.125
| 53
| 0.797927
| 25
| 193
| 5.84
| 0.56
| 0.376712
| 0.246575
| 0.30137
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098446
| 193
| 7
| 54
| 27.571429
| 0.83908
| 0
| 0
| 0
| 0
| 0
| 0.196891
| 0.134715
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0.2
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
8fa2ba4600f4f8116b960463cc3d26934d16b306
| 2,867
|
py
|
Python
|
b-tool/local/b-tool.py
|
shyamjangid07/Reverse-Engineering
|
469efabcd6057f7895d8d891f1fabdf2ffe730b0
|
[
"Apache-2.0"
] | 337
|
2020-08-15T12:22:14.000Z
|
2022-03-29T06:05:15.000Z
|
b-tool/local/b-tool.py
|
Wh014M/Reverse-Engineering
|
f7aae2c43f7ea4a6730964d085c07814b6660a53
|
[
"Apache-2.0"
] | 3
|
2020-11-12T14:30:48.000Z
|
2021-05-18T16:56:22.000Z
|
b-tool/local/b-tool.py
|
Wh014M/Reverse-Engineering
|
f7aae2c43f7ea4a6730964d085c07814b6660a53
|
[
"Apache-2.0"
] | 83
|
2020-08-15T00:22:58.000Z
|
2022-03-31T08:40:23.000Z
|
# Auther : Binyamin
# GitHub : https://github.com/Binyamin-binni
# YouTube Channel : Trick Proof
import marshal
exec(marshal.loads('c\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00@\x00\x00\x00s!\x00\x00\x00d\x00\x00d\x01\x00l\x00\x00Z\x00\x00e\x00\x00j\x01\x00d\x02\x00\x83\x01\x00d\x01\x00\x04Ud\x01\x00S(\x03\x00\x00\x00i\xff\xff\xff\xffNs$\x03\x00\x00x\x9c\xcdU]O\xdb0\x14}N~\xc5\x9d7A*hi\n\x05\x8d\t\xa4uB\x80\xc4(\xa2E0\xc1\x14%\xa9K\xad%v\x948\xe3\xe3\xd7\xef\xdaiS7m\xa1\x83=\xcc/q\xef=\xe7\\\xdf\xe3\x8f\xb28\x11\xa9\x04\x91mfO\xd9\xa6d1\xb5\xed\x8fj\xc0Y\xf7\xb8\x0bzjG\xe2^\xc0\x01\x10B\xec\xbb\xe6\xf6\xf6\xad\xfb\xe5\xb3\x1bw*\x03\x16\x8c\x12\xbe\x17w\xf6+c\x11\xa1\xc4\xb7&\xf8B{\x19\xc1\xd47A\xcb\x08%\xbe\x1d\x8fA+\xe2\xf7V\xc3\xcf\xf1vJ\xde\x8b},\xaf7\xb5K\xd1\xea3c\x9e\xe6..W\xc7\x0f\xf4\xbb\xdd3\xc0\xc9\x02\xda\x92\xee^\xab\xd6z\xa3)o5\xb3]n\xf2\xb4\xbb\xa5\xbc\x97\x8e\xde\xf2b\xc6\xc6\xadt`+\xac\xf9[a\x97\xb9\xdd\xb8\xfe\xd7\xc36\x94\xbf\xe6rDS\xac\xb6\x0f\x1d\xc6\x9f\xfc\x98qc\'\x8e\x99<\xc9\x03\x9d\x1dI\x99d\xfb[[\xf7L\x8e\xf2\xa0\x11\x8ax+\x18\x13\xea8\xe1\xcc0\xf4\x87\xc8\xfby@\x15\xad\x9f\xb2\xf0\x17\\\xa4B\x0cM\x17\xf0\xeeg\x89\x90\x86\xaeT\xc0D\xe1\x1a\xc18\xab\x8a\xbc\xb3\xd3\xa5\xe6\x8e\x87z|^\xc3\xd8\x1d|\xa5\xd6\xa7\x8b_\xb7/\xcd\x80\x8b\x81c3\xd0\xc2\xc0\xb5\x19\xd8\xc3@\xcf\x0c\xecb\xe0\xc2\x0c\xb41`\x0f\xe8\x10\xc2\xc0\xa9\xed\xeb\x15\x89\xac\x81o\xa7\xa4\xb1\xb3\x1eF\xd4O\xd7k\xc5\x13\n\xea5\x85,\xa24\xd1\xef\xa8\xa6\xc9\tK%\x1b:\xe9\xb8\xb5"\xe5.\xc85\x1bMw\xa2\x97\xa4\x8cK\xc8\xe4`*\x97d\x81\xf3\x8c,k(R\xa0\xc08<\xc3\x06\x90;N0f\xe1\xb2\x1a\x08\x17\xb9l<\xa4LR\x87\xd6f\xa3\xc3(\xcfF\x8e\n\xaa\xe2Ec1\xe5\xf9d!\xaaI=\xd1\xa5\x1d\xf5?`\xfc62\xbd\rr\xeb\xfe$\x1b\xc7\x1b\x04N\xcf{\xfd\xafgg\xd0\xb99%\xb5\n\xa6\x85\x98\x0b\x13s>\x07\xd9FH\xc7\x84\xec\\\xcd`.\x11\xd3D\x0c~\xe1\xe8\xe6\xb4O\xaa+\x8a\xd5\xa2u\'z\x8b\xac 
\xc6\rL\xfd\x07\x8f\xf1$\x97\xce\xb52\x08\x0e\x0f\x0f\xd5\xa1\xc2\xd6\xd9\x10\x14\xe2\x80h\xcb\n\x8f\x9dK\x05\xea\xd1\x88\x86\x12|\xf8\xedGl\x00"\x91Lp\xf8\xa0H\x96.b\xd1\xa8d\xbb\x9a\xae\r\x1b\xab\x8c\xed\xb2\xac\xe9\x01!i\x0c\xf5t\x08\x9fN\xba\xdf\x8f\xb6\x82GF*\xf9pP\xe4`m\r\xf0\xfeB\x18\tN_\xbf\xd1\xa5T\xe1\x02~\xf1\\\x90o\x82\xdf\xa7\xbe\xcc#_-<S\x1b\x02}!"8\xf1\xf1\x07\xa5\x1cNy&\xfd(\xa2\x03\xe8\xe5aH\xb3l\x98G\xd1S\xa1\xa4\x14\xce\xc5\x03<\x89\x1cB\x9fc\xfbH\x90#\x96\x81T\x1a(\x91g\xb9\x1fi\xb0q`\xdb\xda\x1d}\x86f\xfci)\x7f\xca3eU\x0f\x955{\x93*Fq2\x8fx\xb3U\xa5\xd8\xd8,=]\xe8\xd7\xf9\xcav\x95\x1a+:V\xb9\xe5\xedqh\x81o\xdb\xef\xf1m\'\xff\x97\xc6M\xd5^un\xe7\xea?\xb0\xae9\xb1\xceh\x9e>2\x89*\x88\xf1<\xee\xc7\xd4\xf3\x10\t\xc4\xf3b\x9fq\xcf#\xc5\xbbW\x88\xfd\x01;\xaf\xc5m(\x02\x00\x00\x00t\x04\x00\x00\x00zlibt\n\x00\x00\x00decompress(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\n\x00\x00\x00<binyamin>t\x08\x00\x00\x00<module>\x04\x00\x00\x00s\x02\x00\x00\x00\x0c\x01'))
| 573.4
| 2,755
| 0.734915
| 643
| 2,867
| 3.272162
| 0.499222
| 0.094106
| 0.089829
| 0.085551
| 0.038023
| 0.038023
| 0.027091
| 0.027091
| 0.015684
| 0.015684
| 0
| 0.245965
| 0.00593
| 2,867
| 5
| 2,755
| 573.4
| 0.492281
| 0.031392
| 0
| 0
| 0
| 1.5
| 0.626893
| 0.626532
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 10
|
64ede864065d8c0416140cc14575bf4eaebc8e4f
| 151
|
py
|
Python
|
tsrc/cli/version.py
|
tronje/tsrc
|
30b82bbd37e9a673833d553ac0d25f26cf270347
|
[
"BSD-3-Clause"
] | null | null | null |
tsrc/cli/version.py
|
tronje/tsrc
|
30b82bbd37e9a673833d553ac0d25f26cf270347
|
[
"BSD-3-Clause"
] | null | null | null |
tsrc/cli/version.py
|
tronje/tsrc
|
30b82bbd37e9a673833d553ac0d25f26cf270347
|
[
"BSD-3-Clause"
] | null | null | null |
""" Entry point for tsrc version """
import tsrc
def version() -> None:
""" show version number """
print("tsrc version", tsrc.__version__)
| 16.777778
| 43
| 0.635762
| 18
| 151
| 5.111111
| 0.611111
| 0.358696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.211921
| 151
| 8
| 44
| 18.875
| 0.773109
| 0.324503
| 0
| 0
| 0
| 0
| 0.136364
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
8f28e45fc176ecef25e4cf6b5d272b004fd36d0a
| 968
|
py
|
Python
|
torch_opt/torch.cat.py
|
ioyy900205/PyTorch_mess-around
|
90d255e17158699fd7902f7746b35fa18975112e
|
[
"MIT"
] | null | null | null |
torch_opt/torch.cat.py
|
ioyy900205/PyTorch_mess-around
|
90d255e17158699fd7902f7746b35fa18975112e
|
[
"MIT"
] | null | null | null |
torch_opt/torch.cat.py
|
ioyy900205/PyTorch_mess-around
|
90d255e17158699fd7902f7746b35fa18975112e
|
[
"MIT"
] | null | null | null |
import torch


def demo_cat(shape_a, shape_b, dim):
    """Demonstrate ``torch.cat`` on two tensors.

    Builds a ones tensor of ``shape_a`` and a twos tensor of ``shape_b``,
    concatenates them along dimension ``dim``, prints all three tensors and
    the result's size, and returns the concatenated tensor.

    The two shapes must match on every dimension except ``dim`` — that is a
    requirement of ``torch.cat`` itself.
    """
    a = torch.ones(*shape_a)
    b = 2 * torch.ones(*shape_b)
    c = torch.cat((a, b), dim)
    print("A:", a)
    print("B:", b)
    print("C:", c)
    print(c.size())
    return c


# 2-D examples: concatenate along rows (dim=0) and columns (dim=1).
demo_cat((2, 3), (4, 3), 0)
demo_cat((3, 2), (3, 4), 1)

# 3-D examples: one concatenation per dimension.
demo_cat((2, 2, 3), (4, 2, 3), 0)
demo_cat((2, 2, 3), (2, 4, 3), 1)
demo_cat((2, 3, 2), (2, 3, 4), 2)

# Indexing example: selecting index 7 along the first dimension of an
# (8, 3, 24, 24) tensor yields a (3, 24, 24) tensor.
torch.manual_seed(0)
x_2_input = torch.randn(8, 3, 24, 24)
c = x_2_input[7]
print(c)
print(c.size())
| 17.6
| 36
| 0.60124
| 223
| 968
| 2.587444
| 0.156951
| 0.124783
| 0.103986
| 0.114385
| 0.811092
| 0.769497
| 0.769497
| 0.736568
| 0.736568
| 0.644714
| 0
| 0.089655
| 0.10124
| 968
| 54
| 37
| 17.925926
| 0.573563
| 0.21281
| 0
| 0.634146
| 0
| 0
| 0.040541
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.02439
| 0
| 0.02439
| 0.536585
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
8f648fe383bf0a579c3bc51328aa6fd35cc38f25
| 12,221
|
py
|
Python
|
haychecker/_test/dhc/timeliness_test.py
|
fruttasecca/hay_checker
|
2bbf4e8e90e0abc590dd74080fb6e4f445056354
|
[
"MIT"
] | 2
|
2019-05-22T08:24:38.000Z
|
2020-12-04T13:36:30.000Z
|
haychecker/_test/dhc/timeliness_test.py
|
fruttasecca/hay_checker
|
2bbf4e8e90e0abc590dd74080fb6e4f445056354
|
[
"MIT"
] | null | null | null |
haychecker/_test/dhc/timeliness_test.py
|
fruttasecca/hay_checker
|
2bbf4e8e90e0abc590dd74080fb6e4f445056354
|
[
"MIT"
] | 3
|
2018-09-15T13:40:40.000Z
|
2021-06-29T23:31:18.000Z
|
import random
import unittest
import pandas as pd
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, to_timestamp, to_date
from pyspark.sql.types import StringType, StructField, StructType, IntegerType, FloatType
from haychecker.dhc.metrics import timeliness
def _nullify_equal(sentinel, spark_type):
    """Build a Spark UDF mapping values equal to *sentinel* to None, passing others through."""
    return udf(lambda value: None if value == sentinel else value, spark_type)


def _always_null(spark_type):
    """Build a Spark UDF mapping every value to None."""
    return udf(lambda _value: None, spark_type)


# UDFs used by the tests to inject nulls into dataframe columns.
replace_empty_with_null = _nullify_equal("", StringType())
replace_0_with_null = _nullify_equal(0, IntegerType())
replace_0dot_with_null = _nullify_equal(0., FloatType())
replace_every_string_with_null = _always_null(StringType())
replace_every_int_with_null = _always_null(IntegerType())
replace_every_float_with_null = _always_null(FloatType())
class TestTimeliness(unittest.TestCase):
    """Tests for the haychecker ``timeliness`` metric on Spark dataframes.

    Based on the assertions below, ``timeliness`` appears to return, per
    column, the percentage of non-null rows strictly earlier than ``value``
    under the given ``dateFormat``/``timeFormat`` — TODO confirm against the
    metric's own documentation.
    """

    def __init__(self, *args, **kwargs):
        """Create the test case and a local two-core Spark session shared by the tests."""
        super(TestTimeliness, self).__init__(*args, **kwargs)
        self.spark = SparkSession.builder.master("local[2]").appName("timeliness_test").getOrCreate()
        # Silence Spark's default INFO-level chatter during test runs.
        self.spark.sparkContext.setLogLevel("ERROR")

    def test_empty(self):
        """An empty dataframe scores 100 for every requested column."""
        data = pd.DataFrame()
        data["c1"] = []
        data["c2"] = []
        schema = [StructField("c1", StringType(), True), StructField("c2", StringType(), True)]
        df = self.spark.createDataFrame(data, StructType(schema))
        r1, r2 = timeliness(["c1", "c2"], dateFormat="dd:MM:yyyy", df=df, value="10:22:1980")
        self.assertEqual(r1, 100.)
        self.assertEqual(r2, 100.)

    def test_allnull(self):
        """Columns containing only nulls score 0 for both date and time formats."""
        data = pd.DataFrame()
        data["c1"] = [chr(i) for i in range(100)]
        data["c2"] = [chr(i + 1) for i in range(100)]
        schema = [StructField("c1", StringType(), True), StructField("c2", StringType(), True)]
        df = self.spark.createDataFrame(data, StructType(schema))
        # Overwrite every value with null via the module-level UDF.
        df = df.withColumn("c1", replace_every_string_with_null(df["c1"]))
        df = df.withColumn("c2", replace_every_string_with_null(df["c2"]))
        r1, r2 = timeliness(["c1", "c2"], dateFormat="dd:MM:yyyy", df=df, value="10:22:1980")
        self.assertEqual(r1, 0.)
        self.assertEqual(r2, 0.)
        r1, r2 = timeliness(["c1", "c2"], timeFormat="ss:mm:HH", df=df, value="10:22:19")
        self.assertEqual(r1, 0.)
        self.assertEqual(r2, 0.)

    def test_dateformat(self):
        """timeliness with ``dateFormat`` on string, timestamp and date columns."""
        format = "dd/MM/yyyy"
        # test wrong type of column
        data = pd.DataFrame()
        dates = [i for i in range(100)]
        data["c1"] = dates
        df = self.spark.createDataFrame(data)
        value = "21/05/2000"
        # An integer column is not a valid date column; the metric exits.
        with self.assertRaises(SystemExit) as cm:
            r1 = timeliness(["c1"], dateFormat=format, df=df, value=value)
        # test correct type
        data = pd.DataFrame()
        # 50/50 split of two dates; c2/c3 are typed (timestamp/date) copies of c1.
        dates = ["10/05/2000" for _ in range(50)]
        dates.extend(["20/05/2000" for _ in range(50)])
        random.shuffle(dates)
        data["c1"] = dates
        df = self.spark.createDataFrame(data)
        df = df.withColumn("c2", to_timestamp(df["c1"], format))
        df = df.withColumn("c3", to_date(df["c1"], format))
        value = "21/05/2000"
        r1, r2, r3 = timeliness(["c1", "c2", "c3"], dateFormat=format, df=df, value=value)
        self.assertEqual(r1, 100.)
        self.assertEqual(r2, 100.)
        self.assertEqual(r3, 100.)
        value = "20/05/2000"
        r1, r2, r3 = timeliness(["c1", "c2", "c3"], dateFormat=format, df=df, value=value)
        self.assertEqual(r1, 50.)
        self.assertEqual(r2, 50.)
        self.assertEqual(r3, 50.)
        value = "10/05/2000"
        r1, r2, r3 = timeliness(["c1", "c2", "c3"], dateFormat=format, df=df, value=value)
        self.assertEqual(r1, 0.)
        self.assertEqual(r2, 0.)
        self.assertEqual(r3, 0.)
        value = "12/12/1999"
        r1, r2, r3 = timeliness(["c1", "c2", "c3"], dateFormat=format, df=df, value=value)
        self.assertEqual(r1, 0.)
        self.assertEqual(r2, 0.)
        self.assertEqual(r3, 0.)
        # Same dataset but with 20 values (10 at each end, pre-shuffle) nulled out;
        # expected percentages drop accordingly (100 -> 80, 50 -> 40).
        data = pd.DataFrame()
        dates = ["10/05/2000" for _ in range(50)]
        dates.extend(["20/05/2000" for _ in range(50)])
        for i in range(10):
            dates[i] = ""
            dates[-(i + 1)] = ""
        random.shuffle(dates)
        data["c1"] = dates
        df = self.spark.createDataFrame(data)
        df = df.withColumn("c1", replace_empty_with_null(df["c1"]))
        df = df.withColumn("c2", to_timestamp(df["c1"], format))
        df = df.withColumn("c3", to_date(df["c1"], format))
        value = "21/05/2000"
        r1, r2, r3 = timeliness(["c1", "c2", "c3"], dateFormat=format, df=df, value=value)
        self.assertEqual(r1, 80.)
        self.assertEqual(r2, 80.)
        self.assertEqual(r3, 80.)
        value = "20/05/2000"
        r1, r2, r3 = timeliness(["c1", "c2", "c3"], dateFormat=format, df=df, value=value)
        self.assertEqual(r1, 40.)
        self.assertEqual(r2, 40.)
        self.assertEqual(r3, 40.)
        value = "10/05/2000"
        r1, r2, r3 = timeliness(["c1", "c2", "c3"], dateFormat=format, df=df, value=value)
        self.assertEqual(r1, 0.)
        self.assertEqual(r2, 0.)
        self.assertEqual(r3, 0.)
        value = "12/12/1999"
        r1, r2, r3 = timeliness(["c1", "c2", "c3"], dateFormat=format, df=df, value=value)
        self.assertEqual(r1, 0.)
        self.assertEqual(r2, 0.)
        self.assertEqual(r3, 0.)

    def test_timeformat_nodate(self):
        """timeliness with a ``timeFormat`` that carries no date component."""
        format = "ss:HH:mm"
        # test wrong type of column
        data = pd.DataFrame()
        times = [i for i in range(100)]
        data["c1"] = times
        df = self.spark.createDataFrame(data)
        value = "21:05:50"
        # An integer column is not a valid time column; the metric exits.
        with self.assertRaises(SystemExit) as cm:
            r1 = timeliness(["c1"], timeFormat=format, df=df, value=value)
        # test correct type
        data = pd.DataFrame()
        times = ["10:05:50" for _ in range(50)]
        times.extend(["01:18:01" for _ in range(50)])
        random.shuffle(times)
        data["c1"] = times
        df = self.spark.createDataFrame(data)
        df = df.withColumn("c2", to_timestamp(df["c1"], format))
        df = df.withColumn("c3", to_timestamp(df["c1"], format))
        value = "01:19:01"
        r1, r2, r3 = timeliness(["c1", "c2", "c3"], timeFormat=format, df=df, value=value)
        self.assertEqual(r1, 100.)
        self.assertEqual(r2, 100.)
        self.assertEqual(r3, 100.)
        value = "00:18:01"
        r1, r2, r3 = timeliness(["c1", "c2", "c3"], timeFormat=format, df=df, value=value)
        self.assertEqual(r1, 50.)
        self.assertEqual(r2, 50.)
        self.assertEqual(r3, 50.)
        value = "10:05:50"
        r1, r2, r3 = timeliness(["c1", "c2", "c3"], timeFormat=format, df=df, value=value)
        self.assertEqual(r1, 0.)
        self.assertEqual(r2, 0.)
        self.assertEqual(r3, 0.)
        value = "00:00:00"
        r1, r2, r3 = timeliness(["c1", "c2", "c3"], timeFormat=format, df=df, value=value)
        self.assertEqual(r1, 0.)
        self.assertEqual(r2, 0.)
        self.assertEqual(r3, 0.)
        # Same dataset with 20 values nulled; percentages drop 100 -> 80, 50 -> 40.
        data = pd.DataFrame()
        times = ["10:05:50" for _ in range(50)]
        times.extend(["00:18:01" for _ in range(50)])
        for i in range(10):
            times[i] = ""
            times[-(i + 1)] = ""
        random.shuffle(times)
        data["c1"] = times
        df = self.spark.createDataFrame(data)
        df = df.withColumn("c1", replace_empty_with_null(df["c1"]))
        df = df.withColumn("c2", to_timestamp(df["c1"], format))
        df = df.withColumn("c3", to_timestamp(df["c1"], format))
        value = "00:19:00"
        r1, r2, r3 = timeliness(["c1", "c2", "c3"], timeFormat=format, df=df, value=value)
        self.assertEqual(r1, 80.)
        self.assertEqual(r2, 80.)
        self.assertEqual(r3, 80.)
        value = "00:18:01"
        r1, r2, r3 = timeliness(["c1", "c2", "c3"], timeFormat=format, df=df, value=value)
        self.assertEqual(r1, 40.)
        self.assertEqual(r2, 40.)
        self.assertEqual(r3, 40.)
        value = "10:05:50"
        r1, r2, r3 = timeliness(["c1", "c2", "c3"], timeFormat=format, df=df, value=value)
        self.assertEqual(r1, 0.)
        self.assertEqual(r2, 0.)
        self.assertEqual(r3, 0.)
        value = "00:00:00"
        r1, r2, r3 = timeliness(["c1", "c2", "c3"], timeFormat=format, df=df, value=value)
        self.assertEqual(r1, 0.)
        self.assertEqual(r2, 0.)
        self.assertEqual(r3, 0.)

    def test_timeformat_withdate(self):
        """timeliness with a ``timeFormat`` that includes a date component."""
        format = "dd/MM/yyyy ss:HH:mm"
        # test wrong type of column
        data = pd.DataFrame()
        times = [i for i in range(100)]
        data["c1"] = times
        df = self.spark.createDataFrame(data)
        value = "01/10/1900 21:05:50"
        # An integer column is not a valid datetime column; the metric exits.
        with self.assertRaises(SystemExit) as cm:
            r1 = timeliness(["c1"], timeFormat=format, df=df, value=value)
        # test correct type
        data = pd.DataFrame()
        # 200 rows split 50/50/100 over three datetimes -> expected scores 100/50/25.
        times = ["01/10/1900 21:05:50" for _ in range(50)]
        times.extend(["01/10/1900 01:18:01" for _ in range(50)])
        times.extend(["21/10/1900 01:18:01" for _ in range(100)])
        random.shuffle(times)
        data["c1"] = times
        df = self.spark.createDataFrame(data)
        df = df.withColumn("c2", to_timestamp(df["c1"], format))
        df = df.withColumn("c3", to_timestamp(df["c1"], format))
        value = "21/10/1900 01:19:01"
        r1, r2, r3 = timeliness(["c1", "c2", "c3"], timeFormat=format, df=df, value=value)
        self.assertEqual(r1, 100.)
        self.assertEqual(r2, 100.)
        self.assertEqual(r3, 100.)
        value = "21/10/1900 01:18:01"
        r1, r2, r3 = timeliness(["c1", "c2", "c3"], timeFormat=format, df=df, value=value)
        self.assertEqual(r1, 50.)
        self.assertEqual(r2, 50.)
        self.assertEqual(r3, 50.)
        value = "01/10/1900 01:18:01"
        r1, r2, r3 = timeliness(["c1", "c2", "c3"], timeFormat=format, df=df, value=value)
        self.assertEqual(r1, 25.)
        self.assertEqual(r2, 25.)
        self.assertEqual(r3, 25.)
        value = "01/10/1900 21:05:10"
        r1, r2, r3 = timeliness(["c1", "c2", "c3"], timeFormat=format, df=df, value=value)
        self.assertEqual(r1, 0.)
        self.assertEqual(r2, 0.)
        self.assertEqual(r3, 0.)
        value = "00/00/0000 00:00:00"
        r1, r2, r3 = timeliness(["c1", "c2", "c3"], timeFormat=format, df=df, value=value)
        self.assertEqual(r1, 0.)
        self.assertEqual(r2, 0.)
        self.assertEqual(r3, 0.)
        # Same dataset with 20 of the 200 values nulled; scores scale down
        # accordingly (100 -> 90, 50 -> 45, 25 -> 20).
        data = pd.DataFrame()
        times = ["01/10/1900 21:05:50" for _ in range(50)]
        times.extend(["01/10/1900 01:18:01" for _ in range(50)])
        times.extend(["21/10/1900 01:18:01" for _ in range(100)])
        for i in range(10):
            times[i] = ""
            times[-(i + 1)] = ""
        random.shuffle(times)
        data["c1"] = times
        df = self.spark.createDataFrame(data)
        df = df.withColumn("c1", replace_empty_with_null(df["c1"]))
        df = df.withColumn("c2", to_timestamp(df["c1"], format))
        df = df.withColumn("c3", to_timestamp(df["c1"], format))
        value = "21/10/1900 01:19:01"
        r1, r2, r3 = timeliness(["c1", "c2", "c3"], timeFormat=format, df=df, value=value)
        self.assertEqual(r1, 90.)
        self.assertEqual(r2, 90.)
        self.assertEqual(r3, 90.)
        value = "21/10/1900 01:18:01"
        r1, r2, r3 = timeliness(["c1", "c2", "c3"], timeFormat=format, df=df, value=value)
        self.assertEqual(r1, 45.)
        self.assertEqual(r2, 45.)
        self.assertEqual(r3, 45.)
        value = "01/10/1900 01:18:01"
        r1, r2, r3 = timeliness(["c1", "c2", "c3"], timeFormat=format, df=df, value=value)
        self.assertEqual(r1, 20.)
        self.assertEqual(r2, 20.)
        self.assertEqual(r3, 20.)
        value = "01/10/1900 21:05:10"
        r1, r2, r3 = timeliness(["c1", "c2", "c3"], timeFormat=format, df=df, value=value)
        self.assertEqual(r1, 0.)
        self.assertEqual(r2, 0.)
        self.assertEqual(r3, 0.)
        value = "00/00/0000 00:00:00"
        r1, r2, r3 = timeliness(["c1", "c2", "c3"], timeFormat=format, df=df, value=value)
        self.assertEqual(r1, 0.)
        self.assertEqual(r2, 0.)
        self.assertEqual(r3, 0.)
| 38.674051
| 101
| 0.568448
| 1,642
| 12,221
| 4.180877
| 0.076736
| 0.18354
| 0.050983
| 0.063365
| 0.874727
| 0.857975
| 0.831173
| 0.826074
| 0.79228
| 0.781355
| 0
| 0.104938
| 0.259226
| 12,221
| 315
| 102
| 38.796825
| 0.653375
| 0.010719
| 0
| 0.788462
| 0
| 0
| 0.078954
| 0
| 0
| 0
| 0
| 0
| 0.334615
| 1
| 0.023077
| false
| 0
| 0.026923
| 0
| 0.053846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8f6c115730ad599785b37da1bb928734243484df
| 79
|
py
|
Python
|
baseline/__init__.py
|
yufuinn/baseline
|
9226e5704b18da1661b1d46a688885e48eba3952
|
[
"MIT"
] | null | null | null |
baseline/__init__.py
|
yufuinn/baseline
|
9226e5704b18da1661b1d46a688885e48eba3952
|
[
"MIT"
] | null | null | null |
baseline/__init__.py
|
yufuinn/baseline
|
9226e5704b18da1661b1d46a688885e48eba3952
|
[
"MIT"
] | null | null | null |
from baseline import utils
#from baseline import tf
from baseline import torch
| 19.75
| 26
| 0.835443
| 12
| 79
| 5.5
| 0.5
| 0.545455
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151899
| 79
| 3
| 27
| 26.333333
| 0.985075
| 0.291139
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
56c6f12cdb9a99bd71321dc78e45ed6841caab7a
| 21,586
|
py
|
Python
|
sdk/python/pulumi_pagerduty/ruleset_rule.py
|
pulumi/pulumi-pagerduty
|
1c08849cda3d5fccf5eb9f615dc004b1f8f90555
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2020-05-27T08:18:35.000Z
|
2021-07-31T08:40:03.000Z
|
sdk/python/pulumi_pagerduty/ruleset_rule.py
|
pulumi/pulumi-pagerduty
|
1c08849cda3d5fccf5eb9f615dc004b1f8f90555
|
[
"ECL-2.0",
"Apache-2.0"
] | 48
|
2020-05-26T10:59:40.000Z
|
2022-03-31T15:41:54.000Z
|
sdk/python/pulumi_pagerduty/ruleset_rule.py
|
pulumi/pulumi-pagerduty
|
1c08849cda3d5fccf5eb9f615dc004b1f8f90555
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-05-26T17:51:56.000Z
|
2020-05-26T17:51:56.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
# Public API of this generated module.
__all__ = ['RulesetRuleArgs', 'RulesetRule']
@pulumi.input_type
class RulesetRuleArgs:
    """Input arguments for constructing a ``RulesetRule`` resource.

    Auto-generated Pulumi input type: properties are stored via
    ``pulumi.set``/``pulumi.get`` rather than plain attributes.
    """

    def __init__(__self__, *,
                 ruleset: pulumi.Input[str],
                 actions: Optional[pulumi.Input['RulesetRuleActionsArgs']] = None,
                 conditions: Optional[pulumi.Input['RulesetRuleConditionsArgs']] = None,
                 disabled: Optional[pulumi.Input[bool]] = None,
                 position: Optional[pulumi.Input[int]] = None,
                 time_frame: Optional[pulumi.Input['RulesetRuleTimeFrameArgs']] = None,
                 variables: Optional[pulumi.Input[Sequence[pulumi.Input['RulesetRuleVariableArgs']]]] = None):
        """
        The set of arguments for constructing a RulesetRule resource.
        :param pulumi.Input[str] ruleset: The ID of the ruleset that the rule belongs to.
        :param pulumi.Input['RulesetRuleActionsArgs'] actions: Actions to apply to an event if the conditions match.
        :param pulumi.Input['RulesetRuleConditionsArgs'] conditions: Conditions evaluated to check if an event matches this event rule. Is always empty for the catch all rule, though.
        :param pulumi.Input[bool] disabled: Indicates whether the rule is disabled and would therefore not be evaluated.
        :param pulumi.Input[int] position: Position/index of the rule within the ruleset.
        :param pulumi.Input['RulesetRuleTimeFrameArgs'] time_frame: Settings for [scheduling the rule](https://support.pagerduty.com/docs/rulesets#section-scheduled-event-rules).
        :param pulumi.Input[Sequence[pulumi.Input['RulesetRuleVariableArgs']]] variables: Populate variables from event payloads and use those variables in other event actions. *NOTE: A rule can have multiple `variable` objects.*
        """
        # `ruleset` is the only required property; optional ones are set only when provided.
        pulumi.set(__self__, "ruleset", ruleset)
        if actions is not None:
            pulumi.set(__self__, "actions", actions)
        if conditions is not None:
            pulumi.set(__self__, "conditions", conditions)
        if disabled is not None:
            pulumi.set(__self__, "disabled", disabled)
        if position is not None:
            pulumi.set(__self__, "position", position)
        if time_frame is not None:
            pulumi.set(__self__, "time_frame", time_frame)
        if variables is not None:
            pulumi.set(__self__, "variables", variables)

    @property
    @pulumi.getter
    def ruleset(self) -> pulumi.Input[str]:
        """
        The ID of the ruleset that the rule belongs to.
        """
        return pulumi.get(self, "ruleset")

    @ruleset.setter
    def ruleset(self, value: pulumi.Input[str]):
        pulumi.set(self, "ruleset", value)

    @property
    @pulumi.getter
    def actions(self) -> Optional[pulumi.Input['RulesetRuleActionsArgs']]:
        """
        Actions to apply to an event if the conditions match.
        """
        return pulumi.get(self, "actions")

    @actions.setter
    def actions(self, value: Optional[pulumi.Input['RulesetRuleActionsArgs']]):
        pulumi.set(self, "actions", value)

    @property
    @pulumi.getter
    def conditions(self) -> Optional[pulumi.Input['RulesetRuleConditionsArgs']]:
        """
        Conditions evaluated to check if an event matches this event rule. Is always empty for the catch all rule, though.
        """
        return pulumi.get(self, "conditions")

    @conditions.setter
    def conditions(self, value: Optional[pulumi.Input['RulesetRuleConditionsArgs']]):
        pulumi.set(self, "conditions", value)

    @property
    @pulumi.getter
    def disabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates whether the rule is disabled and would therefore not be evaluated.
        """
        return pulumi.get(self, "disabled")

    @disabled.setter
    def disabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disabled", value)

    @property
    @pulumi.getter
    def position(self) -> Optional[pulumi.Input[int]]:
        """
        Position/index of the rule within the ruleset.
        """
        return pulumi.get(self, "position")

    @position.setter
    def position(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "position", value)

    @property
    @pulumi.getter(name="timeFrame")
    def time_frame(self) -> Optional[pulumi.Input['RulesetRuleTimeFrameArgs']]:
        """
        Settings for [scheduling the rule](https://support.pagerduty.com/docs/rulesets#section-scheduled-event-rules).
        """
        return pulumi.get(self, "time_frame")

    @time_frame.setter
    def time_frame(self, value: Optional[pulumi.Input['RulesetRuleTimeFrameArgs']]):
        pulumi.set(self, "time_frame", value)

    @property
    @pulumi.getter
    def variables(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RulesetRuleVariableArgs']]]]:
        """
        Populate variables from event payloads and use those variables in other event actions. *NOTE: A rule can have multiple `variable` objects.*
        """
        return pulumi.get(self, "variables")

    @variables.setter
    def variables(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RulesetRuleVariableArgs']]]]):
        pulumi.set(self, "variables", value)
@pulumi.input_type
class _RulesetRuleState:
    """State type for looking up / filtering existing ``RulesetRule`` resources.

    Mirrors ``RulesetRuleArgs`` except that every property — including
    ``ruleset`` — is optional, since state lookups may be partial.
    """

    def __init__(__self__, *,
                 actions: Optional[pulumi.Input['RulesetRuleActionsArgs']] = None,
                 conditions: Optional[pulumi.Input['RulesetRuleConditionsArgs']] = None,
                 disabled: Optional[pulumi.Input[bool]] = None,
                 position: Optional[pulumi.Input[int]] = None,
                 ruleset: Optional[pulumi.Input[str]] = None,
                 time_frame: Optional[pulumi.Input['RulesetRuleTimeFrameArgs']] = None,
                 variables: Optional[pulumi.Input[Sequence[pulumi.Input['RulesetRuleVariableArgs']]]] = None):
        """
        Input properties used for looking up and filtering RulesetRule resources.
        :param pulumi.Input['RulesetRuleActionsArgs'] actions: Actions to apply to an event if the conditions match.
        :param pulumi.Input['RulesetRuleConditionsArgs'] conditions: Conditions evaluated to check if an event matches this event rule. Is always empty for the catch all rule, though.
        :param pulumi.Input[bool] disabled: Indicates whether the rule is disabled and would therefore not be evaluated.
        :param pulumi.Input[int] position: Position/index of the rule within the ruleset.
        :param pulumi.Input[str] ruleset: The ID of the ruleset that the rule belongs to.
        :param pulumi.Input['RulesetRuleTimeFrameArgs'] time_frame: Settings for [scheduling the rule](https://support.pagerduty.com/docs/rulesets#section-scheduled-event-rules).
        :param pulumi.Input[Sequence[pulumi.Input['RulesetRuleVariableArgs']]] variables: Populate variables from event payloads and use those variables in other event actions. *NOTE: A rule can have multiple `variable` objects.*
        """
        if actions is not None:
            pulumi.set(__self__, "actions", actions)
        if conditions is not None:
            pulumi.set(__self__, "conditions", conditions)
        if disabled is not None:
            pulumi.set(__self__, "disabled", disabled)
        if position is not None:
            pulumi.set(__self__, "position", position)
        if ruleset is not None:
            pulumi.set(__self__, "ruleset", ruleset)
        if time_frame is not None:
            pulumi.set(__self__, "time_frame", time_frame)
        if variables is not None:
            pulumi.set(__self__, "variables", variables)

    @property
    @pulumi.getter
    def actions(self) -> Optional[pulumi.Input['RulesetRuleActionsArgs']]:
        """
        Actions to apply to an event if the conditions match.
        """
        return pulumi.get(self, "actions")

    @actions.setter
    def actions(self, value: Optional[pulumi.Input['RulesetRuleActionsArgs']]):
        pulumi.set(self, "actions", value)

    @property
    @pulumi.getter
    def conditions(self) -> Optional[pulumi.Input['RulesetRuleConditionsArgs']]:
        """
        Conditions evaluated to check if an event matches this event rule. Is always empty for the catch all rule, though.
        """
        return pulumi.get(self, "conditions")

    @conditions.setter
    def conditions(self, value: Optional[pulumi.Input['RulesetRuleConditionsArgs']]):
        pulumi.set(self, "conditions", value)

    @property
    @pulumi.getter
    def disabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates whether the rule is disabled and would therefore not be evaluated.
        """
        return pulumi.get(self, "disabled")

    @disabled.setter
    def disabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disabled", value)

    @property
    @pulumi.getter
    def position(self) -> Optional[pulumi.Input[int]]:
        """
        Position/index of the rule within the ruleset.
        """
        return pulumi.get(self, "position")

    @position.setter
    def position(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "position", value)

    @property
    @pulumi.getter
    def ruleset(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the ruleset that the rule belongs to.
        """
        return pulumi.get(self, "ruleset")

    @ruleset.setter
    def ruleset(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ruleset", value)

    @property
    @pulumi.getter(name="timeFrame")
    def time_frame(self) -> Optional[pulumi.Input['RulesetRuleTimeFrameArgs']]:
        """
        Settings for [scheduling the rule](https://support.pagerduty.com/docs/rulesets#section-scheduled-event-rules).
        """
        return pulumi.get(self, "time_frame")

    @time_frame.setter
    def time_frame(self, value: Optional[pulumi.Input['RulesetRuleTimeFrameArgs']]):
        pulumi.set(self, "time_frame", value)

    @property
    @pulumi.getter
    def variables(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RulesetRuleVariableArgs']]]]:
        """
        Populate variables from event payloads and use those variables in other event actions. *NOTE: A rule can have multiple `variable` objects.*
        """
        return pulumi.get(self, "variables")

    @variables.setter
    def variables(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RulesetRuleVariableArgs']]]]):
        pulumi.set(self, "variables", value)
class RulesetRule(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
actions: Optional[pulumi.Input[pulumi.InputType['RulesetRuleActionsArgs']]] = None,
conditions: Optional[pulumi.Input[pulumi.InputType['RulesetRuleConditionsArgs']]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
position: Optional[pulumi.Input[int]] = None,
ruleset: Optional[pulumi.Input[str]] = None,
time_frame: Optional[pulumi.Input[pulumi.InputType['RulesetRuleTimeFrameArgs']]] = None,
variables: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RulesetRuleVariableArgs']]]]] = None,
__props__=None):
"""
An [event rule](https://support.pagerduty.com/docs/rulesets#section-create-event-rules) allows you to set actions that should be taken on events that meet your designated rule criteria.
## Import
Ruleset rules can be imported using using the related `ruleset` ID and the `ruleset_rule` ID separated by a dot, e.g.
```sh
$ pulumi import pagerduty:index/rulesetRule:RulesetRule main a19cdca1-3d5e-4b52-bfea-8c8de04da243.19acac92-027a-4ea0-b06c-bbf516519601
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['RulesetRuleActionsArgs']] actions: Actions to apply to an event if the conditions match.
:param pulumi.Input[pulumi.InputType['RulesetRuleConditionsArgs']] conditions: Conditions evaluated to check if an event matches this event rule. Is always empty for the catch all rule, though.
:param pulumi.Input[bool] disabled: Indicates whether the rule is disabled and would therefore not be evaluated.
:param pulumi.Input[int] position: Position/index of the rule within the ruleset.
:param pulumi.Input[str] ruleset: The ID of the ruleset that the rule belongs to.
:param pulumi.Input[pulumi.InputType['RulesetRuleTimeFrameArgs']] time_frame: Settings for [scheduling the rule](https://support.pagerduty.com/docs/rulesets#section-scheduled-event-rules).
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RulesetRuleVariableArgs']]]] variables: Populate variables from event payloads and use those variables in other event actions. *NOTE: A rule can have multiple `variable` objects.*
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: RulesetRuleArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
An [event rule](https://support.pagerduty.com/docs/rulesets#section-create-event-rules) allows you to set actions that should be taken on events that meet your designated rule criteria.
## Import
Ruleset rules can be imported using using the related `ruleset` ID and the `ruleset_rule` ID separated by a dot, e.g.
```sh
$ pulumi import pagerduty:index/rulesetRule:RulesetRule main a19cdca1-3d5e-4b52-bfea-8c8de04da243.19acac92-027a-4ea0-b06c-bbf516519601
```
:param str resource_name: The name of the resource.
:param RulesetRuleArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RulesetRuleArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
actions: Optional[pulumi.Input[pulumi.InputType['RulesetRuleActionsArgs']]] = None,
conditions: Optional[pulumi.Input[pulumi.InputType['RulesetRuleConditionsArgs']]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
position: Optional[pulumi.Input[int]] = None,
ruleset: Optional[pulumi.Input[str]] = None,
time_frame: Optional[pulumi.Input[pulumi.InputType['RulesetRuleTimeFrameArgs']]] = None,
variables: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RulesetRuleVariableArgs']]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RulesetRuleArgs.__new__(RulesetRuleArgs)
__props__.__dict__["actions"] = actions
__props__.__dict__["conditions"] = conditions
__props__.__dict__["disabled"] = disabled
__props__.__dict__["position"] = position
if ruleset is None and not opts.urn:
raise TypeError("Missing required property 'ruleset'")
__props__.__dict__["ruleset"] = ruleset
__props__.__dict__["time_frame"] = time_frame
__props__.__dict__["variables"] = variables
super(RulesetRule, __self__).__init__(
'pagerduty:index/rulesetRule:RulesetRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
actions: Optional[pulumi.Input[pulumi.InputType['RulesetRuleActionsArgs']]] = None,
conditions: Optional[pulumi.Input[pulumi.InputType['RulesetRuleConditionsArgs']]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
position: Optional[pulumi.Input[int]] = None,
ruleset: Optional[pulumi.Input[str]] = None,
time_frame: Optional[pulumi.Input[pulumi.InputType['RulesetRuleTimeFrameArgs']]] = None,
variables: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RulesetRuleVariableArgs']]]]] = None) -> 'RulesetRule':
"""
Get an existing RulesetRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['RulesetRuleActionsArgs']] actions: Actions to apply to an event if the conditions match.
:param pulumi.Input[pulumi.InputType['RulesetRuleConditionsArgs']] conditions: Conditions evaluated to check if an event matches this event rule. Is always empty for the catch all rule, though.
:param pulumi.Input[bool] disabled: Indicates whether the rule is disabled and would therefore not be evaluated.
:param pulumi.Input[int] position: Position/index of the rule within the ruleset.
:param pulumi.Input[str] ruleset: The ID of the ruleset that the rule belongs to.
:param pulumi.Input[pulumi.InputType['RulesetRuleTimeFrameArgs']] time_frame: Settings for [scheduling the rule](https://support.pagerduty.com/docs/rulesets#section-scheduled-event-rules).
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RulesetRuleVariableArgs']]]] variables: Populate variables from event payloads and use those variables in other event actions. *NOTE: A rule can have multiple `variable` objects.*
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _RulesetRuleState.__new__(_RulesetRuleState)
__props__.__dict__["actions"] = actions
__props__.__dict__["conditions"] = conditions
__props__.__dict__["disabled"] = disabled
__props__.__dict__["position"] = position
__props__.__dict__["ruleset"] = ruleset
__props__.__dict__["time_frame"] = time_frame
__props__.__dict__["variables"] = variables
return RulesetRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def actions(self) -> pulumi.Output[Optional['outputs.RulesetRuleActions']]:
"""
Actions to apply to an event if the conditions match.
"""
return pulumi.get(self, "actions")
@property
@pulumi.getter
def conditions(self) -> pulumi.Output[Optional['outputs.RulesetRuleConditions']]:
"""
Conditions evaluated to check if an event matches this event rule. Is always empty for the catch all rule, though.
"""
return pulumi.get(self, "conditions")
@property
@pulumi.getter
def disabled(self) -> pulumi.Output[Optional[bool]]:
"""
Indicates whether the rule is disabled and would therefore not be evaluated.
"""
return pulumi.get(self, "disabled")
@property
@pulumi.getter
def position(self) -> pulumi.Output[Optional[int]]:
"""
Position/index of the rule within the ruleset.
"""
return pulumi.get(self, "position")
@property
@pulumi.getter
def ruleset(self) -> pulumi.Output[str]:
"""
The ID of the ruleset that the rule belongs to.
"""
return pulumi.get(self, "ruleset")
@property
@pulumi.getter(name="timeFrame")
def time_frame(self) -> pulumi.Output[Optional['outputs.RulesetRuleTimeFrame']]:
    """Settings for [scheduling the rule](https://support.pagerduty.com/docs/rulesets#section-scheduled-event-rules)."""
    return pulumi.get(self, "time_frame")
@property
@pulumi.getter
def variables(self) -> pulumi.Output[Optional[Sequence['outputs.RulesetRuleVariable']]]:
    """Populate variables from event payloads and use those variables in other event actions. *NOTE: A rule can have multiple `variable` objects.*"""
    return pulumi.get(self, "variables")
| 48.075724
| 247
| 0.666404
| 2,410
| 21,586
| 5.824896
| 0.087967
| 0.084627
| 0.081208
| 0.028423
| 0.8685
| 0.850406
| 0.832312
| 0.823052
| 0.819419
| 0.815928
| 0
| 0.004263
| 0.228435
| 21,586
| 448
| 248
| 48.183036
| 0.838607
| 0.353887
| 0
| 0.78327
| 1
| 0
| 0.137405
| 0.076721
| 0
| 0
| 0
| 0
| 0
| 1
| 0.159696
| false
| 0.003802
| 0.026616
| 0
| 0.281369
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
56d06bd3b0a5d0b10a117f91aeff64ab9c949106
| 126
|
py
|
Python
|
tests/test_dictionary.py
|
soumendrak/openodia
|
ac863df2bee71c60af6834ae629d12a8486b4870
|
[
"MIT"
] | 2
|
2021-10-04T05:40:16.000Z
|
2021-10-14T09:10:33.000Z
|
tests/test_dictionary.py
|
soumendrak/openodia
|
ac863df2bee71c60af6834ae629d12a8486b4870
|
[
"MIT"
] | 19
|
2021-09-25T18:08:01.000Z
|
2021-12-12T01:50:10.000Z
|
tests/test_dictionary.py
|
soumendrak/openodia
|
ac863df2bee71c60af6834ae629d12a8486b4870
|
[
"MIT"
] | 3
|
2021-09-25T17:49:28.000Z
|
2022-01-21T03:52:28.000Z
|
from openodia.corpus.dictionary import get_dictionary
def test_get_dictionary():
    """The bundled Odia dictionary should load with its full entry count."""
    expected_entry_count = 208177
    assert len(get_dictionary()) == expected_entry_count
| 21
| 53
| 0.785714
| 16
| 126
| 5.9375
| 0.6875
| 0.410526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054545
| 0.126984
| 126
| 5
| 54
| 25.2
| 0.809091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
71158a3fbb0471bab42b4118f91aa687bd5d507e
| 91
|
py
|
Python
|
querybook/server/scripts/init_es.py
|
shivammmmm/querybook
|
71263eb7db79e56235ea752f2cf3339ca9b3a092
|
[
"Apache-2.0"
] | 1,144
|
2021-03-30T05:06:16.000Z
|
2022-03-31T10:40:31.000Z
|
querybook/server/scripts/init_es.py
|
shivammmmm/querybook
|
71263eb7db79e56235ea752f2cf3339ca9b3a092
|
[
"Apache-2.0"
] | 593
|
2021-07-01T10:34:25.000Z
|
2022-03-31T23:24:40.000Z
|
querybook/server/scripts/init_es.py
|
shivammmmm/querybook
|
71263eb7db79e56235ea752f2cf3339ca9b3a092
|
[
"Apache-2.0"
] | 113
|
2021-03-30T00:07:20.000Z
|
2022-03-31T07:18:43.000Z
|
# One-shot bootstrap script: create Querybook's Elasticsearch indices.
# The helper's name suggests it is a no-op when the indices already
# exist — presumably safe to re-run; confirm in logic.elasticsearch.
from logic.elasticsearch import create_indices_if_not_exist

create_indices_if_not_exist()
| 22.75
| 59
| 0.901099
| 14
| 91
| 5.285714
| 0.642857
| 0.351351
| 0.405405
| 0.486486
| 0.621622
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065934
| 91
| 3
| 60
| 30.333333
| 0.870588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
85326167929521dc2d0c4dcf78add3fc033ef47f
| 3,585
|
py
|
Python
|
tests/test_cusparse.py
|
emaballarin/binet
|
50fec5baf2611b44fb397016eaa60a4b68d487ff
|
[
"BSD-3-Clause"
] | 24
|
2015-11-12T15:57:27.000Z
|
2020-08-18T03:51:39.000Z
|
tests/test_cusparse.py
|
emaballarin/binet
|
50fec5baf2611b44fb397016eaa60a4b68d487ff
|
[
"BSD-3-Clause"
] | 4
|
2016-09-29T13:49:38.000Z
|
2018-08-29T06:07:37.000Z
|
tests/test_cusparse.py
|
emaballarin/binet
|
50fec5baf2611b44fb397016eaa60a4b68d487ff
|
[
"BSD-3-Clause"
] | 8
|
2016-01-19T20:37:44.000Z
|
2021-10-04T07:54:44.000Z
|
import nose
import copy
import numpy as np
from scipy import sparse
import pycuda.autoinit
import pycuda.gpuarray as gpu
from pycuda.driver import Stream
import binet.cusparse as cusparse
from nose.tools import assert_raises
from numpy.testing import assert_allclose, assert_array_equal
def test_cusparseScsrmm():
    """cusparseScsrmm should compute C = 1.0*(A@B) + 0.5*C for CSR-format A."""
    # Build a sparse single-precision matrix by zeroing most entries.
    A = np.random.laplace(size=(5, 3)).astype(np.float32)
    A[A<0.1] = 0
    A = sparse.csr_matrix(A, dtype=np.float32)
    # Fortran order: cuSPARSE operates on column-major dense matrices.
    B = np.random.normal(size=(3, 6)).astype(np.float32, order="f")
    C = np.ones((A.shape[0], B.shape[1]), dtype=np.float32)
    X_exp = (A*B) + 0.5*C  # host-side reference result
    # Upload the three CSR component arrays and the dense operand.
    a_data = gpu.to_gpu(A.data)
    a_indptr = gpu.to_gpu(A.indptr)
    a_indices = gpu.to_gpu(A.indices)
    b = gpu.to_gpu(B)
    h = cusparse.cusparseCreate()
    descrA = cusparse.cusparseCreateMatDescr()
    # Output buffer allocated transposed (row-major view of a column-major
    # result) and preset to ones so the beta=0.5 term is visible.
    c = gpu.empty((C.shape[1], C.shape[0]), dtype=A.dtype)
    c.fill(1.0)
    cusparse.cusparseScsrmm(h, cusparse.CUSPARSE_OPERATION_NON_TRANSPOSE,
                            c.shape[1], c.shape[0], b.shape[0], A.nnz, 1.0,
                            descrA, a_data.gpudata, a_indptr.gpudata, a_indices.gpudata,
                            b.gpudata, b.shape[0], 0.5, c.gpudata, c.shape[1])
    # Transpose back to row-major before comparing against the reference.
    assert_allclose(c.get().T, X_exp, rtol=1e-4)
def test_cusparseScsrmm2_notranspose():
    """cusparseScsrmm2 with both operands non-transposed should match csrmm."""
    A = np.random.laplace(size=(5, 3)).astype(np.float32)
    A[A<0.1] = 0
    A = sparse.csr_matrix(A, dtype=np.float32)
    # Fortran order: cuSPARSE operates on column-major dense matrices.
    B = np.random.normal(size=(3, 6)).astype(np.float32, order="f")
    C = np.ones((A.shape[0], B.shape[1]), dtype=np.float32)
    X_exp = (A*B) + 0.5*C  # host-side reference result
    a_data = gpu.to_gpu(A.data)
    a_indptr = gpu.to_gpu(A.indptr)
    a_indices = gpu.to_gpu(A.indices)
    b = gpu.to_gpu(B)
    h = cusparse.cusparseCreate()
    descrA = cusparse.cusparseCreateMatDescr()
    c = gpu.empty((C.shape[1], C.shape[0]), dtype=A.dtype)  # transposed layout
    c.fill(1.0)
    # csrmm2 takes an extra transB operation argument compared to csrmm.
    cusparse.cusparseScsrmm2(h, cusparse.CUSPARSE_OPERATION_NON_TRANSPOSE,
                             cusparse.CUSPARSE_OPERATION_NON_TRANSPOSE,
                             c.shape[1], c.shape[0], b.shape[0], A.nnz, 1.0,
                             descrA, a_data.gpudata, a_indptr.gpudata, a_indices.gpudata,
                             b.gpudata, b.shape[0], 0.5, c.gpudata, c.shape[1])
    assert_allclose(c.get().T, X_exp, rtol=1e-4)
def test_cusparseScsr2dense():
    """csr2dense should reproduce the dense form of a CSR matrix."""
    A = np.random.laplace(size=(3, 5)).astype(np.float32)
    A[A<0.1] = 0
    A = sparse.csr_matrix(A, dtype=np.float32)
    A.sort_indices()  # ensure per-row column indices are sorted first
    a_data = gpu.to_gpu(A.data)
    a_indptr = gpu.to_gpu(A.indptr)
    a_indices = gpu.to_gpu(A.indices)
    # Column-major (order="F") output buffer for the cuSPARSE dense result.
    out = gpu.empty((A.shape[0], A.shape[1]), dtype=A.dtype, order="F")
    h = cusparse.cusparseCreate()
    descrA = cusparse.cusparseCreateMatDescr()
    cusparse.cusparseScsr2dense(h, A.shape[0], A.shape[1],
                                descrA, a_data.gpudata, a_indptr.gpudata, a_indices.gpudata,
                                out.gpudata, out.shape[0])
    assert_allclose(out.get(), A.A, rtol=1e-4)  # A.A is the dense ndarray
def test_cusparseSetStream():
    """Binding a CUDA stream to the handle must not change csr2dense results."""
    A = np.random.laplace(size=(3, 5)).astype(np.float32)
    A[A<0.1] = 0
    A = sparse.csr_matrix(A, dtype=np.float32)
    A.sort_indices()
    a_data = gpu.to_gpu(A.data)
    a_indptr = gpu.to_gpu(A.indptr)
    a_indices = gpu.to_gpu(A.indices)
    out = gpu.empty((A.shape[0], A.shape[1]), dtype=A.dtype, order="F")
    h = cusparse.cusparseCreate()
    descrA = cusparse.cusparseCreateMatDescr()
    stream = Stream()
    cusparse.cusparseSetStream(h, stream.handle)  # run conversion on `stream`
    cusparse.cusparseScsr2dense(h, A.shape[0], A.shape[1],
                                descrA, a_data.gpudata, a_indptr.gpudata, a_indices.gpudata,
                                out.gpudata, out.shape[0])
    cusparse.cusparseSetStream(h, 0)  # detach: handle back on default stream
    stream.synchronize()  # wait for the queued work before reading `out`
    assert_allclose(out.get(), A.A, rtol=1e-4)
| 31.725664
| 74
| 0.662762
| 586
| 3,585
| 3.935154
| 0.133106
| 0.041631
| 0.048569
| 0.046834
| 0.806592
| 0.803556
| 0.786644
| 0.786644
| 0.786644
| 0.761492
| 0
| 0.035932
| 0.177127
| 3,585
| 112
| 75
| 32.008929
| 0.745763
| 0
| 0
| 0.752941
| 0
| 0
| 0.001116
| 0
| 0
| 0
| 0
| 0
| 0.070588
| 1
| 0.047059
| false
| 0
| 0.117647
| 0
| 0.164706
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8568536488f34be01931724e4d1905dfc3fb45a9
| 4,797
|
py
|
Python
|
network_zoo.py
|
vinnamkim/large-batch-training
|
75ba057ff677980edfcbba3498fe9213a6bd2710
|
[
"MIT"
] | 122
|
2016-09-19T16:31:28.000Z
|
2022-03-28T13:41:34.000Z
|
network_zoo.py
|
vinnamkim/large-batch-training
|
75ba057ff677980edfcbba3498fe9213a6bd2710
|
[
"MIT"
] | 8
|
2017-04-05T18:09:35.000Z
|
2020-10-06T07:36:31.000Z
|
network_zoo.py
|
vinnamkim/large-batch-training
|
75ba057ff677980edfcbba3498fe9213a6bd2710
|
[
"MIT"
] | 20
|
2016-09-21T14:04:30.000Z
|
2022-03-03T07:40:54.000Z
|
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D, AveragePooling2D
from keras.layers.normalization import BatchNormalization
img_size = (3, 32, 32)
def kerasnet(nb_classes):
    """Small CIFAR-style convnet: two conv stages then one dense block.

    Args:
        nb_classes: number of output classes for the final softmax.

    Returns:
        An uncompiled keras Sequential model for 3x32x32 inputs.
    """
    model = Sequential()

    def _conv_bn_relu(n_filters, **conv_kwargs):
        # 3x3 conv -> per-channel batch-norm (axis=1) -> ReLU
        model.add(Convolution2D(n_filters, 3, 3, **conv_kwargs))
        model.add(BatchNormalization(mode=2, axis=1))
        model.add(Activation('relu'))

    _conv_bn_relu(32, border_mode='valid', input_shape=(3, 32, 32))
    _conv_bn_relu(32)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    _conv_bn_relu(64, border_mode='valid')
    _conv_bn_relu(64)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(BatchNormalization(mode=2))
    model.add(Activation('relu'))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    return model
def shallownet(nb_classes):
    """Shallow convnet: two 5x5 conv/pool stages plus two dropout-regularized
    dense layers, ending in a softmax over nb_classes classes.

    Input shape is taken from the module-level ``img_size``.
    """
    global img_size
    model = Sequential()
    # Two conv -> BN -> ReLU -> overlapping max-pool stages.
    for stage_idx, n_filters in enumerate((64, 64)):
        first_layer_kwargs = {'input_shape': img_size} if stage_idx == 0 else {}
        model.add(Convolution2D(n_filters, 5, 5, border_mode='same', **first_layer_kwargs))
        model.add(BatchNormalization(mode=2, axis=1))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), border_mode='same'))
    model.add(Flatten())
    # Two fully connected blocks, each followed by 50% dropout.
    for n_units in (384, 192):
        model.add(Dense(n_units))
        model.add(BatchNormalization(mode=2))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
    model.add(Dense(nb_classes, activation='softmax'))
    return model
def deepnet(nb_classes):
    """VGG16-style convnet for small images (input shape from ``img_size``).

    Five conv stages (64-64 / 128-128 / 256x3 / 512x3 / 512x3): each conv is
    3x3 same-padded and followed by per-channel batch-norm and ReLU, with
    dropout between convs inside a stage, then a 512-unit dense head and a
    softmax over nb_classes.
    """
    global img_size
    model = Sequential()

    def _conv_bn_relu(n_filters, dropout=None, **conv_kwargs):
        # 3x3 same-padded conv -> BN(axis=1) -> ReLU [-> Dropout]
        model.add(Convolution2D(n_filters, 3, 3, border_mode='same', **conv_kwargs))
        model.add(BatchNormalization(mode=2, axis=1))
        model.add(Activation('relu'))
        if dropout is not None:
            model.add(Dropout(dropout))

    def _pool():
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), border_mode='same'))

    _conv_bn_relu(64, dropout=0.3, input_shape=img_size)
    _conv_bn_relu(64)
    _pool()
    _conv_bn_relu(128, dropout=0.4)
    _conv_bn_relu(128)
    _pool()
    # Three triple-conv stages (256, 512, 512 filters).
    for n_filters in (256, 512, 512):
        _conv_bn_relu(n_filters, dropout=0.4)
        _conv_bn_relu(n_filters, dropout=0.4)
        _conv_bn_relu(n_filters)
        _pool()
    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(512))
    model.add(BatchNormalization(mode=2))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes, activation='softmax'))
    return model
| 42.451327
| 84
| 0.707526
| 699
| 4,797
| 4.788269
| 0.075823
| 0.231849
| 0.129071
| 0.206155
| 0.913056
| 0.888258
| 0.876008
| 0.876008
| 0.865252
| 0.852106
| 0
| 0.055764
| 0.110277
| 4,797
| 112
| 85
| 42.830357
| 0.728444
| 0
| 0
| 0.769231
| 0
| 0
| 0.043995
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.038462
| null | null | 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
858bc14668a9bcfda374011f15b6f16e3edfc387
| 4,022
|
py
|
Python
|
src/genie/libs/parser/iosxe/tests/ShowMdnsSdServiceList/cli/equal/golden_output1_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 204
|
2018-06-27T00:55:27.000Z
|
2022-03-06T21:12:18.000Z
|
src/genie/libs/parser/iosxe/tests/ShowMdnsSdServiceList/cli/equal/golden_output1_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 468
|
2018-06-19T00:33:18.000Z
|
2022-03-31T23:23:35.000Z
|
src/genie/libs/parser/iosxe/tests/ShowMdnsSdServiceList/cli/equal/golden_output1_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 309
|
2019-01-16T20:21:07.000Z
|
2022-03-30T12:56:41.000Z
|
def _service_entries(filter_dir, source, loc_filter):
    """Build the mapping of the ten mDNS service names to one shared
    per-service record (msg_type is always "any")."""
    service_names = (
        "apple-airprint",
        "apple-remote-login",
        "apple-screen-share",
        "apple-tv",
        "apple-windows-fileshare",
        "google-chromecast",
        "google-expeditions",
        "homesharing",
        "multifunction-printer",
        "printer-ipps",
    )
    return {
        name: {
            "filter_dir": filter_dir,
            "msg_type": "any",
            "source": source,
            "loc_filter": loc_filter,
        }
        for name in service_names
    }


# Expected parsed structure for the "show mdns-sd service-list" golden
# output: the same ten services appear in both the IN and OUT lists,
# differing only in direction, source, and location filter.
expected_output = {
    "srvc_list": {
        "default-mdns-in-service-list": {
            "services": _service_entries("IN", "-", "-"),
        },
        "default-mdns-out-service-list": {
            "services": _service_entries(
                "OUT", "ALL", "default-mdns-location-filter"
            ),
        },
    },
}
| 30.240602
| 58
| 0.36176
| 290
| 4,022
| 4.803448
| 0.12069
| 0.129218
| 0.143575
| 0.22972
| 0.949031
| 0.8715
| 0.816942
| 0.770998
| 0.770998
| 0.667624
| 0
| 0
| 0.466932
| 4,022
| 132
| 59
| 30.469697
| 0.64972
| 0
| 0
| 0.772727
| 0
| 0
| 0.378419
| 0.105669
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
a4269d1d1f159ffcae5e4329a2d7eefddccf97c8
| 8,894
|
py
|
Python
|
scripts/ssc/models/TopoAE_ext/config_libraries/local_configs/mnist.py
|
MrBellamonte/MT-VAEs-TDA
|
8881b5db607c673fb558f7b74ece27f244b16b77
|
[
"MIT"
] | null | null | null |
scripts/ssc/models/TopoAE_ext/config_libraries/local_configs/mnist.py
|
MrBellamonte/MT-VAEs-TDA
|
8881b5db607c673fb558f7b74ece27f244b16b77
|
[
"MIT"
] | 1
|
2020-09-22T13:04:58.000Z
|
2020-09-22T13:05:23.000Z
|
scripts/ssc/models/TopoAE_ext/config_libraries/local_configs/mnist.py
|
MrBellamonte/AEs-VAEs-TDA
|
8881b5db607c673fb558f7b74ece27f244b16b77
|
[
"MIT"
] | null | null | null |
from src.datasets.datasets import MNIST_offline
from src.evaluation.config import ConfigEval
from src.models.WitnessComplexAE.config import ConfigGrid_WCAE
from src.models.autoencoder.autoencoders import (
ConvAE_MNIST,
ConvAE_MNIST_SMALL, ConvAE_MNIST_3D)
# Local smoke-test grid for the Witness-Complex AE on MNIST: 3 epochs,
# evaluation disabled, witness complexes loaded from a precomputed path
# (note: the path is machine-specific — /Users/simons/...).
mnist_test = ConfigGrid_WCAE(
    learning_rate=[1/100],
    batch_size=[1024],
    n_epochs=[3],
    weight_decay=[1e-6],
    early_stopping=[50],
    rec_loss_weight=[1],
    top_loss_weight=[1],
    match_edges=['push_active'],
    k=[1],
    r_max=[10],
    model_class=[ConvAE_MNIST],
    model_kwargs=[dict()],
    dataset=[MNIST_offline()],
    sampling_kwargs=[dict()],
    eval=[ConfigEval(
        active=False,
        evaluate_on='test',
        eval_manifold=False,
        save_eval_latent=True,
        save_train_latent=True,
        online_visualization=False,
        k_min=4,
        k_max=5,
        k_step=1,
    )],
    uid=[''],
    toposig_kwargs=[dict()],
    method_args=dict(n_jobs=[1], normalize=[True], mu_push=[1.05], online_wc=[True], wc_offline=[
        dict(
            path_to_data='/Users/simons/MT_data/sync/euler_sync/schsimo/MT/output/WitnessComplexes/mnist/MNIST_offline-bs1024-seed838-noiseNone-6f31dea2')]),
    experiment_dir='/output/WAE/mnist_precomputed',
    seed=838,
    device='cpu',
    num_threads=1,
    verbose=True,
)
mnist_test2 = ConfigGrid_WCAE(
learning_rate=[1/1000],
batch_size=[1024],
n_epochs=[1],
weight_decay=[1e-6],
early_stopping=[50],
rec_loss_weight=[1],
top_loss_weight=[0],
match_edges=['push_active'],
k=[1],
r_max=[10],
model_class=[ConvAE_MNIST_SMALL],
model_kwargs=[dict()],
dataset=[MNIST_offline()],
sampling_kwargs=[dict()],
eval=[ConfigEval(
active=True,
evaluate_on='test',
eval_manifold=False,
save_eval_latent=True,
save_train_latent=True,
online_visualization=False,
k_min=5,
k_max=45,
k_step=5,
)],
uid=[''],
toposig_kwargs=[dict()],
method_args=dict(n_jobs=[1], normalize=[True], mu_push=[1], online_wc=[True], wc_offline=[dict(
path_to_data='/Users/simons/MT_data/sync/euler_sync/schsimo/MT/output/WitnessComplexes/mnist/MNIST_offline-bs1024-seed838-noiseNone-6f31dea2')]),
experiment_dir='/output/WAE/mnist_precomputed',
seed=838,
device='cpu',
num_threads=2,
verbose=True,
)
# NOTE(review): this rebinds `mnist_test2`, silently shadowing the grid
# defined just above (1 epoch, top_loss_weight=[0]) with this 250-epoch
# run — confirm the earlier definition is intentionally dead, or rename
# one of the two.
mnist_test2 = ConfigGrid_WCAE(
    learning_rate=[1/100],
    batch_size=[1024],
    n_epochs=[250],
    weight_decay=[1e-6],
    early_stopping=[50],
    rec_loss_weight=[1],
    top_loss_weight=[1],
    match_edges=['push_active'],
    k=[1],
    r_max=[10],
    model_class=[ConvAE_MNIST_SMALL],
    model_kwargs=[dict()],
    dataset=[MNIST_offline()],
    sampling_kwargs=[dict()],
    eval=[ConfigEval(
        active=True,
        evaluate_on='test',
        eval_manifold=False,
        save_eval_latent=True,
        save_train_latent=True,
        online_visualization=False,
        k_min=5,
        k_max=45,
        k_step=5,
    )],
    uid=[''],
    toposig_kwargs=[dict()],
    method_args=dict(n_jobs=[1], normalize=[True], mu_push=[1], online_wc=[True], wc_offline=[dict(
        path_to_data='/Users/simons/MT_data/sync/euler_sync/schsimo/MT/output/WitnessComplexes/mnist/MNIST_offline-bs1024-seed838-noiseNone-6f31dea2')]),
    experiment_dir='/output/WAE/mnist_precomputed',
    seed=838,
    device='cpu',
    num_threads=4,
    verbose=True,
)
mnist_test3 = ConfigGrid_WCAE(
learning_rate=[1/100],
batch_size=[1024],
n_epochs=[1],
weight_decay=[1e-6],
early_stopping=[50],
rec_loss_weight=[1],
top_loss_weight=[1, 2],
match_edges=['push_active'],
k=[1],
r_max=[10],
model_class=[ConvAE_MNIST_SMALL],
model_kwargs=[dict()],
dataset=[MNIST_offline()],
sampling_kwargs=[dict()],
eval=[ConfigEval(
active=False,
evaluate_on='test',
eval_manifold=False,
save_eval_latent=True,
save_train_latent=True,
online_visualization=False,
k_min=5,
k_max=45,
k_step=5,
)],
uid=[''],
toposig_kwargs=[dict()],
method_args=dict(n_jobs=[1], normalize=[True], mu_push=[1], online_wc=[True], wc_offline=[dict(
path_to_data='/Users/simons/MT_data/sync/euler_sync/schsimo/MT/output/WitnessComplexes/mnist/MNIST_offline-bs1024-seed838-noiseNone-6f31dea2')]),
experiment_dir='output/WAE/mnist_precomp_newid',
seed=838,
device='cpu',
num_threads=1,
verbose=True,
)
mnist_test256 = ConfigGrid_WCAE(
learning_rate=[1/100],
batch_size=[512],
n_epochs=[1],
weight_decay=[1e-6],
early_stopping=[50],
rec_loss_weight=[1],
top_loss_weight=[1, 2],
match_edges=['push_active'],
k=[1],
r_max=[10],
model_class=[ConvAE_MNIST_SMALL],
model_kwargs=[dict()],
dataset=[MNIST_offline()],
sampling_kwargs=[dict()],
eval=[ConfigEval(
active=False,
evaluate_on='test',
eval_manifold=False,
save_eval_latent=True,
save_train_latent=True,
online_visualization=False,
k_min=5,
k_max=45,
k_step=5,
)],
uid=[''],
toposig_kwargs=[dict()],
method_args=dict(n_jobs=[1], normalize=[True], mu_push=[1], online_wc=[True], wc_offline=[dict(
path_to_data='/Users/simons/MT_data/sync/euler_sync/schsimo/MT/output/WitnessComplexes/mnist/MNIST_offline-bs512-seed838-noiseNone-ced06774')]),
experiment_dir='/output/WAE/mnist_precomputed_2',
seed=838,
device='cpu',
num_threads=1,
verbose=True,
)
mnist_test256_1024_leonhard = ConfigGrid_WCAE(
learning_rate=[1/100000],
batch_size=[1024],
n_epochs=[50],
weight_decay=[1e-6],
early_stopping=[50],
rec_loss_weight=[1],
top_loss_weight=[1],
match_edges=['push_active'],
k=[1],
r_max=[10],
model_class=[ConvAE_MNIST],
model_kwargs=[dict()],
dataset=[MNIST_offline()],
sampling_kwargs=[dict()],
eval=[ConfigEval(
active=True,
evaluate_on='test',
eval_manifold=False,
save_eval_latent=True,
save_train_latent=True,
online_visualization=False,
k_min=5,
k_max=45,
k_step=5,
)],
uid=[''],
toposig_kwargs=[dict()],
method_args=dict(n_jobs=[1], normalize=[True], mu_push=[1], online_wc=[True], wc_offline=[dict(
path_to_data='/Users/simons/MT_data/sync/euler_sync/schsimo/MT/output/WitnessComplexes/mnist/MNIST_offline-bs1024-seed838-noiseNone-6f31dea2')]),
experiment_dir='/output/WAE/mnist_precomputed_2',
seed=838,
device='cpu',
num_threads=2,
verbose=True,
)
mnist_test_hd = ConfigGrid_WCAE(
learning_rate=[1/100],
batch_size=[1024],
n_epochs=[3],
weight_decay=[1e-6],
early_stopping=[50],
rec_loss_weight=[1],
top_loss_weight=[1],
match_edges=['push_active'],
k=[1],
r_max=[10],
model_class=[ConvAE_MNIST],
model_kwargs=[dict()],
dataset=[MNIST_offline()],
sampling_kwargs=[dict()],
eval=[ConfigEval(
active=False,
evaluate_on='test',
eval_manifold=False,
save_eval_latent=True,
save_train_latent=True,
online_visualization=False,
k_min=4,
k_max=5,
k_step=1,
)],
uid=[''],
toposig_kwargs=[dict()],
method_args=dict(n_jobs=[1], normalize=[True], mu_push=[1.05], online_wc=[True], wc_offline=[
dict(
path_to_data='/Users/simons/MT_data/sync/euler_sync/schsimo/MT/output/WitnessComplexes/mnist/MNIST_offline-bs1024-seed838-noiseNone-6f31dea2')]),
experiment_dir='/output/WAE/mnist_precomputed',
seed=838,
device='cpu',
num_threads=1,
verbose=True,
)
mnist_test_3d = ConfigGrid_WCAE(
learning_rate=[1/100],
batch_size=[1024],
n_epochs=[3],
weight_decay=[1e-6],
early_stopping=[50],
rec_loss_weight=[1],
top_loss_weight=[1],
match_edges=['push_active'],
k=[1],
r_max=[10],
model_class=[ConvAE_MNIST_3D],
model_kwargs=[dict()],
dataset=[MNIST_offline()],
sampling_kwargs=[dict()],
eval=[ConfigEval(
active=False,
evaluate_on='test',
eval_manifold=False,
save_eval_latent=False,
save_train_latent=True,
online_visualization=False,
k_min=4,
k_max=5,
k_step=1,
)],
uid=[''],
toposig_kwargs=[dict()],
method_args=dict(n_jobs=[1], normalize=[True], mu_push=[1.05], online_wc=[True], wc_offline=[
dict(
path_to_data='/Users/simons/MT_data/sync/euler_sync/schsimo/MT/output/WitnessComplexes/mnist/MNIST_offline-bs1024-seed838-noiseNone-6f31dea2')]),
experiment_dir='/Users/simons/PycharmProjects/MT-VAEs-TDA/scripts/ssc/output',
seed=838,
device='cpu',
num_threads=1,
verbose=False,
)
| 28.876623
| 157
| 0.642231
| 1,161
| 8,894
| 4.608096
| 0.106804
| 0.04486
| 0.030841
| 0.038879
| 0.926168
| 0.917383
| 0.917383
| 0.917383
| 0.903738
| 0.896822
| 0
| 0.048218
| 0.204857
| 8,894
| 307
| 158
| 28.970684
| 0.708286
| 0
| 0
| 0.888889
| 0
| 0.026936
| 0.159546
| 0.143355
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.013468
| 0
| 0.013468
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8efd0a348291c80392ed46f20e863ceb48b3dbb6
| 38
|
py
|
Python
|
dwellingplace/views/__init__.py
|
dwelling-place/xlsx-merger
|
fa2343a3acbf3fc7f8b76b30f6ccbc7dc03282ed
|
[
"MIT"
] | null | null | null |
dwellingplace/views/__init__.py
|
dwelling-place/xlsx-merger
|
fa2343a3acbf3fc7f8b76b30f6ccbc7dc03282ed
|
[
"MIT"
] | 56
|
2016-11-05T01:55:22.000Z
|
2016-11-07T00:30:10.000Z
|
dwellingplace/views/__init__.py
|
dwelling-place/xlsx-merger
|
fa2343a3acbf3fc7f8b76b30f6ccbc7dc03282ed
|
[
"MIT"
] | 2
|
2016-11-05T13:42:53.000Z
|
2016-11-05T13:59:09.000Z
|
from . import index
from . import api
| 12.666667
| 19
| 0.736842
| 6
| 38
| 4.666667
| 0.666667
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 38
| 2
| 20
| 19
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f1166f5b4a347a44823daa6b59bf0691acae6aa9
| 29,611
|
py
|
Python
|
baseline/src/nets/gru_vae.py
|
SolomidHero/vcc20_baseline_cyclevae
|
983f4852cefc525013b2168429934a5c46d45484
|
[
"MIT"
] | 131
|
2020-03-11T11:04:30.000Z
|
2022-02-10T12:45:39.000Z
|
baseline/src/nets/gru_vae.py
|
SolomidHero/vcc20_baseline_cyclevae
|
983f4852cefc525013b2168429934a5c46d45484
|
[
"MIT"
] | 9
|
2020-05-27T22:01:17.000Z
|
2022-03-08T13:05:23.000Z
|
baseline/src/nets/gru_vae.py
|
SolomidHero/vcc20_baseline_cyclevae
|
983f4852cefc525013b2168429934a5c46d45484
|
[
"MIT"
] | 18
|
2020-03-15T15:31:15.000Z
|
2021-06-16T02:56:36.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Patrick Lumban Tobing (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import division
import logging
import sys
import time
import math
import torch
import torch.nn.functional as F
from torch import nn
import numpy as np
def initialize(m):
    """Initialize all parameters of a network.

    Weights (any parameter whose name contains 'weight') get Xavier-uniform
    init, biases are zeroed, and any other parameter name is reported via
    logging instead of being touched.

    Arg:
        m (torch.nn.Module): torch nn module instance
    """
    for param_name, param in m.named_parameters():
        if 'weight' in param_name:
            nn.init.xavier_uniform_(param)
        elif 'bias' in param_name:
            nn.init.constant_(param, 0.0)
        else:
            logging.info("ERROR: " + param_name)
class TwoSidedDilConv1d(nn.Module):
    """1D two-sided (non-causal) dilated convolution stack.

    Stacks `layers` Conv1d layers whose dilation grows as kernel_size**i
    and whose channel count grows by a factor of kernel_size per layer,
    giving a symmetric receptive field of kernel_size**layers frames while
    preserving the time length.
    """

    def __init__(self, in_dim=55, kernel_size=3, layers=2):
        super(TwoSidedDilConv1d, self).__init__()
        self.in_dim = in_dim
        self.kernel_size = kernel_size
        self.layers = layers
        self.rec_field = self.kernel_size**self.layers
        # Symmetric zero-padding that keeps output length == input length.
        self.padding = int((self.rec_field-1)/2)
        self.conv = nn.ModuleList()
        for layer_idx in range(self.layers):
            out_ch = self.in_dim*(self.kernel_size**(layer_idx+1))
            if layer_idx == 0:
                # The first layer carries all padding for the whole stack.
                self.conv.append(nn.Conv1d(self.in_dim, out_ch, self.kernel_size,
                                           padding=self.padding))
            else:
                in_ch = self.in_dim*(self.kernel_size**layer_idx)
                self.conv.append(nn.Conv1d(in_ch, out_ch, self.kernel_size,
                                           dilation=self.kernel_size**layer_idx))

    def forward(self, x):
        """Forward calculation

        Arg:
            x (Variable): float tensor variable with the shape (B x C x T)
        Return:
            (Variable): float tensor variable with the shape (B x C x T)
        """
        out = x
        for conv_layer in self.conv:
            out = conv_layer(out)
        return out
class CausalDilConv1d(nn.Module):
    """1D causal dilated convolution stack.

    Like TwoSidedDilConv1d, but the output at time t depends only on inputs
    up to t: both ends are zero-padded by Conv1d and the right end is trimmed
    after the stack. Receptive field is sum(k**(i+1)-k**i) + 1 past frames.
    """

    def __init__(self, in_dim=11, kernel_size=2, layers=2):
        super(CausalDilConv1d, self).__init__()
        self.in_dim = in_dim
        self.kernel_size = kernel_size
        self.layers = layers
        # Per-layer growth of the look-back span: k**(i+1) - k**i frames.
        self.padding_list = [self.kernel_size**(i+1)-self.kernel_size**(i) for i in range(self.layers)]
        logging.info(self.padding_list)
        self.padding = sum(self.padding_list)
        self.rec_field = self.padding + 1
        self.conv = nn.ModuleList()
        for layer_idx in range(self.layers):
            out_mult = sum(self.padding_list[:layer_idx+1]) + 1
            if layer_idx == 0:
                # The first layer carries all padding for the whole stack.
                self.conv.append(nn.Conv1d(self.in_dim, self.in_dim*out_mult,
                                           self.kernel_size, padding=self.padding))
            else:
                in_mult = sum(self.padding_list[:layer_idx]) + 1
                self.conv.append(nn.Conv1d(self.in_dim*in_mult, self.in_dim*out_mult,
                                           self.kernel_size,
                                           dilation=self.kernel_size**layer_idx))

    def forward(self, x):
        """Forward calculation

        Arg:
            x (Variable): float tensor variable with the shape (B x C x T)
        Return:
            (Variable): float tensor variable with the shape (B x C x T)
        """
        out = x
        for conv_layer in self.conv:
            out = conv_layer(out)
        # Conv1d padded both ends; dropping the right tail keeps the stack
        # causal and restores the original time length.
        return out[:,:,:-self.padding]
def sampling_vae_laplace(param, lat_dim=None):
    """Reparameterized draw from a Laplace posterior (2-D `param`).

    `param` packs [mu | log-scale] side by side along dim 1; `lat_dim`
    defaults to half that width. Requires CUDA — the noise tensor is
    allocated on the GPU.
    """
    if lat_dim is None:
        lat_dim = int(param.shape[1]/2)
    mu = param[:,:lat_dim]
    sigma = param[:,lat_dim:]
    # Uniform noise in (-0.5, 0.5); the lower bound -0.4999 avoids the
    # log1p(-2*|eps|) singularity at |eps| = 0.5.
    eps = torch.empty(param.shape[0], lat_dim).cuda().uniform_(-0.4999,0.5)
    # Inverse-CDF transform of uniform noise into a Laplace sample with
    # location mu and scale exp(sigma).
    return mu - torch.exp(sigma) * eps.sign() * torch.log1p(-2*eps.abs()) # log_scale
def sampling_vae_laplace_batch(param, lat_dim=None):
    """Batched variant of sampling_vae_laplace for 3-D `param` (B x T x 2*lat).

    `param` packs [mu | log-scale] along the last dim; requires CUDA.
    """
    if lat_dim is None:
        lat_dim = int(param.shape[1]/2)
    mu = param[:,:,:lat_dim]
    sigma = param[:,:,lat_dim:]
    # Uniform noise in (-0.5, 0.5); -0.4999 avoids the log1p singularity.
    eps = torch.empty(param.shape[0], param.shape[1], lat_dim).cuda().uniform_(-0.4999,0.5)
    # Inverse-CDF transform into a Laplace sample (location mu, scale exp(sigma)).
    return mu - torch.exp(sigma) * eps.sign() * torch.log1p(-2*eps.abs()) # log_scale
def loss_vae_laplace(param, clip=False, lat_dim=None):
    """Regularization loss for a Laplace posterior against a unit-scale prior.

    `param` packs [mu | log-scale] along dim 1; `lat_dim` defaults to half
    that width. With `clip`, log-scales are floored at
    log(sqrt(1e-12/2)) ~= -14.1621 so exp(sigma) cannot underflow (the
    constant corresponds to a variance floor of 1e-12; the original code
    carried alternatives for 1e-6 and 1e-9).

    Returns a scalar tensor: batch mean of the per-sample summed divergence.
    """
    if lat_dim is None:
        lat_dim = int(param.shape[1]/2)
    mu = param[:, :lat_dim]
    sigma = param[:, lat_dim:]
    min_log_scale = -14.162084148244246758816564788835  # log(sqrt(1e-12/2))
    if clip and torch.min(sigma) < min_log_scale:
        sigma = torch.clamp(sigma, min=min_log_scale)
    mu_abs = mu.abs()
    scale = torch.exp(sigma)
    per_dim = -sigma + scale*torch.exp(-mu_abs/scale) + mu_abs - 1
    return torch.mean(torch.sum(per_dim, 1))
class GRU_RNN(nn.Module):
    """GRU-RNN for FEATURE MAPPING
    Autoregressive frame-by-frame mapping: (optionally normalized) input
    frames pass through a dilated input convolution (two-sided or causal),
    then a GRU whose input at each frame is the conv features concatenated
    with the PREVIOUS output frame (AR feedback loop).
    Args:
        in_dim (int): input dimension
        out_dim (int): RNN output dimension
        hidden_units (int): GRU hidden units amount
        hidden_layers (int): GRU hidden layers amount
        kernel_size (int): kernel size for input convolutional layers
        dilation_size (int): dilation size for input convolutional layers
        do_prob (float): drop-out probability
        scale_in_flag (bool): flag to use input normalization layer
        scale_out_flag (bool): flag to use output de-normalization layer
        causal_conv (bool): flag to use causal (past-only) input convolution
        spk_dim (int): if not None, the first spk_dim output channels are
            passed through selu() before being fed back (speaker-code head)
        [Weights & biases of norm/de-norm layers should be set with training data stats]
    """
    def __init__(self, in_dim=39, out_dim=35, hidden_units=1024, hidden_layers=1, kernel_size=3, \
            dilation_size=2, do_prob=0, scale_in_flag=True, scale_out_flag=True, \
            causal_conv=False, spk_dim=None):
        super(GRU_RNN, self).__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.hidden_units = hidden_units
        self.hidden_layers = hidden_layers
        self.kernel_size = kernel_size
        self.dilation_size = dilation_size
        self.do_prob = do_prob
        self.scale_in_flag = scale_in_flag
        self.scale_out_flag = scale_out_flag
        self.causal_conv = causal_conv
        self.spk_dim = spk_dim
        # Normalization layer (1x1 conv whose weights encode mean/std stats)
        if self.scale_in_flag:
            self.scale_in = nn.Conv1d(self.in_dim, self.in_dim, 1)
        # Dilated two-sides convolution layers: -4/0/+4 frames with kernel_size = 3 and dilation_size = 2
        # or Dilated causal convolution layers: -8/0 frames with kernel_size = 3 and dilation_size = 2
        if not self.causal_conv:
            self.conv = TwoSidedDilConv1d(in_dim=self.in_dim, kernel_size=self.kernel_size, \
                            layers=self.dilation_size)
        else:
            self.conv = CausalDilConv1d(in_dim=self.in_dim, kernel_size=self.kernel_size, \
                            layers=self.dilation_size)
        self.receptive_field = self.conv.rec_field
        # GRU input = stacked conv context (in_dim * receptive_field) + fed-back output
        self.tot_in_dim = self.in_dim*self.receptive_field+self.out_dim
        if self.do_prob > 0:
            self.conv_drop = nn.Dropout(p=self.do_prob)
        # GRU layer(s); inter-layer dropout only makes sense with >1 layer
        if self.do_prob > 0 and self.hidden_layers > 1:
            self.gru = nn.GRU(self.tot_in_dim, self.hidden_units, self.hidden_layers, \
                        dropout=self.do_prob, batch_first=True)
        else:
            self.gru = nn.GRU(self.tot_in_dim, self.hidden_units, self.hidden_layers, batch_first=True)
        if self.do_prob > 0:
            self.gru_drop = nn.Dropout(p=self.do_prob)
        # 1x1 output projection from GRU hidden state to feature frame
        self.out_1 = nn.Conv1d(self.hidden_units, self.out_dim, 1)
        # Denormalization layer
        if self.scale_out_flag:
            self.scale_out = nn.Conv1d(self.out_dim, self.out_dim, 1)
    def forward(self, x, y_in, h_in=None, do=False):
        """Forward calculation
        Args:
            x (Variable): float tensor variable with the shape (T x C_in) or (B x T x C_in)
            y_in (Variable): previous output frame (B x 1 x C_out) seeding the AR loop
            h_in (Variable): optional initial GRU hidden state
            do (bool): apply dropout when True (training)
        Return:
            (Variable): float tensor variable with the shape (T x C_out) or (B x T x C_out),
            plus the last fed-back output frame and the final GRU hidden state
        """
        if len(x.shape) > 2:
            batch_flag = True
            T = x.shape[1]
            # Normalization layer
            if self.scale_in_flag:
                x_in = self.scale_in(x.transpose(1,2)) # B x T x C -> B x C x T
            else:
                x_in = x.transpose(1,2) # B x T x C -> B x C x T
        else:
            batch_flag = False
            T = x.shape[0]
            # Normalization layer
            if self.scale_in_flag:
                x_in = self.scale_in(torch.unsqueeze(x.transpose(0,1),0)) # T x C -> C x T -> B x C x T
            else:
                x_in = torch.unsqueeze(x.transpose(0,1),0) # T x C -> C x T -> B x C x T
        # Dilated two-sides convolution layers: -4/0/+4 frames with kernel_size = 3 and dilation_size = 2
        # or Dilated causal convolution layers: -8/0 frames with kernel_size = 3 and dilation_size = 2
        if self.do_prob > 0 and do:
            x_conv = self.conv_drop(self.conv(x_in).transpose(1,2)) # T x C --> B x C x T --> B x T x C
        else:
            x_conv = self.conv(x_in).transpose(1,2) # T x C --> B x C x T --> B x T x C
        # GRU and AR layers
        # 1st frame: GRU input is conv features + the externally supplied y_in
        if h_in is not None:
            out, h = self.gru(torch.cat((x_conv[:,:1],y_in),2), h_in) # B x T x C
        else:
            out, h = self.gru(torch.cat((x_conv[:,:1],y_in),2)) # B x T x C
        if self.do_prob > 0 and do:
            y_in = self.out_1(self.gru_drop(out).transpose(1,2)).transpose(1,2)
        else:
            y_in = self.out_1(out.transpose(1,2)).transpose(1,2) # B x T x C -> B x C x T -> B x T x C
        if self.spk_dim is not None:
            # selu() on the speaker-code slice before feeding the frame back
            y_in = torch.cat((F.selu(y_in[:,:,:self.spk_dim]),y_in[:,:,self.spk_dim:]),2)
        trj = y_in
        # 2nd-Tth frame: each step feeds the previous output frame back in
        if self.spk_dim is None:
            if self.do_prob > 0 and do:
                for i in range(1,T):
                    out, h = self.gru(torch.cat((x_conv[:,i:(i+1)],y_in),2), h)
                    y_in = self.out_1(self.gru_drop(out).transpose(1,2)).transpose(1,2)
                    trj = torch.cat((trj, y_in), 1)
            else:
                for i in range(1,T):
                    out, h = self.gru(torch.cat((x_conv[:,i:(i+1)],y_in),2), h)
                    y_in = self.out_1(out.transpose(1,2)).transpose(1,2) # B x T x C -> B x C x T -> B x T x C
                    trj = torch.cat((trj, y_in), 1)
        else:
            if self.do_prob > 0 and do:
                for i in range(1,T):
                    out, h = self.gru(torch.cat((x_conv[:,i:(i+1)],y_in),2), h)
                    y_in = self.out_1(self.gru_drop(out).transpose(1,2)).transpose(1,2)
                    y_in = torch.cat((F.selu(y_in[:,:,:self.spk_dim]),y_in[:,:,self.spk_dim:]),2)
                    trj = torch.cat((trj, y_in), 1)
            else:
                for i in range(1,T):
                    out, h = self.gru(torch.cat((x_conv[:,i:(i+1)],y_in),2), h)
                    y_in = self.out_1(out.transpose(1,2)).transpose(1,2) # B x T x C -> B x C x T -> B x T x C
                    y_in = torch.cat((F.selu(y_in[:,:,:self.spk_dim]),y_in[:,:,self.spk_dim:]),2)
                    trj = torch.cat((trj, y_in), 1)
        # Denormalization layer
        if self.scale_out_flag:
            if batch_flag:
                # B x T x C -> B x C x T -> B x T x C
                trj_out = self.scale_out(trj.transpose(1,2)).transpose(1,2)
            else:
                # B x T x C -> B x C x T -> T x C
                trj_out = torch.squeeze(self.scale_out(trj.transpose(1,2)).transpose(1,2),0)
            return trj_out, y_in, h
        else:
            if not batch_flag:
                trj = trj.view(-1,self.out_dim)
            return trj, y_in, h
class MCDloss(nn.Module):
    """ spectral loss based on mel-cepstrum distortion (MCD) """
    def __init__(self):
        super(MCDloss, self).__init__()
        # 10/ln(10): converts natural-log distances to dB
        self.frac10ln2 = (10.0/2.3025850929940456840179914546844)
        self.sqrt2 = 1.4142135623730950488016887242097
    def forward(self, x, y, twf=None, L2=False):
        """
        twf is time-warping function, none means exact same time-alignment
        L2 means using squared loss (L2-based loss), false means using abs./L1-based loss; default false
        Returns (sum, mean, std) of the per-frame MCD values.
        """
        # Align x to y first (identity alignment when twf is None)
        aligned = x if twf is None else torch.index_select(x, 0, twf)
        diff = aligned - y
        if L2:
            mcd = self.frac10ln2 * torch.sqrt(2.0 * torch.sum(diff.pow(2), 1))
        else:
            mcd = self.frac10ln2 * self.sqrt2 * torch.sum(torch.abs(diff), 1)
        return torch.sum(mcd), torch.mean(mcd), torch.std(mcd)
class GRU_RNN_STOCHASTIC(nn.Module):
    """STOCHASTIC GRU-RNN for FEATURE MAPPING
    Autoregressive GRU decoder whose output parameterizes a Laplace posterior
    (mu + log-scale per frame); frames are either sampled from it or taken as
    the MAP value (mu), and the fed-back AR frame is either the full parameter
    vector (arparam=True) or the sampled/MAP frame (arparam=False).
    Args:
        in_dim (int): input dimension
        out_dim (int): RNN output dimension
        hidden_units (int): GRU hidden units amount
        hidden_layers (int): GRU hidden layers amount
        kernel_size (int): kernel size for input convolutional layers
        dilation_size (int): dilation size for input convolutional layers
        do_prob (float): drop-out probability
        spk_dim (int): if not None, first spk_dim output channels form a
            selu()-activated speaker-code head preceding mu and log-scale
        scale_in_flag (bool): flag to use input normalization layer
        scale_out_flag (bool): flag to use output de-normalization layer
        causal_conv (bool): flag to use causal (past-only) input convolution
        arparam (bool): feed back the full distribution parameters instead of
            a single sampled/MAP frame
        [Weights & biases of norm/de-norm layers should be set with training data stats]
    """
    def __init__(self, in_dim=55, out_dim=50, hidden_units=1024, hidden_layers=1, kernel_size=3, \
            dilation_size=2, do_prob=0, spk_dim=None, scale_in_flag=True, scale_out_flag=True, \
            causal_conv=False, arparam=True):
        super(GRU_RNN_STOCHASTIC, self).__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.hidden_units = hidden_units
        self.hidden_layers = hidden_layers
        self.kernel_size = kernel_size
        self.dilation_size = dilation_size
        self.do_prob = do_prob
        self.scale_in_flag = scale_in_flag
        self.scale_out_flag = scale_out_flag
        self.spk_dim = spk_dim
        self.causal_conv = causal_conv
        # mu_dim = index one past the end of the mu slice in the output vector
        if self.spk_dim is not None:
            self.mu_dim = self.spk_dim+self.out_dim
        else:
            self.mu_dim = self.out_dim
        self.arparam = arparam
        # Normalization layer
        if self.scale_in_flag:
            self.scale_in = nn.Conv1d(self.in_dim, self.in_dim, 1)
        # Dilated two-sides convolution layers: -4/0/+4 frames with kernel_size = 3 and dilation_size = 2
        # or Dilated causal convolution layers: -8/0 frames with kernel_size = 3 and dilation_size = 2
        if not self.causal_conv:
            self.conv = TwoSidedDilConv1d(in_dim=self.in_dim, kernel_size=self.kernel_size, \
                            layers=self.dilation_size)
        else:
            self.conv = CausalDilConv1d(in_dim=self.in_dim, kernel_size=self.kernel_size, \
                            layers=self.dilation_size)
        self.receptive_field = self.conv.rec_field
        # AR feedback width depends on whether full parameters are fed back
        if self.arparam:
            if self.spk_dim is not None:
                self.tot_in_dim = self.in_dim*self.receptive_field+self.out_dim*2+self.spk_dim
            else:
                self.tot_in_dim = self.in_dim*self.receptive_field+self.out_dim*2
        else:
            if self.spk_dim is not None:
                self.tot_in_dim = self.in_dim*self.receptive_field+self.out_dim+self.spk_dim
            else:
                self.tot_in_dim = self.in_dim*self.receptive_field+self.out_dim
        if self.do_prob > 0:
            self.conv_drop = nn.Dropout(p=self.do_prob)
        # GRU layer(s)
        if self.do_prob > 0 and self.hidden_layers > 1:
            self.gru = nn.GRU(self.tot_in_dim, self.hidden_units, self.hidden_layers, dropout=self.do_prob, \
                        batch_first=True)
        else:
            self.gru = nn.GRU(self.tot_in_dim, self.hidden_units, self.hidden_layers, batch_first=True)
        if self.do_prob > 0:
            self.gru_drop = nn.Dropout(p=self.do_prob)
        # Output projection emits [spk_code,] mu and pre-activation log-scale
        if self.spk_dim is not None:
            self.out_1 = nn.Conv1d(self.hidden_units, self.spk_dim+self.out_dim*2, 1)
        else:
            self.out_1 = nn.Conv1d(self.hidden_units, self.out_dim*2, 1)
        # Denormalization layer
        if self.scale_out_flag:
            self.scale_out = nn.Conv1d(self.out_dim, self.out_dim, 1)
    def forward(self, x, y_in, h_in=None, noise=0, do=False, sampling=True):
        """Forward calculation
        Args:
            x (Variable): float tensor variable with the shape (T x C_in) or (B x T x C_in)
            y_in (Variable): previous AR feedback frame (B x 1 x feedback_dim)
            h_in (Variable): optional initial GRU hidden state
            noise (float): std of Gaussian noise added to the normalized input
            do (bool): apply dropout when True (training)
            sampling (bool): sample from the Laplace posterior; otherwise use mu
        Return:
            (Variable): float tensor variable with the shape (T x C_out) or (B x T x C_out),
            plus full parameter trajectory, last feedback frame, final GRU
            hidden state, and the (de-normalized) MAP trajectory
        """
        if len(x.shape) > 2:
            batch_flag = True
            T = x.shape[1]
            # Normalization layer
            if self.scale_in_flag:
                x_in = self.scale_in(x.transpose(1,2)) # B x T x C -> B x C x T
            else:
                x_in = x.transpose(1,2) # B x T x C -> B x C x T
        else:
            batch_flag = False
            T = x.shape[0]
            # Normalization layer
            if self.scale_in_flag:
                x_in = self.scale_in(torch.unsqueeze(x.transpose(0,1),0)) # T x C -> C x T -> B x C x T
            else:
                x_in = torch.unsqueeze(x.transpose(0,1),0) # T x C -> C x T -> B x C x T
        if noise > 0:
            # Gaussian noise injection on the normalized input
            # NOTE(review): .cuda() assumes GPU execution — confirm for CPU use
            x_noise = torch.normal(mean=0, \
                        std=noise*torch.ones(x_in.shape[0],x_in.shape[1],x_in.shape[2])).cuda()
            x_in = x_in + x_noise # B x C x T
        # Dilated two-sides convolution layers: -4/0/+4 frames with kernel_size = 3 and dilation_size = 2
        # or Dilated causal convolution layers: -8/0 frames with kernel_size = 3 and dilation_size = 2
        if self.do_prob > 0 and do:
            x_conv = self.conv_drop(self.conv(x_in).transpose(1,2)) # T x C --> B x C x T --> B x T x C
        else:
            x_conv = self.conv(x_in).transpose(1,2) # T x C --> B x C x T --> B x T x C
        # GRU and AR layers
        # 1st frame: GRU input is conv features + the externally supplied y_in
        if h_in is not None:
            out, h = self.gru(torch.cat((x_conv[:,:1],y_in),2), h_in) # B x T x C
        else:
            out, h = self.gru(torch.cat((x_conv[:,:1],y_in),2)) # B x T x C
        if self.do_prob > 0 and do:
            out = self.gru_drop(out)
        out = self.out_1(out.transpose(1,2)).transpose(1,2) # B x T x C -> B x C x T -> B x T x C
        # Build parameter frame: selu() on speaker slice, logsigmoid() keeps
        # the log-scale slice negative (scale < 1)
        if self.spk_dim is not None:
            out_param = torch.cat((F.selu(out[:,:,:self.spk_dim]),out[:,:,self.spk_dim:self.mu_dim],\
                            F.logsigmoid(out[:,:,self.mu_dim:])),2)
            if not self.arparam:
                if sampling:
                    out = sampling_vae_laplace_batch(out_param[:,:,self.spk_dim:], lat_dim=self.out_dim)
                else:
                    out = out[:,:,self.spk_dim:self.mu_dim]
        else:
            out_param = torch.cat((out[:,:,:self.mu_dim],F.logsigmoid(out[:,:,self.mu_dim:])),2)
            if not self.arparam:
                if sampling:
                    out = sampling_vae_laplace_batch(out_param, lat_dim=self.out_dim)
                else:
                    out = out[:,:,:self.mu_dim]
        trj_out_param = out_param
        if not self.arparam:
            trj_out = out
        # Select what gets fed back for the next frame
        if self.arparam:
            y_in = out_param
        else:
            if self.spk_dim is not None:
                y_in = torch.cat((out_param[:,:,:self.spk_dim],out),2)
            else:
                y_in = out
        # 2nd-Tth frame: same computation unrolled per (do, arparam,
        # sampling, spk_dim) combination to keep per-step overhead low
        if self.do_prob > 0 and do:
            if self.arparam:
                if self.spk_dim is not None:
                    for i in range(1,T):
                        out, h = self.gru(torch.cat((x_conv[:,i:(i+1)],y_in),2), h)
                        out = self.out_1(self.gru_drop(out).transpose(1,2)).transpose(1,2)
                        out_param = torch.cat((F.selu(out[:,:,:self.spk_dim]),\
                                        out[:,:,self.spk_dim:self.mu_dim],F.logsigmoid(out[:,:,self.mu_dim:])),2)
                        trj_out_param = torch.cat((trj_out_param, out_param),1)
                        y_in = out_param
                else:
                    for i in range(1,T):
                        out, h = self.gru(torch.cat((x_conv[:,i:(i+1)],y_in),2), h)
                        out = self.out_1(self.gru_drop(out).transpose(1,2)).transpose(1,2)
                        out_param = torch.cat((out[:,:,:self.mu_dim],F.logsigmoid(out[:,:,self.mu_dim:])),2)
                        trj_out_param = torch.cat((trj_out_param, out_param),1)
                        y_in = out_param
            else:
                if sampling:
                    if self.spk_dim is not None:
                        for i in range(1,T):
                            out, h = self.gru(torch.cat((x_conv[:,i:(i+1)],y_in),2), h)
                            out = self.out_1(self.gru_drop(out).transpose(1,2)).transpose(1,2)
                            out_param = torch.cat((F.selu(out[:,:,:self.spk_dim]),\
                                            out[:,:,self.spk_dim:self.mu_dim],\
                                            F.logsigmoid(out[:,:,self.mu_dim:])),2)
                            out = sampling_vae_laplace_batch(out_param[:,:,self.spk_dim:],lat_dim=self.out_dim)
                            trj_out_param = torch.cat((trj_out_param, out_param),1)
                            trj_out = torch.cat((trj_out, out), 1)
                            y_in = torch.cat((out_param[:,:,:self.spk_dim], out),2)
                    else:
                        for i in range(1,T):
                            out, h = self.gru(torch.cat((x_conv[:,i:(i+1)],y_in),2), h)
                            out = self.out_1(self.gru_drop(out).transpose(1,2)).transpose(1,2)
                            out_param = torch.cat((out[:,:,:self.mu_dim],\
                                            F.logsigmoid(out[:,:,self.mu_dim:])),2)
                            out = sampling_vae_laplace_batch(out_param, lat_dim=self.out_dim)
                            trj_out_param = torch.cat((trj_out_param, out_param),1)
                            trj_out = torch.cat((trj_out, out), 1)
                            y_in = out
                else:
                    if self.spk_dim is not None:
                        for i in range(1,T):
                            out, h = self.gru(torch.cat((x_conv[:,i:(i+1)],y_in),2), h)
                            out = self.out_1(self.gru_drop(out).transpose(1,2)).transpose(1,2)
                            out_param = torch.cat((F.selu(out[:,:,:self.spk_dim]),\
                                            out[:,:,self.spk_dim:self.mu_dim],\
                                            F.logsigmoid(out[:,:,self.mu_dim:])),2)
                            out = out[:,:,self.spk_dim:self.mu_dim]
                            trj_out_param = torch.cat((trj_out_param, out_param),1)
                            trj_out = torch.cat((trj_out, out), 1)
                            y_in = torch.cat((out_param[:,:,:self.spk_dim], out),2)
                    else:
                        for i in range(1,T):
                            out, h = self.gru(torch.cat((x_conv[:,i:(i+1)],y_in),2), h)
                            out = self.out_1(self.gru_drop(out).transpose(1,2)).transpose(1,2)
                            out_param = torch.cat((out[:,:,:self.mu_dim],\
                                            F.logsigmoid(out[:,:,self.mu_dim:])),2)
                            out = out[:,:,:self.mu_dim]
                            trj_out_param = torch.cat((trj_out_param, out_param),1)
                            trj_out = torch.cat((trj_out, out), 1)
                            y_in = out
        else:
            if self.arparam:
                if self.spk_dim is not None:
                    for i in range(1,T):
                        out, h = self.gru(torch.cat((x_conv[:,i:(i+1)],y_in),2), h)
                        out = self.out_1(out.transpose(1,2)).transpose(1,2)
                        out_param = torch.cat((F.selu(out[:,:,:self.spk_dim]),\
                                        out[:,:,self.spk_dim:self.mu_dim],\
                                        F.logsigmoid(out[:,:,self.mu_dim:])),2)
                        trj_out_param = torch.cat((trj_out_param, out_param),1)
                        y_in = out_param
                else:
                    for i in range(1,T):
                        out, h = self.gru(torch.cat((x_conv[:,i:(i+1)],y_in),2), h)
                        out = self.out_1(out.transpose(1,2)).transpose(1,2)
                        out_param = torch.cat((out[:,:,:self.mu_dim],F.logsigmoid(out[:,:,self.mu_dim:])),2)
                        trj_out_param = torch.cat((trj_out_param, out_param),1)
                        y_in = out_param
            else:
                if sampling:
                    if self.spk_dim is not None:
                        for i in range(1,T):
                            out, h = self.gru(torch.cat((x_conv[:,i:(i+1)],y_in),2), h)
                            out = self.out_1(out.transpose(1,2)).transpose(1,2)
                            out_param = torch.cat((F.selu(out[:,:,:self.spk_dim]),\
                                            out[:,:,self.spk_dim:self.mu_dim],\
                                            F.logsigmoid(out[:,:,self.mu_dim:])),2)
                            out = sampling_vae_laplace_batch(out_param[:,:,self.spk_dim:],lat_dim=self.out_dim)
                            trj_out_param = torch.cat((trj_out_param, out_param),1)
                            trj_out = torch.cat((trj_out, out), 1)
                            y_in = torch.cat((out_param[:,:,:self.spk_dim], out),2)
                    else:
                        for i in range(1,T):
                            out, h = self.gru(torch.cat((x_conv[:,i:(i+1)],y_in),2), h)
                            out = self.out_1(out.transpose(1,2)).transpose(1,2)
                            out_param = torch.cat((out[:,:,:self.mu_dim],\
                                            F.logsigmoid(out[:,:,self.mu_dim:])),2)
                            out = sampling_vae_laplace_batch(out_param, lat_dim=self.out_dim)
                            trj_out_param = torch.cat((trj_out_param, out_param),1)
                            trj_out = torch.cat((trj_out, out), 1)
                            y_in = out
                else:
                    if self.spk_dim is not None:
                        for i in range(1,T):
                            out, h = self.gru(torch.cat((x_conv[:,i:(i+1)],y_in),2), h)
                            out = self.out_1(out.transpose(1,2)).transpose(1,2)
                            out_param = torch.cat((F.selu(out[:,:,:self.spk_dim]),\
                                            out[:,:,self.spk_dim:self.mu_dim],\
                                            F.logsigmoid(out[:,:,self.mu_dim:])),2)
                            out = out[:,:,self.spk_dim:self.mu_dim]
                            trj_out_param = torch.cat((trj_out_param, out_param),1)
                            trj_out = torch.cat((trj_out, out), 1)
                            y_in = torch.cat((out_param[:,:,:self.spk_dim], out),2)
                    else:
                        for i in range(1,T):
                            out, h = self.gru(torch.cat((x_conv[:,i:(i+1)],y_in),2), h)
                            out = self.out_1(out.transpose(1,2)).transpose(1,2)
                            out_param = torch.cat((out[:,:,:self.mu_dim],\
                                            F.logsigmoid(out[:,:,self.mu_dim:])),2)
                            out = out[:,:,:self.mu_dim]
                            trj_out_param = torch.cat((trj_out_param, out_param),1)
                            trj_out = torch.cat((trj_out, out), 1)
                            y_in = out
        # MAP/Latent-feat sampling
        if self.spk_dim is not None:
            trj_map = trj_out_param[:,:,self.spk_dim:self.mu_dim]
        else:
            trj_map = trj_out_param[:,:,:self.mu_dim]
        # arparam path deferred sampling: derive the output trajectory now
        if self.arparam:
            if self.spk_dim is not None:
                trj_out = trj_out_param[:,:,self.spk_dim:]
            else:
                trj_out = trj_out_param
            if sampling:
                trj_out = sampling_vae_laplace_batch(trj_out, lat_dim=self.out_dim)
            else:
                trj_out = trj_out[:,:,:self.out_dim]
        # Denormalization layer
        if self.scale_out_flag:
            # B x T x C -> B x C x T -> B x T x C
            trj_out = self.scale_out(trj_out.transpose(1,2)).transpose(1,2)
            trj_map = self.scale_out(trj_map.transpose(1,2)).transpose(1,2)
        if not batch_flag:
            trj_out = torch.squeeze(trj_out,0) # B x T x C -> B x C x T -> T x C
            trj_out_param = torch.squeeze(trj_out_param,0) # B x T x C -> B x C x T -> T x C
            trj_map = torch.squeeze(trj_map,0) # B x T x C -> B x C x T -> T x C
        return trj_out, trj_out_param, y_in, h, trj_map
| 47.001587
| 111
| 0.525784
| 4,320
| 29,611
| 3.4125
| 0.05787
| 0.0407
| 0.037308
| 0.00814
| 0.848528
| 0.828653
| 0.811694
| 0.795754
| 0.779541
| 0.773911
| 0
| 0.036943
| 0.349127
| 29,611
| 629
| 112
| 47.076312
| 0.727961
| 0.159805
| 0
| 0.74569
| 0
| 0
| 0.000696
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030172
| false
| 0
| 0.019397
| 0
| 0.079741
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f141bffdd3ea5ed6c35862937204f53d91535b73
| 25,556
|
py
|
Python
|
RealOptions/App.py
|
JohnnyEngineer/RealOptions
|
5d58f26844fc3d98c73281e4083a73095195edad
|
[
"Apache-2.0"
] | null | null | null |
RealOptions/App.py
|
JohnnyEngineer/RealOptions
|
5d58f26844fc3d98c73281e4083a73095195edad
|
[
"Apache-2.0"
] | null | null | null |
RealOptions/App.py
|
JohnnyEngineer/RealOptions
|
5d58f26844fc3d98c73281e4083a73095195edad
|
[
"Apache-2.0"
] | null | null | null |
from Functions import*
class App(tk.Tk):
    """Root window that hosts exactly one active frame at a time."""
    def __init__(self):
        tk.Tk.__init__(self)
        self._frame = None
        self.switch_frame(StartPage)
    def switch_frame(self, frame_class):
        """Instantiate frame_class, tear down the old frame, show the new one."""
        replacement = frame_class(self)
        previous, self._frame = self._frame, replacement
        if previous is not None:
            previous.destroy()
        replacement.pack()
class StartPage(tk.Frame):
    """Main menu: one button per real-options tool."""
    def __init__(self, master):
        tk.Frame.__init__(self, master)
        tk.Label(self, text="Menu Inicial", font=('Arial', 12, "bold")).pack(side="top", fill="x", pady=5)
        menu_items = (
            ("Estimação Volatilidade", EstimarVolatilidade),
            ("Simulação Monte Carlo Valores", MonteCarloValores),
            ("Opção de Abandono", OpcaoAbandono),
            ("Opção de Expandir", OpcaoExpandir),
            ("Opção de Contrair", OpcaoContracao),
            ("Opções Compostas", OpcoesCompostas),
            ("Opções de Esperar", OpcoesEsperar),
            ("Opções Modelo Black-Scholes", OpcaoBlackScholes),
        )
        for caption, target in menu_items:
            # bind target as a lambda default to avoid late-binding closures
            tk.Button(self, text=caption, width=40,
                      command=lambda target=target: master.switch_frame(target)).pack()
class MonteCarloValores(tk.Frame):
    """Form that collects drift/volatility/initial price and runs a Monte Carlo simulation."""
    def NumeroValidation(self, S):
        """Key-level validator: accept only digits and decimal separators."""
        if S in {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ',', '.'}:
            return True
        messagebox.showinfo('Alerta!', 'Digite apenas números')
        return False
    def Simular(self, tkWindow, retorno, volatilidade, precoinicial):
        """Run the simulation with the form's current values."""
        MonteCarloSimulation(retorno.get(), volatilidade.get(), precoinicial.get(), periodo=4,
                             quantidade_simulacao=1000, imagem=True, tkWindow=tkWindow)
    def _add_field(self, caption, variable):
        """Packed label + numerically-validated entry bound to variable."""
        tk.Label(self, text=caption, font=('Arial', 10, "bold")).pack()
        entry = tk.Entry(self, textvariable=variable, validate="key")
        entry['validatecommand'] = (self.register(self.NumeroValidation), '%S')
        entry.pack()
    def __init__(self, master):
        global app
        tk.Frame.__init__(self, master)
        tk.Frame.configure(self)
        retorno = tk.DoubleVar()
        volatilidade = tk.DoubleVar()
        precoinicial = tk.DoubleVar()
        self._add_field("Digite o valor esperado do retorno anual(10%=0.1):", retorno)
        self._add_field("Digite o valor estimado da volatilidade anual(10%=0.1):", volatilidade)
        self._add_field("Digite o valor do preço inicial:", precoinicial)
        tk.Button(self, text="Simular",
                  command=lambda: self.Simular(app, retorno, volatilidade, precoinicial)).pack()
        tk.Button(self, text="Voltar para página inical",
                  command=lambda: master.switch_frame(StartPage)).pack()
class OpcaoAbandono(tk.Frame):
    """Form for pricing an abandonment option via a binomial lattice."""
    def PrecificarOpcao(self, T, S0, sigma, rf, K):
        """Read the tk variables and price the abandonment option."""
        global app
        binomial_model_abandono(T=int(T.get()), S0=S0.get(), sigma=sigma.get(), rf=rf.get(),
                                K=K.get(), nome="teste", dpi=600, imagem=True,
                                tkWindow=app)
    def NumeroValidation(self, S):
        """Key-level validator: accept only digits and decimal separators."""
        if S in {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ',', '.'}:
            return True
        messagebox.showinfo('Alerta!', 'Digite apenas números')
        return False
    def _add_field(self, caption, variable):
        """Packed label + numerically-validated entry bound to variable."""
        tk.Label(self, text=caption, font=('Arial', 10, "bold")).pack()
        entry = tk.Entry(self, textvariable=variable, validate="key")
        entry['validatecommand'] = (self.register(self.NumeroValidation), '%S')
        entry.pack()
    def __init__(self, master):
        global app
        tk.Frame.__init__(self, master)
        periodo = tk.IntVar()
        valor = tk.DoubleVar()
        volatilidade = tk.DoubleVar()
        taxalivrederisco = tk.DoubleVar()
        valorresidual = tk.DoubleVar()
        tk.Frame.configure(self)
        self._add_field("Digite o período do projeto (em anos):", periodo)
        self._add_field("Digite o valor do investimento:", valor)
        self._add_field("Digite a volatilidade:", volatilidade)
        self._add_field("Digite o valor da taxa livre de risco:", taxalivrederisco)
        self._add_field("Digite o valor residual:", valorresidual)
        tk.Button(self, text="Calcular",
                  command=lambda: self.PrecificarOpcao(T=periodo, S0=valor, sigma=volatilidade,
                                                       rf=taxalivrederisco, K=valorresidual)).pack()
        tk.Button(self, text="Voltar para página inical",
                  command=lambda: master.switch_frame(StartPage)).pack()
class OpcaoExpandir(tk.Frame):
    """Form for pricing an expansion option via a binomial lattice."""
    def PrecificarOpcao(self, T, S0, sigma, rf, fator_expansao, custo_expansao):
        """Read the tk variables and price the expansion option."""
        global app
        binomial_model_expansion(T=int(T.get()), S0=S0.get(), sigma=sigma.get(), rf=rf.get(),
                                 nome="teste", dpi=600, imagem=True,
                                 fator_expansao=fator_expansao.get(),
                                 custo_expansao=custo_expansao.get(), tkWindow=app)
    def NumeroValidation(self, S):
        """Key-level validator: accept only digits and decimal separators."""
        if S in {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ',', '.'}:
            return True
        messagebox.showinfo('Alerta!', 'Digite apenas números')
        return False
    def _add_field(self, caption, variable):
        """Packed label + numerically-validated entry bound to variable."""
        tk.Label(self, text=caption, font=('Arial', 10, "bold")).pack()
        entry = tk.Entry(self, textvariable=variable, validate="key")
        entry['validatecommand'] = (self.register(self.NumeroValidation), '%S')
        entry.pack()
    def __init__(self, master):
        global app
        tk.Frame.__init__(self, master)
        periodo = tk.IntVar()
        valor = tk.DoubleVar()
        volatilidade = tk.DoubleVar()
        taxalivrederisco = tk.DoubleVar()
        fator_expansao = tk.DoubleVar()
        custo_expansao = tk.DoubleVar()
        tk.Frame.configure(self)
        self._add_field("Digite o período do projeto (em anos):", periodo)
        self._add_field("Digite o valor do investimento:", valor)
        self._add_field("Digite a volatilidade:", volatilidade)
        self._add_field("Digite o valor da taxa livre de risco:", taxalivrederisco)
        self._add_field("Digite o fator de expansão:", fator_expansao)
        self._add_field("Digite o custo de expansão:", custo_expansao)
        tk.Button(self, text="Calcular",
                  command=lambda: self.PrecificarOpcao(T=periodo, S0=valor, sigma=volatilidade,
                                                       rf=taxalivrederisco, fator_expansao=fator_expansao,
                                                       custo_expansao=custo_expansao)).pack()
        tk.Button(self, text="Voltar para página inical",
                  command=lambda: master.switch_frame(StartPage)).pack()
class OpcaoContracao(tk.Frame):
    """Form for pricing a contraction option via a binomial lattice."""
    def PrecificarOpcao(self, T, S0, sigma, rf, fator_contracao, economia_contracao):
        """Read the tk variables and price the contraction option."""
        global app
        binomial_model_contraction(T=int(T.get()), S0=S0.get(), sigma=sigma.get(), rf=rf.get(),
                                   nome="teste", dpi=600, imagem=True,
                                   fator_contracao=fator_contracao.get(),
                                   economia_contracao=economia_contracao.get(),
                                   tkWindow=app)
    def NumeroValidation(self, S):
        """Key-level validator: accept only digits and decimal separators."""
        if S in {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ',', '.'}:
            return True
        messagebox.showinfo('Alerta!', 'Digite apenas números')
        return False
    def _add_field(self, caption, variable):
        """Packed label + numerically-validated entry bound to variable."""
        tk.Label(self, text=caption, font=('Arial', 10, "bold")).pack()
        entry = tk.Entry(self, textvariable=variable, validate="key")
        entry['validatecommand'] = (self.register(self.NumeroValidation), '%S')
        entry.pack()
    def __init__(self, master):
        global app
        tk.Frame.__init__(self, master)
        periodo = tk.IntVar()
        valor = tk.DoubleVar()
        volatilidade = tk.DoubleVar()
        taxalivrederisco = tk.DoubleVar()
        fator_contracao = tk.DoubleVar()
        economia_contracao = tk.DoubleVar()
        tk.Frame.configure(self)
        self._add_field("Digite o período do projeto (em anos):", periodo)
        self._add_field("Digite o valor do investimento:", valor)
        self._add_field("Digite a volatilidade:", volatilidade)
        self._add_field("Digite o valor da taxa livre de risco:", taxalivrederisco)
        self._add_field("Digite o fator de contração:", fator_contracao)
        self._add_field("Digite a economia na contração:", economia_contracao)
        tk.Button(self, text="Calcular",
                  command=lambda: self.PrecificarOpcao(T=periodo, S0=valor, sigma=volatilidade,
                                                       rf=taxalivrederisco, fator_contracao=fator_contracao,
                                                       economia_contracao=economia_contracao)).pack()
        tk.Button(self, text="Voltar para página inical",
                  command=lambda: master.switch_frame(StartPage)).pack()
class OpcoesCompostas(tk.Frame):
    """Form for pricing compound options; per-stage costs come from the listbox."""
    def PrecificarOpcao(self, T, S0, sigma, rf, custos):
        """Price using the costs currently in the listbox (custos arg is kept for interface compatibility)."""
        global app
        itens = list(self.listbox.get(0, END))
        binomial_model_compounds(T=int(T.get()), S0=S0.get(), sigma=sigma.get(), rf=rf.get(),
                                 custos=itens, nome="teste", dpi=600,
                                 imagem=True, tkWindow=app)
    def AdicionarItem(self):
        """Append the typed cost to the listbox."""
        self.listbox.insert(tk.END, self.custos.get())
    def RemoverItem(self):
        """Delete the selected listbox entry."""
        self.listbox.delete(self.listbox.curselection())
    def LimparListBox(self):
        """Remove every cost from the listbox."""
        self.listbox.delete(0, END)
    def NumeroValidation(self, S):
        """Key-level validator: accept only digits and decimal separators."""
        if S in {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ',', '.'}:
            return True
        messagebox.showinfo('Alerta!', 'Digite apenas números')
        return False
    def _add_field(self, caption, variable):
        """Packed label + numerically-validated entry bound to variable."""
        tk.Label(self, text=caption, font=('Arial', 10, "bold")).pack()
        entry = tk.Entry(self, textvariable=variable, validate="key")
        entry['validatecommand'] = (self.register(self.NumeroValidation), '%S')
        entry.pack()
    def __init__(self, master):
        global app
        tk.Frame.__init__(self, master)
        periodo = tk.IntVar()
        valor = tk.DoubleVar()
        volatilidade = tk.DoubleVar()
        taxalivrederisco = tk.DoubleVar()
        self.custos = tk.DoubleVar()
        self._add_field("Digite o período do projeto (em anos):", periodo)
        self._add_field("Digite o valor do investimento:", valor)
        self._add_field("Digite a volatilidade:", volatilidade)
        self._add_field("Digite o valor da taxa livre de risco:", taxalivrederisco)
        tk.Label(self, text="Adicione os custos:",
                 font=('Arial', 10, "bold")).pack()
        self.listbox = Listbox(self)
        self.listbox.pack()
        entry = tk.Entry(self, textvariable=self.custos, validate="key")
        entry['validatecommand'] = (self.register(self.NumeroValidation), '%S')
        entry.pack()
        tk.Button(self, text="Adicionar Custo",
                  command=self.AdicionarItem).pack()
        tk.Button(self, text="Remover Custo",
                  command=self.RemoverItem).pack()
        tk.Button(self, text="Limpar toda lista de custos",
                  command=self.LimparListBox).pack()
        tk.Button(self, text="Calcular",
                  command=lambda: self.PrecificarOpcao(T=periodo, S0=valor, sigma=volatilidade,
                                                       rf=taxalivrederisco, custos=self.custos)).pack()
        tk.Button(self, text="Voltar para página inical",
                  command=lambda: master.switch_frame(StartPage)).pack()
class OpcoesEsperar(tk.Frame):
    """Form for pricing wait-to-invest options with per-period strikes from the listbox."""
    def PrecificarOpcao(self, T, S0, sigma, rf, custos):
        """Price using the costs currently in the listbox (custos arg is kept for interface compatibility)."""
        global app
        itens = list(self.listbox.get(0, END))
        binomial_model_dynamicstrikes(T=int(T.get()), S0=S0.get(), sigma=sigma.get(), rf=rf.get(),
                                      custos=itens, nome="teste", dpi=600,
                                      imagem=True, tkWindow=app)
    def AdicionarItem(self):
        """Append the typed cost to the listbox."""
        self.listbox.insert(tk.END, self.custos.get())
    def RemoverItem(self):
        """Delete the selected listbox entry."""
        self.listbox.delete(self.listbox.curselection())
    def LimparListBox(self):
        """Remove every cost from the listbox."""
        self.listbox.delete(0, END)
    def NumeroValidation(self, S):
        """Key-level validator: accept only digits and decimal separators."""
        if S in {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ',', '.'}:
            return True
        messagebox.showinfo('Alerta!', 'Digite apenas números')
        return False
    def _add_field(self, caption, variable):
        """Packed label + numerically-validated entry bound to variable."""
        tk.Label(self, text=caption, font=('Arial', 10, "bold")).pack()
        entry = tk.Entry(self, textvariable=variable, validate="key")
        entry['validatecommand'] = (self.register(self.NumeroValidation), '%S')
        entry.pack()
    def __init__(self, master):
        global app
        tk.Frame.__init__(self, master)
        periodo = tk.IntVar()
        valor = tk.DoubleVar()
        volatilidade = tk.DoubleVar()
        taxalivrederisco = tk.DoubleVar()
        self.custos = tk.DoubleVar()
        self._add_field("Digite o período do projeto (em anos):", periodo)
        self._add_field("Digite o valor do investimento:", valor)
        self._add_field("Digite a volatilidade:", volatilidade)
        self._add_field("Digite o valor da taxa livre de risco:", taxalivrederisco)
        tk.Label(self, text="Adicione os custos:",
                 font=('Arial', 10, "bold")).pack()
        self.listbox = Listbox(self)
        self.listbox.pack()
        entry = tk.Entry(self, textvariable=self.custos, validate="key")
        entry['validatecommand'] = (self.register(self.NumeroValidation), '%S')
        entry.pack()
        tk.Button(self, text="Adicionar Custo",
                  command=self.AdicionarItem).pack()
        tk.Button(self, text="Remover Custo",
                  command=self.RemoverItem).pack()
        tk.Button(self, text="Limpar toda lista de custos",
                  command=self.LimparListBox).pack()
        tk.Button(self, text="Calcular",
                  command=lambda: self.PrecificarOpcao(T=periodo, S0=valor, sigma=volatilidade,
                                                       rf=taxalivrederisco, custos=self.custos)).pack()
        tk.Button(self, text="Voltar para página inical",
                  command=lambda: master.switch_frame(StartPage)).pack()
class OpcaoBlackScholes(tk.Frame):
    """Tk frame that prices a European call/put with Black-Scholes-Merton."""
    def PrecificarOpcao(self,T,S0,sigma,rf,precoexercicio,dividendos):
        """Read the widget variables, price with BSMOptionValuation and show the result.

        All parameters are tk Variable objects; values are read via .get().
        """
        global app
        T=int(T.get())
        S0=S0.get()
        sigma=sigma.get()
        rf=rf.get()
        precoexercicio=precoexercicio.get()
        dividendos=dividendos.get()
        # BSMOptionValuation is defined elsewhere in this module.
        x = BSMOptionValuation(S0=S0, K=precoexercicio, T=T, r=rf, sigma=sigma, div_yield=dividendos)
        # Fixed: title typo ('Precificaão' -> 'Precificação') and missing space
        # before the put price in the message body.
        messagebox.showinfo('Precificação Modelo Black-Scholes-Merton', 'A call possui preço de: '+str(np.round(x.call_value(),2))+"."+
                            " O valor da put é de: "+str(np.round(x.put_value(),2)))
    def NumeroValidation(self,S):
        """Entry 'key' validator; S is the inserted text ('%S' substitution).

        Returns True to accept the keystroke, False (after an alert box) to
        reject it. NOTE(review): ',' is accepted here, but DoubleVar.get()
        cannot parse comma decimals — confirm intended locale handling.
        """
        if S in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ',','.']:
            return True
        else:
            messagebox.showinfo('Alerta!', 'Digite apenas números')
            return False
    def __init__(self, master):
        """Build the form: labels and validated entries for the BSM inputs."""
        tk.Frame.__init__(self, master)
        df=None   # NOTE(review): df and df1 are never used in this class
        df1=None
        global app
        periodo=tk.IntVar()              # time to maturity in years (T)
        valor=tk.DoubleVar()             # underlying/investment value (S0)
        volatilidade=tk.DoubleVar()      # volatility (sigma)
        taxalivrederisco=tk.DoubleVar()  # risk-free rate (r)
        precoexercicio=tk.DoubleVar()    # strike price (K)
        dividendos=tk.DoubleVar()        # dividend yield
        tk.Frame.configure(self)  # no-op (no options passed)
        tk.Label(self, text="Digite o período do projeto (em anos):",
                 font=('Arial', 10, "bold")).pack()
        textBox=tk.Entry(self,textvariable=periodo, validate="key")
        # Validate each keystroke; '%S' passes the inserted character(s).
        textBox['validatecommand'] = (self.register(self.NumeroValidation),'%S')
        textBox.pack()
        tk.Label(self, text="Digite o valor do investimento:",
                 font=('Arial', 10, "bold")).pack()
        textBox=tk.Entry(self,textvariable=valor, validate="key")
        textBox['validatecommand'] = (self.register(self.NumeroValidation),'%S')
        textBox.pack()
        tk.Label(self, text="Digite a volatilidade:",
                 font=('Arial', 10, "bold")).pack()
        textBox=tk.Entry(self,textvariable=volatilidade, validate="key")
        textBox['validatecommand'] = (self.register(self.NumeroValidation),'%S')
        textBox.pack()
        tk.Label(self, text="Digite o valor da taxa livre de risco:",
                 font=('Arial', 10, "bold")).pack()
        textBox=tk.Entry(self,textvariable=taxalivrederisco, validate="key")
        textBox['validatecommand'] = (self.register(self.NumeroValidation),'%S')
        textBox.pack()
        tk.Label(self, text="Digite o preço do exercício:",
                 font=('Arial', 10, "bold")).pack()
        textBox=tk.Entry(self,textvariable=precoexercicio, validate="key")
        textBox['validatecommand'] = (self.register(self.NumeroValidation),'%S')
        textBox.pack()
        tk.Label(self, text="Digite o valor dos dividendos (em %):",
                 font=('Arial', 10, "bold")).pack()
        textBox=tk.Entry(self,textvariable=dividendos, validate="key")
        textBox['validatecommand'] = (self.register(self.NumeroValidation),'%S')
        textBox.pack()
        tk.Button(self, text="Calcular",
                  command=lambda: self.PrecificarOpcao(T=periodo,S0=valor,sigma=volatilidade,
                                                       rf=taxalivrederisco,precoexercicio=precoexercicio,
                                                       dividendos=dividendos)).pack()
        tk.Button(self, text="Voltar para página inical",
                  command=lambda: master.switch_frame(StartPage)).pack()
class EstimarVolatilidade(tk.Frame):
    """Tk frame that estimates cash-flow volatility from user-entered values."""
    def CalcularVolatilidade(self):
        # Collect every cash flow from the Listbox and report the volatility
        # as a percentage (Volatilidade is defined elsewhere in this module).
        itens=list(self.listbox.get(0,END))
        messagebox.showinfo('Volatilidade', 'A volatilidade do fluxo de caixa informado é de: '+str(np.round(Volatilidade(itens)[0]*100,2))+"%")
    def NumeroValidation(self,S):
        """Entry 'key' validator ('%S' = inserted text); digits and '.' only."""
        if S in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
                 '.']:
            return True
        else:
            messagebox.showinfo('Alerta!', 'Digite apenas números')
            return False
    def AdicionarItem(self):
        # Append the typed cash flow to the Listbox.
        self.listbox.insert(tk.END,self.caixa.get())
    def RemoverItem(self):
        # Delete the Listbox row the user has selected.
        self.listbox.delete(self.listbox.curselection())
    def LimparListBox(self):
        # Remove every cash flow from the Listbox.
        self.listbox.delete(0,END)
    def __init__(self, master):
        """Build the cash-flow Listbox, validated entry and action buttons."""
        tk.Frame.__init__(self, master)
        tk.Frame.configure(self)  # no-op (no options passed)
        self.listbox = Listbox(self)
        self.listbox.pack()
        self.caixa=tk.DoubleVar()  # cash-flow value currently being typed
        tk.Label(self, text="Insira o valor do fluxo de caixa:",
                 font=('Arial', 10, "bold")).pack()
        self.entrada=tk.Entry(self,textvariable=self.caixa, validate="key")
        # Validate each keystroke; '%S' passes the inserted character(s).
        self.entrada['validatecommand'] = (self.register(self.NumeroValidation),'%S')
        self.entrada.pack()
        tk.Button(self, text="Adicionar Fluxo de Caixa",
                  command=self.AdicionarItem).pack()
        tk.Button(self, text="Remover Fluxo de Caixa",
                  command=self.RemoverItem).pack()
        tk.Button(self, text="Calcular Volatilidade",
                  command=self.CalcularVolatilidade).pack()
        tk.Button(self, text="Limpar Fluxo de Caixa",
                  command=self.LimparListBox).pack()
        tk.Button(self, text="Voltar para página inical",
                  command=lambda: master.switch_frame(StartPage)).pack()
if __name__ == "__main__":
    # Launch the real-options analysis GUI (App is defined earlier in this file).
    app = App()
    app.title("Análise de Opções Reais v1.0")
    app.geometry("400x550")
    app.mainloop()
| 47.413729
| 144
| 0.587338
| 2,821
| 25,556
| 5.260191
| 0.077632
| 0.038278
| 0.028169
| 0.038412
| 0.850866
| 0.840286
| 0.824247
| 0.794191
| 0.772154
| 0.754903
| 0
| 0.014038
| 0.264126
| 25,556
| 539
| 145
| 47.413729
| 0.775019
| 0
| 0
| 0.759843
| 0
| 0
| 0.134249
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070866
| false
| 0
| 0.001969
| 0
| 0.124016
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f170d0f1d8fd4d722ab55a60a0c421e40ca639ab
| 841
|
py
|
Python
|
tests/rules/test_min.py
|
mateuszz0000/Validator
|
80dde6dd9bcbc4e0fb5815c1415c40e7357e98bd
|
[
"MIT"
] | null | null | null |
tests/rules/test_min.py
|
mateuszz0000/Validator
|
80dde6dd9bcbc4e0fb5815c1415c40e7357e98bd
|
[
"MIT"
] | null | null | null |
tests/rules/test_min.py
|
mateuszz0000/Validator
|
80dde6dd9bcbc4e0fb5815c1415c40e7357e98bd
|
[
"MIT"
] | null | null | null |
from validator.rules import Min
def test_min_01():
    """Min(18) accepts a value above the bound and rejects one below it."""
    assert Min(18).check(23)
    assert not Min(18).check(13)
def test_min_02():
    """Sign/boundary cases for Min: negative bound, zero bound, value far below.

    Fixed: the original repeated the Min(0)/100 assertion block twice
    verbatim; the duplicate added no coverage and was removed.
    """
    rule = Min(-18)
    value_to_check = 0
    assert rule.check(value_to_check)
    rule = Min(0)
    value_to_check = 100
    assert rule.check(value_to_check)
    rule = Min(999)
    value_to_check = -999
    assert not rule.check(value_to_check)
def test_min_03():
    """A value exactly equal to the minimum passes the check."""
    for bound in (10, 0, -23):
        assert Min(bound).check(bound)
| 19.113636
| 41
| 0.665874
| 136
| 841
| 3.808824
| 0.169118
| 0.243243
| 0.416988
| 0.277992
| 0.830116
| 0.830116
| 0.76834
| 0.76834
| 0.754826
| 0.754826
| 0
| 0.064364
| 0.242568
| 841
| 43
| 42
| 19.55814
| 0.748823
| 0
| 0
| 0.580645
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.290323
| 1
| 0.096774
| false
| 0
| 0.032258
| 0
| 0.129032
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
74ada74ef3f2d6be38148e598b5a057a020daeb5
| 68,370
|
py
|
Python
|
test/anchor_selection_node_test.py
|
inomuh/indoor_localization
|
8d8769659fac91d049d98bc8a8a3acd420412e0f
|
[
"Apache-2.0"
] | 44
|
2019-01-06T18:42:24.000Z
|
2022-03-06T19:51:10.000Z
|
test/anchor_selection_node_test.py
|
elcinerdogan/indoor_localization
|
8d670f21ac606dbca11ee40d51d24c3ff96f873a
|
[
"Apache-2.0"
] | 3
|
2019-10-11T23:37:15.000Z
|
2021-02-23T07:45:59.000Z
|
test/anchor_selection_node_test.py
|
elcinerdogan/indoor_localization
|
8d670f21ac606dbca11ee40d51d24c3ff96f873a
|
[
"Apache-2.0"
] | 15
|
2019-02-12T06:01:08.000Z
|
2021-06-24T04:13:05.000Z
|
#!/usr/bin/env python
# -- coding: utf-8 --
# license removed for brevity
import rospy
import sys
import unittest
import numpy as np
import indoor_localization.anchor_selection_node as asn
PKG = 'indoor_localization'
NAME = 'anchor_selection_node_test'
class TestAnchorSelection(unittest.TestCase):
ips_dict = {
'AnchorID': [0, 5, 6, 8, 9, 11, 12, 13, 17, 19],
'x': [120, 120, 140, 140, 140, 160, 160, 160, 180, 180],
'y': [200, 180, 200, 180, 160, 200, 180, 160, 200, 180],
'z': [4.203356, 5.483369, 4.848520, 5.861276, 6.814601,
5.328804, 6.586562, 4.161473, 6.862733, 4.114005],
'tdoa_of_anchors': [-0.8984051424918285, 5.549387626398598, 4.363982236798634,
7.899062598699146, -18.95164370106358, -8.488891630092848,
-3.2857739316623658, -13.973034128654382, -6.113682736170411]
}
last_pos = {
'Tx': 155.0,
'Ty': 196.0,
'Tz': 0.5
}
selected_anchors_dict_1D = {
'AnchorID': [0, 5],
'x': [120, 120],
'y': [200, 180],
'z': [4.203356, 5.483369],
'tdoa_of_anchors': [5]
}
##################################################################################################
def test_ind_of_tag(self):
tag_x = self.last_pos['Tx']
tag_y = self.last_pos['Ty']
tag_z = self.last_pos['Tz']
position_list = [tag_x, tag_y, tag_z]
tested = asn.ind_of_tag(position_list)
test_result = np.array([155.0, 196.0, 0.5])
np.testing.assert_array_almost_equal(tested, test_result, 4)
def test_ind_of_anch(self):
row = len(self.ips_dict['AnchorID'])
list_x = list(self.ips_dict['x'])
list_y = list(self.ips_dict['y'])
list_z = list(self.ips_dict['z'])
tested = np.array(asn.ind_of_anch(self.ips_dict, row))
test_result = np.array([
[list_x[0], list_y[0], list_z[0]],
[list_x[1], list_y[1], list_z[1]],
[list_x[2], list_y[2], list_z[2]],
[list_x[3], list_y[3], list_z[3]],
[list_x[4], list_y[4], list_z[4]],
[list_x[5], list_y[5], list_z[5]],
[list_x[6], list_y[6], list_z[6]],
[list_x[7], list_y[7], list_z[7]],
[list_x[8], list_y[8], list_z[8]],
[list_x[9], list_y[9], list_z[9]]
], dtype=float)
np.testing.assert_array_equal(tested, test_result)
def test_anch_tag_distance(self):
row = len(self.ips_dict['AnchorID'])
list_x = list(self.ips_dict['x'])
list_y = list(self.ips_dict['y'])
list_z = list(self.ips_dict['z'])
tag_index = np.array([155.0, 196.0, 0.5])
anchor_index = np.array([
[list_x[0], list_y[0], list_z[0]],
[list_x[1], list_y[1], list_z[1]],
[list_x[2], list_y[2], list_z[2]],
[list_x[3], list_y[3], list_z[3]],
[list_x[4], list_y[4], list_z[4]],
[list_x[5], list_y[5], list_z[5]],
[list_x[6], list_y[6], list_z[6]],
[list_x[7], list_y[7], list_z[7]],
[list_x[8], list_y[8], list_z[8]],
[list_x[9], list_y[9], list_z[9]]
], dtype=float)
tested = asn.anch_tag_distance(row, tag_index, anchor_index)
test_result = np.array([
[35.000, 4.000, 35.22783],
[35.000, 16.000, 38.48376281],
[15.000, 4.000, 15.524174696],
[15.000, 16.000, 21.931712199],
[15.000, 36.000, 39],
[5.000, 4.000, 6.403124237],
[5.000, 16.000, 16.763054614],
[5.000, 36.000, 36.345563691],
[25.000, 4.000, 25.317977802],
[25.000, 16.000, 29.681644159],
])
np.testing.assert_array_almost_equal(tested, test_result, 4)
def test_ind_of_anch_tag_distance(self):
row = len(self.ips_dict['AnchorID'])
list_x = list(self.ips_dict['x'])
list_y = list(self.ips_dict['y'])
list_z = list(self.ips_dict['z'])
tag_anchor_distance_calcu = np.array([
[35.000, 4.000, 35.22783], # 120 200
[35.000, 16.000, 38.48376281], # 120, 180
[15.000, 4.000, 15.524174696], # 140, 200
[15.000, 16.000, 21.931712199], # 140, 180,
[15.000, 36.000, 39], # 140, 160
[5.000, 4.000, 6.403124237], # 160, 200
[5.000, 16.000, 16.763054614], # 160, 180
[5.000, 36.000, 36.345563691], # 160, 160
[25.000, 4.000, 25.317977802], # 180, 200
[25.000, 16.000, 29.681644159], # 180, 180
])
anchor_index = np.array([
[list_x[0], list_y[0], list_z[0]],
[list_x[1], list_y[1], list_z[1]],
[list_x[2], list_y[2], list_z[2]],
[list_x[3], list_y[3], list_z[3]],
[list_x[4], list_y[4], list_z[4]],
[list_x[5], list_y[5], list_z[5]],
[list_x[6], list_y[6], list_z[6]],
[list_x[7], list_y[7], list_z[7]],
[list_x[8], list_y[8], list_z[8]],
[list_x[9], list_y[9], list_z[9]]
], dtype=float)
tested = asn.ind_of_anch_tag_distance(row, tag_anchor_distance_calcu, anchor_index)
test_result = np.array([
[120, 200, 4.203356, 35.22783],
[120, 180, 5.483369, 38.48376281],
[140, 200, 4.848520, 15.524174696],
[140, 180, 5.861276, 21.931712199],
[140, 160, 6.814601, 39],
[160, 200, 5.328804, 6.403124237],
[160, 180, 6.586562, 16.763054614],
[160, 160, 4.161473, 36.345563691],
[180, 200, 6.862733, 25.317977802],
[180, 180, 4.114005, 29.681644159]
])
np.testing.assert_array_almost_equal(tested, test_result, 4)
"""
test_result = np.array([
[120, 180, 5.483369, 38.48376281],
[120, 200, 4.203356, 35.22783],
[140, 160, 6.814601, 39],
[140, 180, 5.861276, 21.931712199],
[140, 200, 4.848520, 15.524174696],
[160, 160, 4.161473, 36.345563691],
[160, 180, 6.586562, 16.763054614],
[160, 200, 5.328804, 6.403124237],
[180, 180, 4.114005, 29.681644159],
[180, 200, 6.862733, 25.317977802]
])
"""
def test_sorted_anch(self):
row = len(self.ips_dict['AnchorID'])
tag_anchor_index_distance = np.array([
[120, 200, 4.203356, 35.22783],
[120, 180, 5.483369, 38.48376281],
[140, 200, 4.848520, 15.524174696],
[140, 180, 5.861276, 21.931712199],
[140, 160, 6.814601, 39],
[160, 200, 5.328804, 6.403124237],
[160, 180, 6.586562, 16.763054614],
[160, 160, 4.161473, 36.345563691],
[180, 200, 6.862733, 25.317977802],
[180, 180, 4.114005, 29.681644159]
])
tested = asn.sorted_anch(row, tag_anchor_index_distance)
test_result = np.array([
[160, 200, 5.328804, 6.403124237],
[140, 200, 4.848520, 15.524174696],
[160, 180, 6.586562, 16.763054614],
[140, 180, 5.861276, 21.931712199],
[180, 200, 6.862733, 25.317977802],
[180, 180, 4.114005, 29.681644159],
[120, 200, 4.203356, 35.22783],
[160, 160, 4.161473, 36.345563691],
[120, 180, 5.483369, 38.48376281],
[140, 160, 6.814601, 39]
])
np.testing.assert_array_almost_equal(tested, test_result, 4)
def test_listed_anch(self):
row = len(self.ips_dict['AnchorID'])
tag_anchor_sorting = np.array([
[160, 200, 5.328804, 6.403124237],
[140, 200, 4.848520, 15.524174696],
[160, 180, 6.586562, 16.763054614],
[140, 180, 5.861276, 21.931712199],
[180, 200, 6.862733, 25.317977802],
[180, 180, 4.114005, 29.681644159],
[120, 200, 4.203356, 35.22783],
[160, 160, 4.161473, 36.345563691],
[120, 180, 5.483369, 38.48376281],
[140, 160, 6.814601, 39]
])
tested = asn. listed_anch(row, tag_anchor_sorting)
test_result = np.array([
[160, 200, 5.328804, 6.403124237, 0],
[140, 200, 4.848520, 15.524174696, 1],
[160, 180, 6.586562, 16.763054614, 2],
[140, 180, 5.861276, 21.931712199, 3],
[180, 200, 6.862733, 25.317977802, 4],
[180, 180, 4.114005, 29.681644159, 5],
[120, 200, 4.203356, 35.22783, 6],
[160, 160, 4.161473, 36.345563691, 7],
[120, 180, 5.483369, 38.48376281, 8],
[140, 160, 6.814601, 39, 9]
])
np.testing.assert_array_almost_equal(tested, test_result, 4)
def test_anch_combination(self):
row = len(self.ips_dict['AnchorID'])
tested = asn.anch_combination(row)
test_result = np.array(
[
[0, 1, 2],
[0, 1, 3],
[0, 1, 4],
[0, 1, 5],
[0, 1, 6],
[0, 1, 7],
[0, 1, 8],
[0, 1, 9],
[0, 2, 3],
[0, 2, 4],
[0, 2, 5],
[0, 2, 6],
[0, 2, 7],
[0, 2, 8],
[0, 2, 9],
[0, 3, 4],
[0, 3, 5],
[0, 3, 6],
[0, 3, 7],
[0, 3, 8],
[0, 3, 9],
[0, 4, 5],
[0, 4, 6],
[0, 4, 7],
[0, 4, 8],
[0, 4, 9],
[0, 5, 6],
[0, 5, 7],
[0, 5, 8],
[0, 5, 9],
[0, 6, 7],
[0, 6, 8],
[0, 6, 9],
[0, 7, 8],
[0, 7, 9],
[0, 8, 9],
[1, 2, 3],
[1, 2, 4],
[1, 2, 5],
[1, 2, 6],
[1, 2, 7],
[1, 2, 8],
[1, 2, 9],
[1, 3, 4],
[1, 3, 5],
[1, 3, 6],
[1, 3, 7],
[1, 3, 8],
[1, 3, 9],
[1, 4, 5],
[1, 4, 6],
[1, 4, 7],
[1, 4, 8],
[1, 4, 9],
[1, 5, 6],
[1, 5, 7],
[1, 5, 8],
[1, 5, 9],
[1, 6, 7],
[1, 6, 8],
[1, 6, 9],
[1, 7, 8],
[1, 7, 9],
[1, 8, 9],
[2, 3, 4],
[2, 3, 5],
[2, 3, 6],
[2, 3, 7],
[2, 3, 8],
[2, 3, 9],
[2, 4, 5],
[2, 4, 6],
[2, 4, 7],
[2, 4, 8],
[2, 4, 9],
[2, 5, 6],
[2, 5, 7],
[2, 5, 8],
[2, 5, 9],
[2, 6, 7],
[2, 6, 8],
[2, 6, 9],
[2, 7, 8],
[2, 7, 9],
[2, 8, 9],
[3, 4, 5],
[3, 4, 6],
[3, 4, 7],
[3, 4, 8],
[3, 4, 9],
[3, 5, 6],
[3, 5, 7],
[3, 5, 8],
[3, 5, 9],
[3, 6, 7],
[3, 6, 8],
[3, 6, 9],
[3, 7, 8],
[3, 7, 9],
[3, 8, 9],
[4, 5, 6],
[4, 5, 7],
[4, 5, 8],
[4, 5, 9],
[4, 6, 7],
[4, 6, 8],
[4, 6, 9],
[4, 7, 8],
[4, 7, 9],
[4, 8, 9],
[5, 6, 7],
[5, 6, 8],
[5, 6, 9],
[5, 7, 8],
[5, 7, 9],
[5, 8, 9],
[6, 7, 8],
[6, 7, 9],
[6, 8, 9],
[7, 8, 9]
]
)
np.testing.assert_equal(tested, test_result)
def test_tmp_anch_combination(self):
row = len(self.ips_dict['AnchorID'])
comb_anch = np.array(
[
[0, 1, 2],
[0, 1, 3],
[0, 1, 4],
[0, 1, 5],
[0, 1, 6],
[0, 1, 7],
[0, 1, 8],
[0, 1, 9],
[0, 2, 3],
[0, 2, 4],
[0, 2, 5],
[0, 2, 6],
[0, 2, 7],
[0, 2, 8],
[0, 2, 9],
[0, 3, 4],
[0, 3, 5],
[0, 3, 6],
[0, 3, 7],
[0, 3, 8],
[0, 3, 9],
[0, 4, 5],
[0, 4, 6],
[0, 4, 7],
[0, 4, 8],
[0, 4, 9],
[0, 5, 6],
[0, 5, 7],
[0, 5, 8],
[0, 5, 9],
[0, 6, 7],
[0, 6, 8],
[0, 6, 9],
[0, 7, 8],
[0, 7, 9],
[0, 8, 9],
[1, 2, 3],
[1, 2, 4],
[1, 2, 5],
[1, 2, 6],
[1, 2, 7],
[1, 2, 8],
[1, 2, 9],
[1, 3, 4],
[1, 3, 5],
[1, 3, 6],
[1, 3, 7],
[1, 3, 8],
[1, 3, 9],
[1, 4, 5],
[1, 4, 6],
[1, 4, 7],
[1, 4, 8],
[1, 4, 9],
[1, 5, 6],
[1, 5, 7],
[1, 5, 8],
[1, 5, 9],
[1, 6, 7],
[1, 6, 8],
[1, 6, 9],
[1, 7, 8],
[1, 7, 9],
[1, 8, 9],
[2, 3, 4],
[2, 3, 5],
[2, 3, 6],
[2, 3, 7],
[2, 3, 8],
[2, 3, 9],
[2, 4, 5],
[2, 4, 6],
[2, 4, 7],
[2, 4, 8],
[2, 4, 9],
[2, 5, 6],
[2, 5, 7],
[2, 5, 8],
[2, 5, 9],
[2, 6, 7],
[2, 6, 8],
[2, 6, 9],
[2, 7, 8],
[2, 7, 9],
[2, 8, 9],
[3, 4, 5],
[3, 4, 6],
[3, 4, 7],
[3, 4, 8],
[3, 4, 9],
[3, 5, 6],
[3, 5, 7],
[3, 5, 8],
[3, 5, 9],
[3, 6, 7],
[3, 6, 8],
[3, 6, 9],
[3, 7, 8],
[3, 7, 9],
[3, 8, 9],
[4, 5, 6],
[4, 5, 7],
[4, 5, 8],
[4, 5, 9],
[4, 6, 7],
[4, 6, 8],
[4, 6, 9],
[4, 7, 8],
[4, 7, 9],
[4, 8, 9],
[5, 6, 7],
[5, 6, 8],
[5, 6, 9],
[5, 7, 8],
[5, 7, 9],
[5, 8, 9],
[6, 7, 8],
[6, 7, 9],
[6, 8, 9],
[7, 8, 9]
]
)
selected_anchors = np.array([
[160, 200, 5.328804, 6.403124237, 0],
[140, 200, 4.848520, 15.524174696, 1],
[160, 180, 6.586562, 16.763054614, 2],
[140, 180, 5.861276, 21.931712199, 3],
[180, 200, 6.862733, 25.317977802, 4],
[180, 180, 4.114005, 29.681644159, 5],
[120, 200, 4.203356, 35.22783, 6],
[160, 160, 4.161473, 36.345563691, 7],
[120, 180, 5.483369, 38.48376281, 8],
[140, 160, 6.814601, 39, 9]
])
tested = asn.tmp_anch_combination(row, comb_anch, selected_anchors)
test_result = np.array(
[
[0, 1, 2, 0, 160, 200, 5.3288, 140, 200, 4.8485, 160, 180, 6.586562, 0],
[0, 1, 3, 0, 160, 200, 5.3288, 140, 200, 4.8485, 140, 180, 5.861275, 0],
[0, 1, 4, 0, 160, 200, 5.3288, 140, 200, 4.8485, 180, 200, 6.862732, 0],
[0, 1, 5, 0, 160, 200, 5.3288, 140, 200, 4.8485, 180, 180, 4.114004, 0],
[0, 1, 6, 0, 160, 200, 5.3288, 140, 200, 4.8485, 120, 200, 4.203356, 0],
[0, 1, 7, 0, 160, 200, 5.3288, 140, 200, 4.8485, 160, 160, 4.161472, 0],
[0, 1, 8, 0, 160, 200, 5.3288, 140, 200, 4.8485, 120, 180, 5.483369, 0],
[0, 1, 9, 0, 160, 200, 5.3288, 140, 200, 4.8485, 140, 160, 6.814601, 0],
[0, 2, 3, 0, 160, 200, 5.3288, 160, 180, 6.5866, 140, 180, 5.861275, 0],
[0, 2, 4, 0, 160, 200, 5.3288, 160, 180, 6.5866, 180, 200, 6.862732, 0],
[0, 2, 5, 0, 160, 200, 5.3288, 160, 180, 6.5866, 180, 180, 4.114004, 0],
[0, 2, 6, 0, 160, 200, 5.3288, 160, 180, 6.5866, 120, 200, 4.203356, 0],
[0, 2, 7, 0, 160, 200, 5.3288, 160, 180, 6.5866, 160, 160, 4.161472, 0],
[0, 2, 8, 0, 160, 200, 5.3288, 160, 180, 6.5866, 120, 180, 5.483369, 0],
[0, 2, 9, 0, 160, 200, 5.3288, 160, 180, 6.5866, 140, 160, 6.814601, 0],
[0, 3, 4, 0, 160, 200, 5.3288, 140, 180, 5.8613, 180, 200, 6.862732, 0],
[0, 3, 5, 0, 160, 200, 5.3288, 140, 180, 5.8613, 180, 180, 4.114004, 0],
[0, 3, 6, 0, 160, 200, 5.3288, 140, 180, 5.8613, 120, 200, 4.203356, 0],
[0, 3, 7, 0, 160, 200, 5.3288, 140, 180, 5.8613, 160, 160, 4.161472, 0],
[0, 3, 8, 0, 160, 200, 5.3288, 140, 180, 5.8613, 120, 180, 5.483369, 0],
[0, 3, 9, 0, 160, 200, 5.3288, 140, 180, 5.8613, 140, 160, 6.814601, 0],
[0, 4, 5, 0, 160, 200, 5.3288, 180, 200, 6.8627, 180, 180, 4.114004, 0],
[0, 4, 6, 0, 160, 200, 5.3288, 180, 200, 6.8627, 120, 200, 4.203356, 0],
[0, 4, 7, 0, 160, 200, 5.3288, 180, 200, 6.8627, 160, 160, 4.161472, 0],
[0, 4, 8, 0, 160, 200, 5.3288, 180, 200, 6.8627, 120, 180, 5.483369, 0],
[0, 4, 9, 0, 160, 200, 5.3288, 180, 200, 6.8627, 140, 160, 6.814601, 0],
[0, 5, 6, 0, 160, 200, 5.3288, 180, 180, 4.1140, 120, 200, 4.203356, 0],
[0, 5, 7, 0, 160, 200, 5.3288, 180, 180, 4.1140, 160, 160, 4.161472, 0],
[0, 5, 8, 0, 160, 200, 5.3288, 180, 180, 4.1140, 120, 180, 5.483369, 0],
[0, 5, 9, 0, 160, 200, 5.3288, 180, 180, 4.1140, 140, 160, 6.814601, 0],
[0, 6, 7, 0, 160, 200, 5.3288, 120, 200, 4.2034, 160, 160, 4.161472, 0],
[0, 6, 8, 0, 160, 200, 5.3288, 120, 200, 4.2034, 120, 180, 5.483369, 0],
[0, 6, 9, 0, 160, 200, 5.3288, 120, 200, 4.2034, 140, 160, 6.814601, 0],
[0, 7, 8, 0, 160, 200, 5.3288, 160, 160, 4.1615, 120, 180, 5.483369, 0],
[0, 7, 9, 0, 160, 200, 5.3288, 160, 160, 4.1615, 140, 160, 6.814601, 0],
[0, 8, 9, 0, 160, 200, 5.3288, 120, 180, 5.4834, 140, 160, 6.814601, 0],
[1, 2, 3, 0, 140, 200, 4.8485, 160, 180, 6.5866, 140, 180, 5.861275, 0],
[1, 2, 4, 0, 140, 200, 4.8485, 160, 180, 6.5866, 180, 200, 6.862732, 0],
[1, 2, 5, 0, 140, 200, 4.8485, 160, 180, 6.5866, 180, 180, 4.114004, 0],
[1, 2, 6, 0, 140, 200, 4.8485, 160, 180, 6.5866, 120, 200, 4.203356, 0],
[1, 2, 7, 0, 140, 200, 4.8485, 160, 180, 6.5866, 160, 160, 4.161472, 0],
[1, 2, 8, 0, 140, 200, 4.8485, 160, 180, 6.5866, 120, 180, 5.483369, 0],
[1, 2, 9, 0, 140, 200, 4.8485, 160, 180, 6.5866, 140, 160, 6.814601, 0],
[1, 3, 4, 0, 140, 200, 4.8485, 140, 180, 5.8613, 180, 200, 6.862732, 0],
[1, 3, 5, 0, 140, 200, 4.8485, 140, 180, 5.8613, 180, 180, 4.114004, 0],
[1, 3, 6, 0, 140, 200, 4.8485, 140, 180, 5.8613, 120, 200, 4.203356, 0],
[1, 3, 7, 0, 140, 200, 4.8485, 140, 180, 5.8613, 160, 160, 4.161472, 0],
[1, 3, 8, 0, 140, 200, 4.8485, 140, 180, 5.8613, 120, 180, 5.483369, 0],
[1, 3, 9, 0, 140, 200, 4.8485, 140, 180, 5.8613, 140, 160, 6.814601, 0],
[1, 4, 5, 0, 140, 200, 4.8485, 180, 200, 6.8627, 180, 180, 4.114004, 0],
[1, 4, 6, 0, 140, 200, 4.8485, 180, 200, 6.8627, 120, 200, 4.203356, 0],
[1, 4, 7, 0, 140, 200, 4.8485, 180, 200, 6.8627, 160, 160, 4.161472, 0],
[1, 4, 8, 0, 140, 200, 4.8485, 180, 200, 6.8627, 120, 180, 5.483369, 0],
[1, 4, 9, 0, 140, 200, 4.8485, 180, 200, 6.8627, 140, 160, 6.814601, 0],
[1, 5, 6, 0, 140, 200, 4.8485, 180, 180, 4.1140, 120, 200, 4.203356, 0],
[1, 5, 7, 0, 140, 200, 4.8485, 180, 180, 4.1140, 160, 160, 4.161472, 0],
[1, 5, 8, 0, 140, 200, 4.8485, 180, 180, 4.1140, 120, 180, 5.483369, 0],
[1, 5, 9, 0, 140, 200, 4.8485, 180, 180, 4.1140, 140, 160, 6.814601, 0],
[1, 6, 7, 0, 140, 200, 4.8485, 120, 200, 4.2034, 160, 160, 4.161472, 0],
[1, 6, 8, 0, 140, 200, 4.8485, 120, 200, 4.2034, 120, 180, 5.483369, 0],
[1, 6, 9, 0, 140, 200, 4.8485, 120, 200, 4.2034, 140, 160, 6.814601, 0],
[1, 7, 8, 0, 140, 200, 4.8485, 160, 160, 4.1615, 120, 180, 5.483369, 0],
[1, 7, 9, 0, 140, 200, 4.8485, 160, 160, 4.1615, 140, 160, 6.814601, 0],
[1, 8, 9, 0, 140, 200, 4.8485, 120, 180, 5.4834, 140, 160, 6.814601, 0],
[2, 3, 4, 0, 160, 180, 6.5866, 140, 180, 5.8613, 180, 200, 6.862732, 0],
[2, 3, 5, 0, 160, 180, 6.5866, 140, 180, 5.8613, 180, 180, 4.114004, 0],
[2, 3, 6, 0, 160, 180, 6.5866, 140, 180, 5.8613, 120, 200, 4.203356, 0],
[2, 3, 7, 0, 160, 180, 6.5866, 140, 180, 5.8613, 160, 160, 4.161472, 0],
[2, 3, 8, 0, 160, 180, 6.5866, 140, 180, 5.8613, 120, 180, 5.483369, 0],
[2, 3, 9, 0, 160, 180, 6.5866, 140, 180, 5.8613, 140, 160, 6.814601, 0],
[2, 4, 6, 0, 160, 180, 6.5866, 180, 200, 6.8627, 180, 180, 4.114004, 0],
[2, 4, 6, 0, 160, 180, 6.5866, 180, 200, 6.8627, 120, 200, 4.203356, 0],
[2, 4, 7, 0, 160, 180, 6.5866, 180, 200, 6.8627, 160, 160, 4.161472, 0],
[2, 4, 8, 0, 160, 180, 6.5866, 180, 200, 6.8627, 120, 180, 5.483369, 0],
[2, 4, 9, 0, 160, 180, 6.5866, 180, 200, 6.8627, 140, 160, 6.814601, 0],
[2, 5, 6, 0, 160, 180, 6.5866, 180, 180, 4.1140, 120, 200, 4.203356, 0],
[2, 5, 7, 0, 160, 180, 6.5866, 180, 180, 4.1140, 160, 160, 4.161472, 0],
[2, 5, 8, 0, 160, 180, 6.5866, 180, 180, 4.1140, 120, 180, 5.483369, 0],
[2, 5, 9, 0, 160, 180, 6.5866, 180, 180, 4.1140, 140, 160, 6.814601, 0],
[2, 6, 7, 0, 160, 180, 6.5866, 120, 200, 4.2034, 160, 160, 4.161472, 0],
[2, 6, 8, 0, 160, 180, 6.5866, 120, 200, 4.2034, 120, 180, 5.483369, 0],
[2, 6, 9, 0, 160, 180, 6.5866, 120, 200, 4.2034, 140, 160, 6.814601, 0],
[2, 7, 8, 0, 160, 180, 6.5866, 160, 160, 4.1615, 120, 180, 5.483369, 0],
[2, 7, 9, 0, 160, 180, 6.5866, 160, 160, 4.1615, 140, 160, 6.814601, 0],
[2, 8, 9, 0, 160, 180, 6.5866, 120, 180, 5.4834, 140, 160, 6.814601, 0],
[3, 5, 5, 0, 140, 180, 5.8613, 180, 200, 6.8627, 180, 180, 4.114004, 0],
[3, 5, 6, 0, 140, 180, 5.8613, 180, 200, 6.8627, 120, 200, 4.203356, 0],
[3, 5, 7, 0, 140, 180, 5.8613, 180, 200, 6.8627, 160, 160, 4.161472, 0],
[3, 5, 8, 0, 140, 180, 5.8613, 180, 200, 6.8627, 120, 180, 5.483369, 0],
[3, 5, 9, 0, 140, 180, 5.8613, 180, 200, 6.8627, 140, 160, 6.814601, 0],
[3, 6, 6, 0, 140, 180, 5.8613, 180, 180, 4.1140, 120, 200, 4.203356, 0],
[3, 6, 7, 0, 140, 180, 5.8613, 180, 180, 4.1140, 160, 160, 4.161472, 0],
[3, 6, 8, 0, 140, 180, 5.8613, 180, 180, 4.1140, 120, 180, 5.483369, 0],
[3, 6, 9, 0, 140, 180, 5.8613, 180, 180, 4.1140, 140, 160, 6.814601, 0],
[3, 7, 7, 0, 140, 180, 5.8613, 120, 200, 4.2034, 160, 160, 4.161472, 0],
[3, 7, 8, 0, 140, 180, 5.8613, 120, 200, 4.2034, 120, 180, 5.483369, 0],
[3, 7, 9, 0, 140, 180, 5.8613, 120, 200, 4.2034, 140, 160, 6.814601, 0],
[3, 8, 8, 0, 140, 180, 5.8613, 160, 160, 4.1615, 120, 180, 5.483369, 0],
[3, 8, 9, 0, 140, 180, 5.8613, 160, 160, 4.1615, 140, 160, 6.814601, 0],
[3, 9, 9, 0, 140, 180, 5.8613, 120, 180, 5.4834, 140, 160, 6.814601, 0],
[4, 5, 6, 0, 180, 200, 6.8627, 180, 180, 4.1140, 120, 200, 4.203356, 0],
[4, 5, 7, 0, 180, 200, 6.8627, 180, 180, 4.1140, 160, 160, 4.161472, 0],
[4, 5, 8, 0, 180, 200, 6.8627, 180, 180, 4.1140, 120, 180, 5.483369, 0],
[4, 5, 9, 0, 180, 200, 6.8627, 180, 180, 4.1140, 140, 160, 6.814601, 0],
[4, 6, 7, 0, 180, 200, 6.8627, 120, 200, 4.2034, 160, 160, 4.161472, 0],
[4, 6, 8, 0, 180, 200, 6.8627, 120, 200, 4.2034, 120, 180, 5.483369, 0],
[4, 6, 9, 0, 180, 200, 6.8627, 120, 200, 4.2034, 140, 160, 6.814601, 0],
[4, 7, 8, 0, 180, 200, 6.8627, 160, 160, 4.1615, 120, 180, 5.483369, 0],
[4, 7, 9, 0, 180, 200, 6.8627, 160, 160, 4.1615, 140, 160, 6.814601, 0],
[4, 8, 9, 0, 180, 200, 6.8627, 120, 180, 5.4834, 140, 160, 6.814601, 0],
[5, 6, 7, 0, 180, 180, 4.1140, 120, 200, 4.2034, 160, 160, 4.161472, 0],
[5, 6, 8, 0, 180, 180, 4.1140, 120, 200, 4.2034, 120, 180, 5.483369, 0],
[5, 6, 9, 0, 180, 180, 4.1140, 120, 200, 4.2034, 140, 160, 6.814601, 0],
[5, 7, 8, 0, 180, 180, 4.1140, 160, 160, 4.1615, 120, 180, 5.483369, 0],
[5, 7, 9, 0, 180, 180, 4.1140, 160, 160, 4.1615, 140, 160, 6.814601, 0],
[5, 8, 9, 0, 180, 180, 4.1140, 120, 180, 5.4834, 140, 160, 6.814601, 0],
[6, 7, 8, 0, 120, 200, 4.2034, 160, 160, 4.1615, 120, 180, 5.483369, 0],
[6, 7, 9, 0, 120, 200, 4.2034, 160, 160, 4.1615, 140, 160, 6.8146, 0],
[6, 8, 9, 0, 120, 200, 4.2034, 120, 180, 5.4834, 140, 160, 6.8146, 0],
[7, 8, 9, 0, 160, 160, 4.1615, 120, 180, 5.4834, 140, 160, 6.8146, 0]
]
)
np.testing.assert_array_almost_equal(tested, test_result, 0)
def test_all_combinations(self):
position_list = [155.0, 196.0, 0.5]
comb_anch_tmp = np.array(
[
[0, 1, 2, 0, 160, 200, 5.3288, 140, 200, 4.8485, 160, 180, 6.586562, 0],
[0, 1, 3, 0, 160, 200, 5.3288, 140, 200, 4.8485, 140, 180, 5.861275, 0],
[0, 1, 4, 0, 160, 200, 5.3288, 140, 200, 4.8485, 180, 200, 6.862732, 0],
[0, 1, 5, 0, 160, 200, 5.3288, 140, 200, 4.8485, 180, 180, 4.114004, 0],
[0, 1, 6, 0, 160, 200, 5.3288, 140, 200, 4.8485, 120, 200, 4.203356, 0],
[0, 1, 7, 0, 160, 200, 5.3288, 140, 200, 4.8485, 160, 160, 4.161472, 0],
[0, 1, 8, 0, 160, 200, 5.3288, 140, 200, 4.8485, 120, 180, 5.483369, 0],
[0, 1, 9, 0, 160, 200, 5.3288, 140, 200, 4.8485, 140, 160, 6.814601, 0],
[0, 2, 3, 0, 160, 200, 5.3288, 160, 180, 6.5866, 140, 180, 5.861275, 0],
[0, 2, 4, 0, 160, 200, 5.3288, 160, 180, 6.5866, 180, 200, 6.862732, 0],
[0, 2, 5, 0, 160, 200, 5.3288, 160, 180, 6.5866, 180, 180, 4.114004, 0],
[0, 2, 6, 0, 160, 200, 5.3288, 160, 180, 6.5866, 120, 200, 4.203356, 0],
[0, 2, 7, 0, 160, 200, 5.3288, 160, 180, 6.5866, 160, 160, 4.161472, 0],
[0, 2, 8, 0, 160, 200, 5.3288, 160, 180, 6.5866, 120, 180, 5.483369, 0],
[0, 2, 9, 0, 160, 200, 5.3288, 160, 180, 6.5866, 140, 160, 6.814601, 0],
[0, 3, 4, 0, 160, 200, 5.3288, 140, 180, 5.8613, 180, 200, 6.862732, 0],
[0, 3, 5, 0, 160, 200, 5.3288, 140, 180, 5.8613, 180, 180, 4.114004, 0],
[0, 3, 6, 0, 160, 200, 5.3288, 140, 180, 5.8613, 120, 200, 4.203356, 0],
[0, 3, 7, 0, 160, 200, 5.3288, 140, 180, 5.8613, 160, 160, 4.161472, 0],
[0, 3, 8, 0, 160, 200, 5.3288, 140, 180, 5.8613, 120, 180, 5.483369, 0],
[0, 3, 9, 0, 160, 200, 5.3288, 140, 180, 5.8613, 140, 160, 6.814601, 0],
[0, 4, 5, 0, 160, 200, 5.3288, 180, 200, 6.8627, 180, 180, 4.114004, 0],
[0, 4, 6, 0, 160, 200, 5.3288, 180, 200, 6.8627, 120, 200, 4.203356, 0],
[0, 4, 7, 0, 160, 200, 5.3288, 180, 200, 6.8627, 160, 160, 4.161472, 0],
[0, 4, 8, 0, 160, 200, 5.3288, 180, 200, 6.8627, 120, 180, 5.483369, 0],
[0, 4, 9, 0, 160, 200, 5.3288, 180, 200, 6.8627, 140, 160, 6.814601, 0],
[0, 5, 6, 0, 160, 200, 5.3288, 180, 180, 4.1140, 120, 200, 4.203356, 0],
[0, 5, 7, 0, 160, 200, 5.3288, 180, 180, 4.1140, 160, 160, 4.161472, 0],
[0, 5, 8, 0, 160, 200, 5.3288, 180, 180, 4.1140, 120, 180, 5.483369, 0],
[0, 5, 9, 0, 160, 200, 5.3288, 180, 180, 4.1140, 140, 160, 6.814601, 0],
[0, 6, 7, 0, 160, 200, 5.3288, 120, 200, 4.2034, 160, 160, 4.161472, 0],
[0, 6, 8, 0, 160, 200, 5.3288, 120, 200, 4.2034, 120, 180, 5.483369, 0],
[0, 6, 9, 0, 160, 200, 5.3288, 120, 200, 4.2034, 140, 160, 6.814601, 0],
[0, 7, 8, 0, 160, 200, 5.3288, 160, 160, 4.1615, 120, 180, 5.483369, 0],
[0, 7, 9, 0, 160, 200, 5.3288, 160, 160, 4.1615, 140, 160, 6.814601, 0],
[0, 8, 9, 0, 160, 200, 5.3288, 120, 180, 5.4834, 140, 160, 6.814601, 0],
[1, 2, 3, 0, 140, 200, 4.8485, 160, 180, 6.5866, 140, 180, 5.861275, 0],
[1, 2, 4, 0, 140, 200, 4.8485, 160, 180, 6.5866, 180, 200, 6.862732, 0],
[1, 2, 5, 0, 140, 200, 4.8485, 160, 180, 6.5866, 180, 180, 4.114004, 0],
[1, 2, 6, 0, 140, 200, 4.8485, 160, 180, 6.5866, 120, 200, 4.203356, 0],
[1, 2, 7, 0, 140, 200, 4.8485, 160, 180, 6.5866, 160, 160, 4.161472, 0],
[1, 2, 8, 0, 140, 200, 4.8485, 160, 180, 6.5866, 120, 180, 5.483369, 0],
[1, 2, 9, 0, 140, 200, 4.8485, 160, 180, 6.5866, 140, 160, 6.814601, 0],
[1, 3, 4, 0, 140, 200, 4.8485, 140, 180, 5.8613, 180, 200, 6.862732, 0],
[1, 3, 5, 0, 140, 200, 4.8485, 140, 180, 5.8613, 180, 180, 4.114004, 0],
[1, 3, 6, 0, 140, 200, 4.8485, 140, 180, 5.8613, 120, 200, 4.203356, 0],
[1, 3, 7, 0, 140, 200, 4.8485, 140, 180, 5.8613, 160, 160, 4.161472, 0],
[1, 3, 8, 0, 140, 200, 4.8485, 140, 180, 5.8613, 120, 180, 5.483369, 0],
[1, 3, 9, 0, 140, 200, 4.8485, 140, 180, 5.8613, 140, 160, 6.814601, 0],
[1, 4, 5, 0, 140, 200, 4.8485, 180, 200, 6.8627, 180, 180, 4.114004, 0],
[1, 4, 6, 0, 140, 200, 4.8485, 180, 200, 6.8627, 120, 200, 4.203356, 0],
[1, 4, 7, 0, 140, 200, 4.8485, 180, 200, 6.8627, 160, 160, 4.161472, 0],
[1, 4, 8, 0, 140, 200, 4.8485, 180, 200, 6.8627, 120, 180, 5.483369, 0],
[1, 4, 9, 0, 140, 200, 4.8485, 180, 200, 6.8627, 140, 160, 6.814601, 0],
[1, 5, 6, 0, 140, 200, 4.8485, 180, 180, 4.1140, 120, 200, 4.203356, 0],
[1, 5, 7, 0, 140, 200, 4.8485, 180, 180, 4.1140, 160, 160, 4.161472, 0],
[1, 5, 8, 0, 140, 200, 4.8485, 180, 180, 4.1140, 120, 180, 5.483369, 0],
[1, 5, 9, 0, 140, 200, 4.8485, 180, 180, 4.1140, 140, 160, 6.814601, 0],
[1, 6, 7, 0, 140, 200, 4.8485, 120, 200, 4.2034, 160, 160, 4.161472, 0],
[1, 6, 8, 0, 140, 200, 4.8485, 120, 200, 4.2034, 120, 180, 5.483369, 0],
[1, 6, 9, 0, 140, 200, 4.8485, 120, 200, 4.2034, 140, 160, 6.814601, 0],
[1, 7, 8, 0, 140, 200, 4.8485, 160, 160, 4.1615, 120, 180, 5.483369, 0],
[1, 7, 9, 0, 140, 200, 4.8485, 160, 160, 4.1615, 140, 160, 6.814601, 0],
[1, 8, 9, 0, 140, 200, 4.8485, 120, 180, 5.4834, 140, 160, 6.814601, 0],
[2, 3, 4, 0, 160, 180, 6.5866, 140, 180, 5.8613, 180, 200, 6.862732, 0],
[2, 3, 5, 0, 160, 180, 6.5866, 140, 180, 5.8613, 180, 180, 4.114004, 0],
[2, 3, 6, 0, 160, 180, 6.5866, 140, 180, 5.8613, 120, 200, 4.203356, 0],
[2, 3, 7, 0, 160, 180, 6.5866, 140, 180, 5.8613, 160, 160, 4.161472, 0],
[2, 3, 8, 0, 160, 180, 6.5866, 140, 180, 5.8613, 120, 180, 5.483369, 0],
[2, 3, 9, 0, 160, 180, 6.5866, 140, 180, 5.8613, 140, 160, 6.814601, 0],
[2, 4, 6, 0, 160, 180, 6.5866, 180, 200, 6.8627, 180, 180, 4.114004, 0],
[2, 4, 6, 0, 160, 180, 6.5866, 180, 200, 6.8627, 120, 200, 4.203356, 0],
[2, 4, 7, 0, 160, 180, 6.5866, 180, 200, 6.8627, 160, 160, 4.161472, 0],
[2, 4, 8, 0, 160, 180, 6.5866, 180, 200, 6.8627, 120, 180, 5.483369, 0],
[2, 4, 9, 0, 160, 180, 6.5866, 180, 200, 6.8627, 140, 160, 6.814601, 0],
[2, 5, 6, 0, 160, 180, 6.5866, 180, 180, 4.1140, 120, 200, 4.203356, 0],
[2, 5, 7, 0, 160, 180, 6.5866, 180, 180, 4.1140, 160, 160, 4.161472, 0],
[2, 5, 8, 0, 160, 180, 6.5866, 180, 180, 4.1140, 120, 180, 5.483369, 0],
[2, 5, 9, 0, 160, 180, 6.5866, 180, 180, 4.1140, 140, 160, 6.814601, 0],
[2, 6, 7, 0, 160, 180, 6.5866, 120, 200, 4.2034, 160, 160, 4.161472, 0],
[2, 6, 8, 0, 160, 180, 6.5866, 120, 200, 4.2034, 120, 180, 5.483369, 0],
[2, 6, 9, 0, 160, 180, 6.5866, 120, 200, 4.2034, 140, 160, 6.814601, 0],
[2, 7, 8, 0, 160, 180, 6.5866, 160, 160, 4.1615, 120, 180, 5.483369, 0],
[2, 7, 9, 0, 160, 180, 6.5866, 160, 160, 4.1615, 140, 160, 6.814601, 0],
[2, 8, 9, 0, 160, 180, 6.5866, 120, 180, 5.4834, 140, 160, 6.814601, 0],
[3, 5, 5, 0, 140, 180, 5.8613, 180, 200, 6.8627, 180, 180, 4.114004, 0],
[3, 5, 6, 0, 140, 180, 5.8613, 180, 200, 6.8627, 120, 200, 4.203356, 0],
[3, 5, 7, 0, 140, 180, 5.8613, 180, 200, 6.8627, 160, 160, 4.161472, 0],
[3, 5, 8, 0, 140, 180, 5.8613, 180, 200, 6.8627, 120, 180, 5.483369, 0],
[3, 5, 9, 0, 140, 180, 5.8613, 180, 200, 6.8627, 140, 160, 6.814601, 0],
[3, 6, 6, 0, 140, 180, 5.8613, 180, 180, 4.1140, 120, 200, 4.203356, 0],
[3, 6, 7, 0, 140, 180, 5.8613, 180, 180, 4.1140, 160, 160, 4.161472, 0],
[3, 6, 8, 0, 140, 180, 5.8613, 180, 180, 4.1140, 120, 180, 5.483369, 0],
[3, 6, 9, 0, 140, 180, 5.8613, 180, 180, 4.1140, 140, 160, 6.814601, 0],
[3, 7, 7, 0, 140, 180, 5.8613, 120, 200, 4.2034, 160, 160, 4.161472, 0],
[3, 7, 8, 0, 140, 180, 5.8613, 120, 200, 4.2034, 120, 180, 5.483369, 0],
[3, 7, 9, 0, 140, 180, 5.8613, 120, 200, 4.2034, 140, 160, 6.814601, 0],
[3, 8, 8, 0, 140, 180, 5.8613, 160, 160, 4.1615, 120, 180, 5.483369, 0],
[3, 8, 9, 0, 140, 180, 5.8613, 160, 160, 4.1615, 140, 160, 6.814601, 0],
[3, 9, 9, 0, 140, 180, 5.8613, 120, 180, 5.4834, 140, 160, 6.814601, 0],
[4, 5, 6, 0, 180, 200, 6.8627, 180, 180, 4.1140, 120, 200, 4.203356, 0],
[4, 5, 7, 0, 180, 200, 6.8627, 180, 180, 4.1140, 160, 160, 4.161472, 0],
[4, 5, 8, 0, 180, 200, 6.8627, 180, 180, 4.1140, 120, 180, 5.483369, 0],
[4, 5, 9, 0, 180, 200, 6.8627, 180, 180, 4.1140, 140, 160, 6.814601, 0],
[4, 6, 7, 0, 180, 200, 6.8627, 120, 200, 4.2034, 160, 160, 4.161472, 0],
[4, 6, 8, 0, 180, 200, 6.8627, 120, 200, 4.2034, 120, 180, 5.483369, 0],
[4, 6, 9, 0, 180, 200, 6.8627, 120, 200, 4.2034, 140, 160, 6.814601, 0],
[4, 7, 8, 0, 180, 200, 6.8627, 160, 160, 4.1615, 120, 180, 5.483369, 0],
[4, 7, 9, 0, 180, 200, 6.8627, 160, 160, 4.1615, 140, 160, 6.814601, 0],
[4, 8, 9, 0, 180, 200, 6.8627, 120, 180, 5.4834, 140, 160, 6.814601, 0],
[5, 6, 7, 0, 180, 180, 4.1140, 120, 200, 4.2034, 160, 160, 4.161472, 0],
[5, 6, 8, 0, 180, 180, 4.1140, 120, 200, 4.2034, 120, 180, 5.483369, 0],
[5, 6, 9, 0, 180, 180, 4.1140, 120, 200, 4.2034, 140, 160, 6.814601, 0],
[5, 7, 8, 0, 180, 180, 4.1140, 160, 160, 4.1615, 120, 180, 5.483369, 0],
[5, 7, 9, 0, 180, 180, 4.1140, 160, 160, 4.1615, 140, 160, 6.814601, 0],
[5, 8, 9, 0, 180, 180, 4.1140, 120, 180, 5.4834, 140, 160, 6.814601, 0],
[6, 7, 8, 0, 120, 200, 4.2034, 160, 160, 4.1615, 120, 180, 5.483369, 0],
[6, 7, 9, 0, 120, 200, 4.2034, 160, 160, 4.1615, 140, 160, 6.8146, 0],
[6, 8, 9, 0, 120, 200, 4.2034, 120, 180, 5.4834, 140, 160, 6.8146, 0],
[7, 8, 9, 0, 160, 160, 4.1615, 120, 180, 5.4834, 140, 160, 6.8146, 0]
]
)
sig_c = 0.0625
tested = asn.all_combinations(position_list, comb_anch_tmp, sig_c)
test_result = np.array(
[0.0615985240956481,
0.0755278692622397,
'inf',
0.0906468722218315,
'inf',
0.0576481035612150,
0.118097832483058,
0.0616508236793112,
0.0827039281886618,
0.0660463361741802,
0.0827039281886618,
0.0623782349499827,
'inf',
0.0671602523776219,
0.129560023902244,
0.0901024815761657,
0.0558308153260946,
0.0727462110823498,
0.0727462110823499,
0.159743254884436,
0.159743254884436,
0.118060750128726,
'inf',
0.0862568529859302,
0.198888398525328,
0.0978524359224338,
0.101495381640677,
0.0951058057651224,
0.0740181776582730,
0.0721078044831008,
0.0581146988775125,
0.124391993908157,
0.0634457785249022,
0.0653342641830980,
0.128242602918494,
0.0911550860253352,
0.0751272273179829,
0.0661438606461973,
0.134416448234600,
0.125685401502809,
0.134416448234600,
0.0639620209620384,
0.0661438606461974,
0.0902811532605790,
0.0935982481746206,
0.100271240188883,
0.173107389536710,
0.111777025001596,
'inf',
0.0946226505835292,
'inf',
0.0535995008606919,
0.0983514632389458,
0.0541559801468351,
0.184696682171827,
0.0735841797831203,
0.0669209726118692,
0.0545522449067303,
0.142803022748627,
0.164451601887282,
0.124200642738285,
0.0656197163100553,
0.116901057881806,
0.126390267890328,
0.159641864625151,
'inf',
0.176356596175917,
0.105781861921399,
'inf',
0.114288423164720,
0.0987979813929398,
0.0824815716172848,
0.148945367161375,
0.120082977994962,
'inf',
0.273339581610067,
0.122241177084292,
'inf',
0.181636260333079,
0.0963073689306489,
0.114147075204808,
0.0666964525010979,
0.130052730611788,
0.170788011881237,
0.0943698313868470,
0.0888797571350769,
0.0803546975056880,
0.0651494435875258,
0.237559966255753,
0.107794728785590,
0.129655067618261,
0.0827919875803034,
'inf',
0.0916799775280730,
'inf',
0.131267042223635,
0.192724889615585,
0.194733460712920,
0.134198636423894,
0.157039272749473,
0.126986784416138,
0.238674601319149,
0.127891766165732,
0.160564723679238,
0.0563979757239257,
0.119808118716213,
0.0603922828113410,
0.0808922066628258,
0.166512476670147,
0.0904687516119369,
0.0767930120967408,
0.114992143221601,
0.0647829289821586,
0.133723236066168,
0.271822785009573,
0.100548041943923,
0.136287092046481,
0.139112936502602,
0.313102536826369,
0.201910956905639], dtype=float
)
np.testing.assert_array_almost_equal(tested[:,13], test_result, 4)
def test_find_min_pdop(self):
comb_anch_last_col_13 = np.array(
[0.0615985240956481,
0.0755278692622397,
'inf',
0.0906468722218315,
'inf',
0.0576481035612150,
0.118097832483058,
0.0616508236793112,
0.0827039281886618,
0.0660463361741802,
0.0827039281886618,
0.0623782349499827,
'inf',
0.0671602523776219,
0.129560023902244,
0.0901024815761657,
0.0558308153260946,
0.0727462110823498,
0.0727462110823499,
0.159743254884436,
0.159743254884436,
0.118060750128726,
'inf',
0.0862568529859302,
0.198888398525328,
0.0978524359224338,
0.101495381640677,
0.0951058057651224,
0.0740181776582730,
0.0721078044831008,
0.0581146988775125,
0.124391993908157,
0.0634457785249022,
0.0653342641830980,
0.128242602918494,
0.0911550860253352,
0.0751272273179829,
0.0661438606461973,
0.134416448234600,
0.125685401502809,
0.134416448234600,
0.0639620209620384,
0.0661438606461974,
0.0902811532605790,
0.0935982481746206,
0.100271240188883,
0.173107389536710,
0.111777025001596,
'inf',
0.0946226505835292,
'inf',
0.0535995008606919,
0.0983514632389458,
0.0541559801468351,
0.184696682171827,
0.0735841797831203,
0.0669209726118692,
0.0545522449067303,
0.142803022748627,
0.164451601887282,
0.124200642738285,
0.0656197163100553,
0.116901057881806,
0.126390267890328,
0.159641864625151,
'inf',
0.176356596175917,
0.105781861921399,
'inf',
0.114288423164720,
0.0987979813929398,
0.0824815716172848,
0.148945367161375,
0.120082977994962,
'inf',
0.273339581610067,
0.122241177084292,
'inf',
0.181636260333079,
0.0963073689306489,
0.114147075204808,
0.0666964525010979,
0.130052730611788,
0.170788011881237,
0.0943698313868470,
0.0888797571350769,
0.0803546975056880,
0.0651494435875258,
0.237559966255753,
0.107794728785590,
0.129655067618261,
0.0827919875803034,
'inf',
0.0916799775280730,
'inf',
0.131267042223635,
0.192724889615585,
0.194733460712920,
0.134198636423894,
0.157039272749473,
0.126986784416138,
0.238674601319149,
0.127891766165732,
0.160564723679238,
0.0563979757239257,
0.119808118716213,
0.0603922828113410,
0.0808922066628258,
0.166512476670147,
0.0904687516119369,
0.0767930120967408,
0.114992143221601,
0.0647829289821586,
0.133723236066168,
0.271822785009573,
0.100548041943923,
0.136287092046481,
0.139112936502602,
0.313102536826369,
0.201910956905639], dtype=float
)
temp_comb_anch_last_arr = np.zeros((len(comb_anch_last_col_13), 14), dtype=float)
temp_comb_anch_last_arr[:,13] = comb_anch_last_col_13
tested = asn.find_min_pdop(temp_comb_anch_last_arr)
test_result = 0.0536
self.assertAlmostEqual(tested, test_result, 4)
def test_select_anchors_main(self):
min_pdop = 0.0536
comb_anch_tmp = np.array(
[
[0, 1, 2, 0, 160, 200, 5.3288, 140, 200, 4.8485, 160, 180, 6.586562, 0],
[0, 1, 3, 0, 160, 200, 5.3288, 140, 200, 4.8485, 140, 180, 5.861275, 0],
[0, 1, 4, 0, 160, 200, 5.3288, 140, 200, 4.8485, 180, 200, 6.862732, 0],
[0, 1, 5, 0, 160, 200, 5.3288, 140, 200, 4.8485, 180, 180, 4.114004, 0],
[0, 1, 6, 0, 160, 200, 5.3288, 140, 200, 4.8485, 120, 200, 4.203356, 0],
[0, 1, 7, 0, 160, 200, 5.3288, 140, 200, 4.8485, 160, 160, 4.161472, 0],
[0, 1, 8, 0, 160, 200, 5.3288, 140, 200, 4.8485, 120, 180, 5.483369, 0],
[0, 1, 9, 0, 160, 200, 5.3288, 140, 200, 4.8485, 140, 160, 6.814601, 0],
[0, 2, 3, 0, 160, 200, 5.3288, 160, 180, 6.5866, 140, 180, 5.861275, 0],
[0, 2, 4, 0, 160, 200, 5.3288, 160, 180, 6.5866, 180, 200, 6.862732, 0],
[0, 2, 5, 0, 160, 200, 5.3288, 160, 180, 6.5866, 180, 180, 4.114004, 0],
[0, 2, 6, 0, 160, 200, 5.3288, 160, 180, 6.5866, 120, 200, 4.203356, 0],
[0, 2, 7, 0, 160, 200, 5.3288, 160, 180, 6.5866, 160, 160, 4.161472, 0],
[0, 2, 8, 0, 160, 200, 5.3288, 160, 180, 6.5866, 120, 180, 5.483369, 0],
[0, 2, 9, 0, 160, 200, 5.3288, 160, 180, 6.5866, 140, 160, 6.814601, 0],
[0, 3, 4, 0, 160, 200, 5.3288, 140, 180, 5.8613, 180, 200, 6.862732, 0],
[0, 3, 5, 0, 160, 200, 5.3288, 140, 180, 5.8613, 180, 180, 4.114004, 0],
[0, 3, 6, 0, 160, 200, 5.3288, 140, 180, 5.8613, 120, 200, 4.203356, 0],
[0, 3, 7, 0, 160, 200, 5.3288, 140, 180, 5.8613, 160, 160, 4.161472, 0],
[0, 3, 8, 0, 160, 200, 5.3288, 140, 180, 5.8613, 120, 180, 5.483369, 0],
[0, 3, 9, 0, 160, 200, 5.3288, 140, 180, 5.8613, 140, 160, 6.814601, 0],
[0, 4, 5, 0, 160, 200, 5.3288, 180, 200, 6.8627, 180, 180, 4.114004, 0],
[0, 4, 6, 0, 160, 200, 5.3288, 180, 200, 6.8627, 120, 200, 4.203356, 0],
[0, 4, 7, 0, 160, 200, 5.3288, 180, 200, 6.8627, 160, 160, 4.161472, 0],
[0, 4, 8, 0, 160, 200, 5.3288, 180, 200, 6.8627, 120, 180, 5.483369, 0],
[0, 4, 9, 0, 160, 200, 5.3288, 180, 200, 6.8627, 140, 160, 6.814601, 0],
[0, 5, 6, 0, 160, 200, 5.3288, 180, 180, 4.1140, 120, 200, 4.203356, 0],
[0, 5, 7, 0, 160, 200, 5.3288, 180, 180, 4.1140, 160, 160, 4.161472, 0],
[0, 5, 8, 0, 160, 200, 5.3288, 180, 180, 4.1140, 120, 180, 5.483369, 0],
[0, 5, 9, 0, 160, 200, 5.3288, 180, 180, 4.1140, 140, 160, 6.814601, 0],
[0, 6, 7, 0, 160, 200, 5.3288, 120, 200, 4.2034, 160, 160, 4.161472, 0],
[0, 6, 8, 0, 160, 200, 5.3288, 120, 200, 4.2034, 120, 180, 5.483369, 0],
[0, 6, 9, 0, 160, 200, 5.3288, 120, 200, 4.2034, 140, 160, 6.814601, 0],
[0, 7, 8, 0, 160, 200, 5.3288, 160, 160, 4.1615, 120, 180, 5.483369, 0],
[0, 7, 9, 0, 160, 200, 5.3288, 160, 160, 4.1615, 140, 160, 6.814601, 0],
[0, 8, 9, 0, 160, 200, 5.3288, 120, 180, 5.4834, 140, 160, 6.814601, 0],
[1, 2, 3, 0, 140, 200, 4.8485, 160, 180, 6.5866, 140, 180, 5.861275, 0],
[1, 2, 4, 0, 140, 200, 4.8485, 160, 180, 6.5866, 180, 200, 6.862732, 0],
[1, 2, 5, 0, 140, 200, 4.8485, 160, 180, 6.5866, 180, 180, 4.114004, 0],
[1, 2, 6, 0, 140, 200, 4.8485, 160, 180, 6.5866, 120, 200, 4.203356, 0],
[1, 2, 7, 0, 140, 200, 4.8485, 160, 180, 6.5866, 160, 160, 4.161472, 0],
[1, 2, 8, 0, 140, 200, 4.8485, 160, 180, 6.5866, 120, 180, 5.483369, 0],
[1, 2, 9, 0, 140, 200, 4.8485, 160, 180, 6.5866, 140, 160, 6.814601, 0],
[1, 3, 4, 0, 140, 200, 4.8485, 140, 180, 5.8613, 180, 200, 6.862732, 0],
[1, 3, 5, 0, 140, 200, 4.8485, 140, 180, 5.8613, 180, 180, 4.114004, 0],
[1, 3, 6, 0, 140, 200, 4.8485, 140, 180, 5.8613, 120, 200, 4.203356, 0],
[1, 3, 7, 0, 140, 200, 4.8485, 140, 180, 5.8613, 160, 160, 4.161472, 0],
[1, 3, 8, 0, 140, 200, 4.8485, 140, 180, 5.8613, 120, 180, 5.483369, 0],
[1, 3, 9, 0, 140, 200, 4.8485, 140, 180, 5.8613, 140, 160, 6.814601, 0],
[1, 4, 5, 0, 140, 200, 4.8485, 180, 200, 6.8627, 180, 180, 4.114004, 0],
[1, 4, 6, 0, 140, 200, 4.8485, 180, 200, 6.8627, 120, 200, 4.203356, 0],
[1, 4, 7, 0, 140, 200, 4.8485, 180, 200, 6.8627, 160, 160, 4.161472, 0],
[1, 4, 8, 0, 140, 200, 4.8485, 180, 200, 6.8627, 120, 180, 5.483369, 0],
[1, 4, 9, 0, 140, 200, 4.8485, 180, 200, 6.8627, 140, 160, 6.814601, 0],
[1, 5, 6, 0, 140, 200, 4.8485, 180, 180, 4.1140, 120, 200, 4.203356, 0],
[1, 5, 7, 0, 140, 200, 4.8485, 180, 180, 4.1140, 160, 160, 4.161472, 0],
[1, 5, 8, 0, 140, 200, 4.8485, 180, 180, 4.1140, 120, 180, 5.483369, 0],
[1, 5, 9, 0, 140, 200, 4.8485, 180, 180, 4.1140, 140, 160, 6.814601, 0],
[1, 6, 7, 0, 140, 200, 4.8485, 120, 200, 4.2034, 160, 160, 4.161472, 0],
[1, 6, 8, 0, 140, 200, 4.8485, 120, 200, 4.2034, 120, 180, 5.483369, 0],
[1, 6, 9, 0, 140, 200, 4.8485, 120, 200, 4.2034, 140, 160, 6.814601, 0],
[1, 7, 8, 0, 140, 200, 4.8485, 160, 160, 4.1615, 120, 180, 5.483369, 0],
[1, 7, 9, 0, 140, 200, 4.8485, 160, 160, 4.1615, 140, 160, 6.814601, 0],
[1, 8, 9, 0, 140, 200, 4.8485, 120, 180, 5.4834, 140, 160, 6.814601, 0],
[2, 3, 4, 0, 160, 180, 6.5866, 140, 180, 5.8613, 180, 200, 6.862732, 0],
[2, 3, 5, 0, 160, 180, 6.5866, 140, 180, 5.8613, 180, 180, 4.114004, 0],
[2, 3, 6, 0, 160, 180, 6.5866, 140, 180, 5.8613, 120, 200, 4.203356, 0],
[2, 3, 7, 0, 160, 180, 6.5866, 140, 180, 5.8613, 160, 160, 4.161472, 0],
[2, 3, 8, 0, 160, 180, 6.5866, 140, 180, 5.8613, 120, 180, 5.483369, 0],
[2, 3, 9, 0, 160, 180, 6.5866, 140, 180, 5.8613, 140, 160, 6.814601, 0],
[2, 4, 6, 0, 160, 180, 6.5866, 180, 200, 6.8627, 180, 180, 4.114004, 0],
[2, 4, 6, 0, 160, 180, 6.5866, 180, 200, 6.8627, 120, 200, 4.203356, 0],
[2, 4, 7, 0, 160, 180, 6.5866, 180, 200, 6.8627, 160, 160, 4.161472, 0],
[2, 4, 8, 0, 160, 180, 6.5866, 180, 200, 6.8627, 120, 180, 5.483369, 0],
[2, 4, 9, 0, 160, 180, 6.5866, 180, 200, 6.8627, 140, 160, 6.814601, 0],
[2, 5, 6, 0, 160, 180, 6.5866, 180, 180, 4.1140, 120, 200, 4.203356, 0],
[2, 5, 7, 0, 160, 180, 6.5866, 180, 180, 4.1140, 160, 160, 4.161472, 0],
[2, 5, 8, 0, 160, 180, 6.5866, 180, 180, 4.1140, 120, 180, 5.483369, 0],
[2, 5, 9, 0, 160, 180, 6.5866, 180, 180, 4.1140, 140, 160, 6.814601, 0],
[2, 6, 7, 0, 160, 180, 6.5866, 120, 200, 4.2034, 160, 160, 4.161472, 0],
[2, 6, 8, 0, 160, 180, 6.5866, 120, 200, 4.2034, 120, 180, 5.483369, 0],
[2, 6, 9, 0, 160, 180, 6.5866, 120, 200, 4.2034, 140, 160, 6.814601, 0],
[2, 7, 8, 0, 160, 180, 6.5866, 160, 160, 4.1615, 120, 180, 5.483369, 0],
[2, 7, 9, 0, 160, 180, 6.5866, 160, 160, 4.1615, 140, 160, 6.814601, 0],
[2, 8, 9, 0, 160, 180, 6.5866, 120, 180, 5.4834, 140, 160, 6.814601, 0],
[3, 5, 5, 0, 140, 180, 5.8613, 180, 200, 6.8627, 180, 180, 4.114004, 0],
[3, 5, 6, 0, 140, 180, 5.8613, 180, 200, 6.8627, 120, 200, 4.203356, 0],
[3, 5, 7, 0, 140, 180, 5.8613, 180, 200, 6.8627, 160, 160, 4.161472, 0],
[3, 5, 8, 0, 140, 180, 5.8613, 180, 200, 6.8627, 120, 180, 5.483369, 0],
[3, 5, 9, 0, 140, 180, 5.8613, 180, 200, 6.8627, 140, 160, 6.814601, 0],
[3, 6, 6, 0, 140, 180, 5.8613, 180, 180, 4.1140, 120, 200, 4.203356, 0],
[3, 6, 7, 0, 140, 180, 5.8613, 180, 180, 4.1140, 160, 160, 4.161472, 0],
[3, 6, 8, 0, 140, 180, 5.8613, 180, 180, 4.1140, 120, 180, 5.483369, 0],
[3, 6, 9, 0, 140, 180, 5.8613, 180, 180, 4.1140, 140, 160, 6.814601, 0],
[3, 7, 7, 0, 140, 180, 5.8613, 120, 200, 4.2034, 160, 160, 4.161472, 0],
[3, 7, 8, 0, 140, 180, 5.8613, 120, 200, 4.2034, 120, 180, 5.483369, 0],
[3, 7, 9, 0, 140, 180, 5.8613, 120, 200, 4.2034, 140, 160, 6.814601, 0],
[3, 8, 8, 0, 140, 180, 5.8613, 160, 160, 4.1615, 120, 180, 5.483369, 0],
[3, 8, 9, 0, 140, 180, 5.8613, 160, 160, 4.1615, 140, 160, 6.814601, 0],
[3, 9, 9, 0, 140, 180, 5.8613, 120, 180, 5.4834, 140, 160, 6.814601, 0],
[4, 5, 6, 0, 180, 200, 6.8627, 180, 180, 4.1140, 120, 200, 4.203356, 0],
[4, 5, 7, 0, 180, 200, 6.8627, 180, 180, 4.1140, 160, 160, 4.161472, 0],
[4, 5, 8, 0, 180, 200, 6.8627, 180, 180, 4.1140, 120, 180, 5.483369, 0],
[4, 5, 9, 0, 180, 200, 6.8627, 180, 180, 4.1140, 140, 160, 6.814601, 0],
[4, 6, 7, 0, 180, 200, 6.8627, 120, 200, 4.2034, 160, 160, 4.161472, 0],
[4, 6, 8, 0, 180, 200, 6.8627, 120, 200, 4.2034, 120, 180, 5.483369, 0],
[4, 6, 9, 0, 180, 200, 6.8627, 120, 200, 4.2034, 140, 160, 6.814601, 0],
[4, 7, 8, 0, 180, 200, 6.8627, 160, 160, 4.1615, 120, 180, 5.483369, 0],
[4, 7, 9, 0, 180, 200, 6.8627, 160, 160, 4.1615, 140, 160, 6.814601, 0],
[4, 8, 9, 0, 180, 200, 6.8627, 120, 180, 5.4834, 140, 160, 6.814601, 0],
[5, 6, 7, 0, 180, 180, 4.1140, 120, 200, 4.2034, 160, 160, 4.161472, 0],
[5, 6, 8, 0, 180, 180, 4.1140, 120, 200, 4.2034, 120, 180, 5.483369, 0],
[5, 6, 9, 0, 180, 180, 4.1140, 120, 200, 4.2034, 140, 160, 6.814601, 0],
[5, 7, 8, 0, 180, 180, 4.1140, 160, 160, 4.1615, 120, 180, 5.483369, 0],
[5, 7, 9, 0, 180, 180, 4.1140, 160, 160, 4.1615, 140, 160, 6.814601, 0],
[5, 8, 9, 0, 180, 180, 4.1140, 120, 180, 5.4834, 140, 160, 6.814601, 0],
[6, 7, 8, 0, 120, 200, 4.2034, 160, 160, 4.1615, 120, 180, 5.483369, 0],
[6, 7, 9, 0, 120, 200, 4.2034, 160, 160, 4.1615, 140, 160, 6.8146, 0],
[6, 8, 9, 0, 120, 200, 4.2034, 120, 180, 5.4834, 140, 160, 6.8146, 0],
[7, 8, 9, 0, 160, 160, 4.1615, 120, 180, 5.4834, 140, 160, 6.8146, 0]
]
)
comb_anch_last_col_13 = np.array(
[0.0615985240956481,
0.0755278692622397,
'inf',
0.0906468722218315,
'inf',
0.0576481035612150,
0.118097832483058,
0.0616508236793112,
0.0827039281886618,
0.0660463361741802,
0.0827039281886618,
0.0623782349499827,
'inf',
0.0671602523776219,
0.129560023902244,
0.0901024815761657,
0.0558308153260946,
0.0727462110823498,
0.0727462110823499,
0.159743254884436,
0.159743254884436,
0.118060750128726,
'inf',
0.0862568529859302,
0.198888398525328,
0.0978524359224338,
0.101495381640677,
0.0951058057651224,
0.0740181776582730,
0.0721078044831008,
0.0581146988775125,
0.124391993908157,
0.0634457785249022,
0.0653342641830980,
0.128242602918494,
0.0911550860253352,
0.0751272273179829,
0.0661438606461973,
0.134416448234600,
0.125685401502809,
0.134416448234600,
0.0639620209620384,
0.0661438606461974,
0.0902811532605790,
0.0935982481746206,
0.100271240188883,
0.173107389536710,
0.111777025001596,
'inf',
0.0946226505835292,
'inf',
0.0535995008606919,
0.0983514632389458,
0.0541559801468351,
0.184696682171827,
0.0735841797831203,
0.0669209726118692,
0.0545522449067303,
0.142803022748627,
0.164451601887282,
0.124200642738285,
0.0656197163100553,
0.116901057881806,
0.126390267890328,
0.159641864625151,
'inf',
0.176356596175917,
0.105781861921399,
'inf',
0.114288423164720,
0.0987979813929398,
0.0824815716172848,
0.148945367161375,
0.120082977994962,
'inf',
0.273339581610067,
0.122241177084292,
'inf',
0.181636260333079,
0.0963073689306489,
0.114147075204808,
0.0666964525010979,
0.130052730611788,
0.170788011881237,
0.0943698313868470,
0.0888797571350769,
0.0803546975056880,
0.0651494435875258,
0.237559966255753,
0.107794728785590,
0.129655067618261,
0.0827919875803034,
'inf',
0.0916799775280730,
'inf',
0.131267042223635,
0.192724889615585,
0.194733460712920,
0.134198636423894,
0.157039272749473,
0.126986784416138,
0.238674601319149,
0.127891766165732,
0.160564723679238,
0.0563979757239257,
0.119808118716213,
0.0603922828113410,
0.0808922066628258,
0.166512476670147,
0.0904687516119369,
0.0767930120967408,
0.114992143221601,
0.0647829289821586,
0.133723236066168,
0.271822785009573,
0.100548041943923,
0.136287092046481,
0.139112936502602,
0.313102536826369,
0.201910956905639], dtype=float
)
comb_anch_tmp[:,13] = comb_anch_last_col_13
comb_anch_last = comb_anch_tmp
tested = asn.select_anchors_main(self.ips_dict, min_pdop, comb_anch_last)
test_result = {
6: [140, 200, 4.848520],
17: [180, 200, 6.862733],
13: [160, 160, 4.161473]
}
self.assertDictEqual(tested, test_result)
def test_find_sel_anch_index(self):
selected_anchors_dict = {
6: [140, 200, 4.848520],
17: [180, 200, 6.862733],
13: [160, 160, 4.161473]
}
tested = asn.find_sel_anch_index(self.ips_dict, selected_anchors_dict)
test_result = [8, 7, 2]
self.assertListEqual(tested, test_result)
def test_subtract_one_from_each_index(self):
sel_anch_index_list = [8, 7, 2]
tested = asn.subtract_one_from_each_index(sel_anch_index_list)[0]
test_result = [7, 6, 1]
self.assertListEqual(tested, test_result)
def test_find_the_ddoa_values(self):
new_index_list = [7, 6, 1]
tested = asn.find_the_ddoa_values(self.ips_dict, new_index_list)
test_result = [-13.973034128654382, -3.2857739316623658, 5.549387626398598]
self.assertListEqual(tested, test_result)
def test_detect_finalised_tdoa_values(self):
min_ind = 2
sel_anch_tdoa_list = [-13.973034128654382, -3.2857739316623658, 5.549387626398598]
tested = asn.detect_finalised_tdoa_values(min_ind, sel_anch_tdoa_list)
test_result = [-19.522421755, -8.835161558]
self.assertAlmostEqual(tested[0], test_result[0], 4)
self.assertAlmostEqual(tested[1], test_result[1], 4)
def test_mode_1_select_anchors_except_2d(self):
mode = 1
tested = asn.select_anchors_except_2d(self.ips_dict, mode)
test_result = {
0: [120, 200, 4.203356],
5: [120, 180, 5.483369]
}
self.assertDictEqual(tested, test_result)
def test_mode_3_select_anchors_except_2d(self):
mode = 3
tested = asn.select_anchors_except_2d(self.ips_dict, mode)
test_result = {
0: [120, 200, 4.203356],
5: [120, 180, 5.483369],
6: [140, 200, 4.848520],
8: [140, 180, 5.861276]
}
self.assertDictEqual(tested, test_result)
def test_mode_1_generate_selected_tdoa(self):
mode = 1
selected_anchors_dict = {
0: [120, 200, 4.203356],
5: [120, 180, 5.483369]
}
tag_index = np.array([155.0, 196.0, 0.5])
tested = asn.generate_selected_tdoa(selected_anchors_dict, mode, tag_index)
test_result = [3.383122267790938]
self.assertListEqual(tested, test_result)
def test_mode_3_generate_selected_tdoa(self):
mode = 3
tested = asn.select_anchors_except_2d(self.ips_dict, mode)
selected_anchors_dict = {
0: [120, 200, 4.203356],
5: [120, 180, 5.483369],
6: [140, 200, 4.848520],
8: [140, 180, 5.861276]
}
tag_index = np.array([155.0, 196.0, 0.5])
tested = asn.generate_selected_tdoa(selected_anchors_dict, mode, tag_index)
test_result = [-12.844459291960657, 3.383122267790938, -19.300241408988885]
self.assertListEqual(tested, test_result)
if __name__ == '__main__':
import rosunit
rosunit.unitrun(PKG, NAME, TestAnchorSelection, sysargs = None, coverage_packages = [str(PKG)])
| 50.309051
| 98
| 0.416849
| 9,528
| 68,370
| 2.943955
| 0.031486
| 0.033939
| 0.029697
| 0.030802
| 0.92877
| 0.914296
| 0.886168
| 0.871159
| 0.86492
| 0.860107
| 0
| 0.593896
| 0.422086
| 68,370
| 1,359
| 99
| 50.309051
| 0.116015
| 0.002384
| 0
| 0.902022
| 0
| 0
| 0.004292
| 0.000385
| 0
| 0
| 0
| 0
| 0.015552
| 1
| 0.014774
| false
| 0
| 0.004666
| 0
| 0.022551
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
74ebedb4d087d697c8a1b058226a54262708f141
| 198
|
py
|
Python
|
tests/test_init.py
|
aracnid/aracnid-logger
|
03245e8d4ee746b2caea2e6b0faacacee13da7dd
|
[
"MIT"
] | null | null | null |
tests/test_init.py
|
aracnid/aracnid-logger
|
03245e8d4ee746b2caea2e6b0faacacee13da7dd
|
[
"MIT"
] | null | null | null |
tests/test_init.py
|
aracnid/aracnid-logger
|
03245e8d4ee746b2caea2e6b0faacacee13da7dd
|
[
"MIT"
] | null | null | null |
"""Test functions for Aracnid Logger import.
"""
import aracnid_logger
def test_version():
"""Tests that Aracnid Logger was imported successfully.
"""
assert aracnid_logger.__version__
| 22
| 59
| 0.737374
| 23
| 198
| 6.043478
| 0.608696
| 0.374101
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171717
| 198
| 8
| 60
| 24.75
| 0.847561
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
2d28d597e83c8b0a60082066fd16f1b75e324e3d
| 157
|
py
|
Python
|
dtables/errors.py
|
dtables/dtables
|
b161c53b5901bfa744c27d6f32b6c431c98bbc25
|
[
"MIT"
] | 2
|
2018-01-05T18:19:00.000Z
|
2020-11-19T19:50:20.000Z
|
dtables/errors.py
|
dtables/dtables
|
b161c53b5901bfa744c27d6f32b6c431c98bbc25
|
[
"MIT"
] | null | null | null |
dtables/errors.py
|
dtables/dtables
|
b161c53b5901bfa744c27d6f32b6c431c98bbc25
|
[
"MIT"
] | null | null | null |
class ShapeMismatchException(Exception):
pass
class MultipleDimentionException(Exception):
pass
class LengthMismatchException(Exception):
pass
| 17.444444
| 44
| 0.796178
| 12
| 157
| 10.416667
| 0.5
| 0.312
| 0.288
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146497
| 157
| 8
| 45
| 19.625
| 0.932836
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
2d4637fe76bbe48edb38da5612f8b314a5bf422c
| 1,417
|
py
|
Python
|
common/models/rbac/Action.py
|
apanly/python_learn_master
|
93a214241812f77a006cc8350a7bad6c4eec6c89
|
[
"BSD-3-Clause"
] | 5
|
2020-11-29T14:21:18.000Z
|
2021-10-07T04:11:29.000Z
|
common/models/rbac/Action.py
|
linkgeek/python_flask_cms
|
ff5e794b5b11075670e5d11a8cbda0a137319876
|
[
"BSD-3-Clause"
] | null | null | null |
common/models/rbac/Action.py
|
linkgeek/python_flask_cms
|
ff5e794b5b11075670e5d11a8cbda0a137319876
|
[
"BSD-3-Clause"
] | 2
|
2020-11-30T09:55:53.000Z
|
2022-03-19T12:49:40.000Z
|
# coding: utf-8
from application import db
class Action(db.Model):
    """RBAC permission ("action") ORM model mapped to table ``action``.

    Each row is one permission entry plus its two-level menu placement.
    The ``info`` strings are the original Chinese column descriptions;
    English translations are given in the comments below.
    """
    __tablename__ = 'action'
    # Permission ID (primary key).
    id = db.Column(db.Integer, primary_key=True, info='权限ID')
    # Level-1 (top) menu name.
    level1_name = db.Column(db.String(20, 'utf8mb4_general_ci'), nullable=False, server_default=db.FetchedValue(), info='一级菜单名称')
    # Level-2 (sub) menu name.
    level2_name = db.Column(db.String(20, 'utf8mb4_general_ci'), nullable=False, server_default=db.FetchedValue(), info='二级菜单名称')
    # Permission name.
    name = db.Column(db.String(20, 'utf8mb4_general_ci'), nullable=False, server_default=db.FetchedValue(), info='权限名')
    # Links this permission allows access to, joined by a special separator.
    url = db.Column(db.String(255, 'utf8mb4_general_ci'), nullable=False, server_default=db.FetchedValue(), info='允许访问的链接,用特殊字符分割')
    # Level-1 menu weight (ordering).
    level1_weight = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue(), info='一级菜单权重')
    # Level-2 menu weight (ordering).
    level2_weight = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue(), info='二级菜单权重')
    # Weight: larger values rank earlier.
    weight = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue(), info='权重 越大排名越前面')
    # Whether this is an "important" permission.
    is_important = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue(), info='是否是重要权限')
    # Status: 1 = active, 0 = inactive.
    status = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue(), info='1 有效 0无效')
    # Last update time.
    updated_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue(), info='最后一次更新时间')
    # Insert (creation) time.
    created_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue(), info='插入时间')
| 61.608696
| 131
| 0.741708
| 193
| 1,417
| 5.284974
| 0.295337
| 0.094118
| 0.117647
| 0.280392
| 0.746078
| 0.746078
| 0.746078
| 0.746078
| 0.746078
| 0.746078
| 0
| 0.018913
| 0.104446
| 1,417
| 22
| 132
| 64.409091
| 0.78487
| 0.009174
| 0
| 0
| 0
| 0
| 0.114836
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 1.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
740f361cfdd257a0aa5cbac65603588a6f57f04d
| 4,835
|
py
|
Python
|
tests/test_featurization.py
|
tmadden/pace
|
0be5d92579efc4e6219f5c58bb4e4ac6754e865e
|
[
"MIT"
] | null | null | null |
tests/test_featurization.py
|
tmadden/pace
|
0be5d92579efc4e6219f5c58bb4e4ac6754e865e
|
[
"MIT"
] | 9
|
2019-01-16T15:13:37.000Z
|
2019-07-29T18:31:58.000Z
|
tests/test_featurization.py
|
tmadden/pace
|
0be5d92579efc4e6219f5c58bb4e4ac6754e865e
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import pace.featurization
def test_load_aafeatmat():
    """load_aafeatmat('BLOSUM62') yields a 20-row matrix indexed by amino acids."""
    # Bug fix: this test dereferences pace.definitions, but the module
    # only imports pace.featurization — pace.definitions is not
    # guaranteed to be loaded. Import it explicitly (a no-op if the
    # package __init__ already pulled it in).
    import pace.definitions

    aafeatmat_name = 'BLOSUM62'
    aafeatmat = pace.featurization.load_aafeatmat(aafeatmat_name)
    # 20 standard amino acids -> 20 feature rows.
    assert aafeatmat.shape[0] == 20
    # Every row label must be a known amino-acid code.
    assert all(aa in list(pace.definitions.amino_acids)
               for aa in aafeatmat.index.values)
def test_encode_onehot():
    """Default encode(): one-hot encoding of equal-length (9-mer) sequences."""
    from pace.featurization import encode
    sequences = [
        "AADIFYSRY",
        "AADLNLVLY",
        "AAAAAAACL",
        "WIDEDVLRY"]
    encoding = encode(sequences)
    # One encoded row per input sequence.
    assert encoding.shape[0] == len(sequences)
    # Row width is a whole multiple of the sequence length (9 residues).
    assert encoding.shape[1] % len(sequences[0]) == 0
    # Exact expected one-hot row for the first sequence ("AADIFYSRY"):
    # 180 columns = 9 residues x 20 indicator columns, column order
    # fixed by pace.featurization's amino-acid ordering.
    assert np.all(encoding[0,:] == [
        1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
        0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.,
        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
        0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
        0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
        0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.,
        0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.,
        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.,
        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
        0., 0., 0., 1.
    ])
    # Exact expected one-hot row for the last sequence ("WIDEDVLRY").
    assert np.all(encoding[-1,:] == [
        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
        0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.,
        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.,
        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.,
        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
        0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
        0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
        0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.,
        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
        0., 0., 0., 1.
    ])
def test_encode():
    """encode() with 'BLOSUM62': per-residue substitution-matrix features.

    Same shape checks as the one-hot test, then exact expected rows —
    values presumably come from the BLOSUM62 row of each residue
    (20 columns per residue, 180 total for a 9-mer).
    """
    from pace.featurization import encode
    sequences = [
        "AADIFYSRY",
        "AADLNLVLY",
        "AAAAAAACL",
        "WIDEDVLRY"]
    encoding = encode(sequences, 'BLOSUM62')
    # One encoded row per input sequence.
    assert encoding.shape[0] == len(sequences)
    # Row width is a whole multiple of the sequence length (9 residues).
    assert encoding.shape[1] % len(sequences[0]) == 0
    # Exact expected feature row for the first sequence ("AADIFYSRY").
    assert np.all(encoding[0,:] == [
        4., -1., -2., -2., 0., -1., -1., 0., -2., -1., -1., -1., -1.,
        -2., -1., 1., 0., -3., -2., 0., 4., -1., -2., -2., 0., -1.,
        -1., 0., -2., -1., -1., -1., -1., -2., -1., 1., 0., -3., -2.,
        0., -2., -2., 1., 6., -3., 0., 2., -1., -1., -3., -4., -1.,
        -3., -3., -1., 0., -1., -4., -3., -3., -1., -3., -3., -3., -1.,
        -3., -3., -4., -3., 4., 2., -3., 1., 0., -3., -2., -1., -3.,
        -1., 3., -2., -3., -3., -3., -2., -3., -3., -3., -1., 0., 0.,
        -3., 0., 6., -4., -2., -2., 1., 3., -1., -2., -2., -2., -3.,
        -2., -1., -2., -3., 2., -1., -1., -2., -1., 3., -3., -2., -2.,
        2., 7., -1., 1., -1., 1., 0., -1., 0., 0., 0., -1., -2.,
        -2., 0., -1., -2., -1., 4., 1., -3., -2., -2., -1., 5., 0.,
        -2., -3., 1., 0., -2., 0., -3., -2., 2., -1., -3., -2., -1.,
        -1., -3., -2., -3., -2., -2., -2., -3., -2., -1., -2., -3., 2.,
        -1., -1., -2., -1., 3., -3., -2., -2., 2., 7., -1.
    ])
    # Exact expected feature row for the last sequence ("WIDEDVLRY").
    assert np.all(encoding[-1,:] == [
        -3., -3., -4., -4., -2., -2., -3., -2., -2., -3., -2., -3., -1.,
        1., -4., -3., -2., 11., 2., -3., -1., -3., -3., -3., -1., -3.,
        -3., -4., -3., 4., 2., -3., 1., 0., -3., -2., -1., -3., -1.,
        3., -2., -2., 1., 6., -3., 0., 2., -1., -1., -3., -4., -1.,
        -3., -3., -1., 0., -1., -4., -3., -3., -1., 0., 0., 2., -4.,
        2., 5., -2., 0., -3., -3., 1., -2., -3., -1., 0., -1., -3.,
        -2., -2., -2., -2., 1., 6., -3., 0., 2., -1., -1., -3., -4.,
        -1., -3., -3., -1., 0., -1., -4., -3., -3., 0., -3., -3., -3.,
        -1., -2., -2., -3., -3., 3., 1., -2., 1., -1., -2., -2., 0.,
        -3., -1., 4., -1., -2., -3., -4., -1., -2., -3., -4., -3., 2.,
        4., -2., 2., 0., -3., -2., -1., -2., -1., 1., -1., 5., 0.,
        -2., -3., 1., 0., -2., 0., -3., -2., 2., -1., -3., -2., -1.,
        -1., -3., -2., -3., -2., -2., -2., -3., -2., -1., -2., -3., 2.,
        -1., -1., -2., -1., 3., -3., -2., -2., 2., 7., -1.
    ])
| 50.894737
| 91
| 0.294312
| 847
| 4,835
| 1.669421
| 0.054309
| 0.468175
| 0.655587
| 0.823197
| 0.751768
| 0.719236
| 0.691655
| 0.691655
| 0.691655
| 0.691655
| 0
| 0.226092
| 0.323061
| 4,835
| 95
| 92
| 50.894737
| 0.205927
| 0
| 0
| 0.534091
| 0
| 0
| 0.018197
| 0
| 0
| 0
| 0
| 0
| 0.113636
| 1
| 0.034091
| false
| 0
| 0.056818
| 0
| 0.090909
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7451a926807a54b7c86f48984c299cffd30f1592
| 62,245
|
py
|
Python
|
tests/test_send_to_slack.py
|
virdesai/stock-analysis-engine
|
0ca501277c632150717ca499121a34f8f8c71ccb
|
[
"Apache-2.0"
] | 819
|
2018-09-16T20:33:11.000Z
|
2022-03-30T21:18:23.000Z
|
tests/test_send_to_slack.py
|
gvpathi/stock-analysis-engine
|
0ca501277c632150717ca499121a34f8f8c71ccb
|
[
"Apache-2.0"
] | 14
|
2018-09-16T20:52:25.000Z
|
2020-09-06T12:36:36.000Z
|
tests/test_send_to_slack.py
|
gvpathi/stock-analysis-engine
|
0ca501277c632150717ca499121a34f8f8c71ccb
|
[
"Apache-2.0"
] | 226
|
2018-09-16T20:04:32.000Z
|
2022-03-31T01:41:14.000Z
|
"""
Test file for:
update prices
"""
import mock
import os
import matplotlib.pyplot as plt
from types import SimpleNamespace
from analysis_engine.mocks.base_test import BaseTestCase
from analysis_engine.consts import SUCCESS
from analysis_engine.consts import FAILED
from analysis_engine.send_to_slack import post_failure
from analysis_engine.send_to_slack import post_message
from analysis_engine.send_to_slack import post_success
from analysis_engine.send_to_slack import post_plot
def mock_request_success_result(url, data=None, params=None, files=None):
    """Stand-in for ``requests.post`` that always reports HTTP 200.

    Accepts the same positional/keyword arguments a ``requests.post``
    call supplies (all ignored) and returns an object exposing only a
    ``status_code`` attribute.
    """
    return SimpleNamespace(status_code=200)
# end of mock_request_success_result
def mock_request_failure_result(url, data=None, params=None, files=None):
    """Stand-in for ``requests.post`` that always reports HTTP 400.

    Mirrors :func:`mock_request_success_result` but simulates a failed
    request; all arguments are ignored.
    """
    return SimpleNamespace(status_code=400)
# end of mock_request_failure_result
class TestSendToSlack(BaseTestCase):
"""TestSendToSlack"""
backupWebhook = None
backupAccessToken = None
backupChannels = None
def setUp(self):
"""setUp"""
if os.getenv('SLACK_WEBHOOK'):
self.backupWebhook = os.getenv('SLACK_WEBHOOK')
os.environ['SLACK_WEBHOOK'] = 'https://test.com'
if os.getenv('SLACK_ACCESS_TOKEN'):
self.backupWebhook = os.getenv('SLACK_ACCESS_TOKEN')
os.environ['SLACK_ACCESS_TOKEN'] = 'test_access_token'
if os.getenv('SLACK_PUBLISH_PLOT_CHANNELS'):
self.backupWebhook = os.getenv('SLACK_PUBLISH_PLOT_CHANNELS')
os.environ['SLACK_PUBLISH_PLOT_CHANNELS'] = 'general'
# end of setUp
    def tearDown(self):
        """Restore the Slack env vars captured by setUp, or unset them.

        For each of the three variables: if a backup was captured, put
        it back and clear the backup attribute; otherwise remove the
        test value so it does not leak into later tests.
        """
        if self.backupWebhook:
            os.environ['SLACK_WEBHOOK'] = self.backupWebhook
            self.backupWebhook = None
        else:
            os.environ.pop('SLACK_WEBHOOK', None)
        if self.backupAccessToken:
            os.environ['SLACK_ACCESS_TOKEN'] = self.backupAccessToken
            self.backupAccessToken = None
        else:
            os.environ.pop('SLACK_ACCESS_TOKEN', None)
        if self.backupChannels:
            os.environ['SLACK_PUBLISH_PLOT_CHANNELS'] = self.backupChannels
            self.backupChannels = None
        else:
            os.environ.pop('SLACK_PUBLISH_PLOT_CHANNELS', None)
    # end of tearDown
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_success_send_to_slack_string_success(self):
"""test_post_success_send_to_slack_string_success"""
res = post_success('test')
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_success_send_to_slack_string_success
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_success_send_to_slack_string_failure(self):
"""test_post_success_send_to_slack_string_failure"""
res = post_success('test')
self.assertTrue(res['status'] == FAILED)
# end of test_post_success_send_to_slack_string_failure
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_failure_send_to_slack_string_success(self):
"""test_post_failure_send_to_slack_string_success"""
res = post_failure('test')
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_failure_send_to_slack_string_success
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_failure_send_to_slack_string_failure(self):
"""test_post_failure_send_to_slack_string_failure"""
res = post_failure('test')
self.assertTrue(res['status'] == FAILED)
# end of test_post_failure_send_to_slack_string_failure
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_message_send_to_slack_string_success(self):
"""test_post_message_send_to_slack_string_success"""
res = post_message('test')
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_message_send_to_slack_string_success
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_message_send_to_slack_string_failure(self):
"""test_post_message_send_to_slack_string_failure"""
res = post_message('test')
self.assertTrue(res['status'] == FAILED)
# end of test_post_message_send_to_slack_string_failure
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_success_send_to_slack_dict_success(self):
"""test_post_success_send_to_slack_dict_success"""
res = post_success({'test': 'value'})
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_success_send_to_slack_dict_success
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_success_send_to_slack_dict_failure(self):
"""test_post_success_send_to_slack_dict_failure"""
res = post_success({'test': 'value'})
self.assertTrue(res['status'] == FAILED)
# end of test_post_success_send_to_slack_dict_failure
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_failure_send_to_slack_dict_success(self):
"""test_post_failure_send_to_slack_dict_success"""
res = post_failure({'test': 'value'})
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_failure_send_to_slack_dict_success
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_failure_send_to_slack_dict_failure(self):
"""test_post_failure_send_to_slack_dict_failure"""
res = post_failure({'test': 'value'})
self.assertTrue(res['status'] == FAILED)
# end of test_post_failure_send_to_slack_dict_failure
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_message_send_to_slack_dict_success(self):
"""test_post_message_send_to_slack_dict_success"""
res = post_message({'test': 'value'})
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_message_send_to_slack_dict_success
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_message_send_to_slack_dict_failure(self):
"""test_post_message_send_to_slack_dict_failure"""
res = post_message({'test': 'value'})
self.assertTrue(res['status'] == FAILED)
# end of test_post_message_send_to_slack_dict_failure
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_success_send_to_slack_list_success(self):
"""test_post_success_send_to_slack_list_success"""
res = post_success(['test', 'test 2'])
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_success_send_to_slack_list_success
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_success_send_to_slack_list_failure(self):
"""test_post_success_send_to_slack_list_failure"""
res = post_success(['test', 'test 2'])
self.assertTrue(res['status'] == FAILED)
# end of test_post_success_send_to_slack_list_failure
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_failure_send_to_slack_list_success(self):
"""test_post_failure_send_to_slack_list_success"""
res = post_failure(['test', 'test 2'])
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_failure_send_to_slack_list_success
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_failure_send_to_slack_list_failure(self):
"""test_post_failure_send_to_slack_list_failure"""
res = post_failure(['test', 'test 2'])
self.assertTrue(res['status'] == FAILED)
# end of test_post_failure_send_to_slack_list_failure
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_message_send_to_slack_list_success(self):
"""test_post_message_send_to_slack_list_success"""
res = post_message(['test', 'test 2'])
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_message_send_to_slack_list_success
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_message_send_to_slack_list_failure(self):
"""test_post_message_send_to_slack_list_failure"""
res = post_message(['test', 'test 2'])
self.assertTrue(res['status'] == FAILED)
# end of test_post_message_send_to_slack_list_failure
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_success_send_to_slack_string_success_jupyter(self):
"""test_post_success_send_to_slack_string_success_jupyter"""
res = post_success('test', jupyter=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_success_send_to_slack_string_success_jupyter
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_success_send_to_slack_string_failure_jupyter(self):
"""test_post_success_send_to_slack_string_failure_jupyter"""
res = post_success('test', jupyter=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_success_send_to_slack_string_failure_jupyter
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_failure_send_to_slack_string_success_jupyter(self):
"""test_post_failure_send_to_slack_string_success_jupyter"""
res = post_failure('test', jupyter=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_failure_send_to_slack_string_success_jupyter
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_failure_send_to_slack_string_failure_jupyter(self):
"""test_post_failure_send_to_slack_string_failure_jupyter"""
res = post_failure('test', jupyter=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_failure_send_to_slack_string_failure_jupyter
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_message_send_to_slack_string_success_jupyter(self):
"""test_post_message_send_to_slack_string_success_jupyter"""
res = post_message('test', jupyter=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_message_send_to_slack_string_success_jupyter
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_message_send_to_slack_string_failure_jupyter(self):
"""test_post_message_send_to_slack_string_failure_jupyter"""
res = post_message('test', jupyter=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_message_send_to_slack_string_failure_jupyter
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_success_send_to_slack_dict_success_jupyter(self):
"""test_post_success_send_to_slack_dict_success_jupyter"""
res = post_success({'test': 'value'}, jupyter=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_success_send_to_slack_dict_success_jupyter
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_success_send_to_slack_dict_failure_jupyter(self):
"""test_post_success_send_to_slack_dict_failure_jupyter"""
res = post_success({'test': 'value'}, jupyter=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_success_send_to_slack_dict_failure_jupyter
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_failure_send_to_slack_dict_success_jupyter(self):
"""test_post_failure_send_to_slack_dict_success_jupyter"""
res = post_failure({'test': 'value'}, jupyter=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_failure_send_to_slack_dict_success_jupyter
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_failure_send_to_slack_dict_failure_jupyter(self):
"""test_post_failure_send_to_slack_dict_failure_jupyter"""
res = post_failure({'test': 'value'}, jupyter=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_failure_send_to_slack_dict_failure_jupyter
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_message_send_to_slack_dict_success_jupyter(self):
"""test_post_message_send_to_slack_dict_success_jupyter"""
res = post_message({'test': 'value'}, jupyter=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_message_send_to_slack_dict_success_jupyter
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_message_send_to_slack_dict_failure_jupyter(self):
"""test_post_message_send_to_slack_dict_failure_jupyter"""
res = post_message({'test': 'value'}, jupyter=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_message_send_to_slack_dict_failure_jupyter
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_success_send_to_slack_list_success_jupyter(self):
"""test_post_success_send_to_slack_list_success_jupyter"""
res = post_success(['test', 'test 2'], jupyter=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_success_send_to_slack_list_success_jupyter
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_success_send_to_slack_list_failure_jupyter(self):
"""test_post_success_send_to_slack_list_failure_jupyter"""
res = post_success(['test', 'test 2'], jupyter=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_success_send_to_slack_list_failure_jupyter
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_failure_send_to_slack_list_success_jupyter(self):
"""test_post_failure_send_to_slack_list_success_jupyter"""
res = post_failure(['test', 'test 2'], jupyter=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_failure_send_to_slack_list_success_jupyter
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_failure_send_to_slack_list_failure_jupyter(self):
"""test_post_failure_send_to_slack_list_failure_jupyter"""
res = post_failure(['test', 'test 2'], jupyter=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_failure_send_to_slack_list_failure_jupyter
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_message_send_to_slack_list_success_jupyter(self):
"""test_post_message_send_to_slack_list_success_jupyter"""
res = post_message(['test', 'test 2'], jupyter=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_message_send_to_slack_list_success_jupyter
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_message_send_to_slack_list_failure_jupyter(self):
"""test_post_message_send_to_slack_list_failure_jupyter"""
res = post_message(['test', 'test 2'], jupyter=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_message_send_to_slack_list_failure_jupyter
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_success_send_to_slack_string_success_block(self):
"""test_post_success_send_to_slack_string_success_block"""
res = post_success('test', block=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_success_send_to_slack_string_success_block
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_success_send_to_slack_string_failure_block(self):
"""test_post_success_send_to_slack_string_failure_block"""
res = post_success('test', block=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_success_send_to_slack_string_failure_block
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_failure_send_to_slack_string_success_block(self):
"""test_post_failure_send_to_slack_string_success_block"""
res = post_failure('test', block=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_failure_send_to_slack_string_success_block
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_failure_send_to_slack_string_failure_block(self):
"""test_post_failure_send_to_slack_string_failure_block"""
res = post_failure('test', block=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_failure_send_to_slack_string_failure_block
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_message_send_to_slack_string_success_block(self):
"""test_post_message_send_to_slack_string_success_block"""
res = post_message('test', block=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_message_send_to_slack_string_success_block
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_message_send_to_slack_string_failure_block(self):
"""test_post_message_send_to_slack_string_failure_block"""
res = post_message('test', block=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_message_send_to_slack_string_failure_block
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_success_send_to_slack_dict_success_block(self):
"""test_post_success_send_to_slack_dict_success_block"""
res = post_success({'test': 'value'}, block=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_success_send_to_slack_dict_success_block
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_success_send_to_slack_dict_failure_block(self):
"""test_post_success_send_to_slack_dict_failure_block"""
res = post_success({'test': 'value'}, block=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_success_send_to_slack_dict_failure_block
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_failure_send_to_slack_dict_success_block(self):
"""test_post_failure_send_to_slack_dict_success_block"""
res = post_failure({'test': 'value'}, block=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_failure_send_to_slack_dict_success_block
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_failure_send_to_slack_dict_failure_block(self):
"""test_post_failure_send_to_slack_dict_failure_block"""
res = post_failure({'test': 'value'}, block=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_failure_send_to_slack_dict_failure_block
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_message_send_to_slack_dict_success_block(self):
"""test_post_message_send_to_slack_dict_success_block"""
res = post_message({'test': 'value'}, block=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_message_send_to_slack_dict_success_block
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_message_send_to_slack_dict_failure_block(self):
"""test_post_message_send_to_slack_dict_failure_block"""
res = post_message({'test': 'value'}, block=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_message_send_to_slack_dict_failure_block
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_success_send_to_slack_list_success_block(self):
"""test_post_success_send_to_slack_list_success_block"""
res = post_success(['test', 'test 2'], block=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_success_send_to_slack_list_success_block
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_success_send_to_slack_list_failure_block(self):
"""test_post_success_send_to_slack_list_failure_block"""
res = post_success(['test', 'test 2'], block=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_success_send_to_slack_list_failure_block
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_failure_send_to_slack_list_success_block(self):
"""test_post_failure_send_to_slack_list_success_block"""
res = post_failure(['test', 'test 2'], block=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_failure_send_to_slack_list_success_block
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_failure_send_to_slack_list_failure_block(self):
"""test_post_failure_send_to_slack_list_failure_block"""
res = post_failure(['test', 'test 2'], block=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_failure_send_to_slack_list_failure_block
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_message_send_to_slack_list_success_block(self):
"""test_post_message_send_to_slack_list_success_block"""
res = post_message(['test', 'test 2'], block=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_message_send_to_slack_list_success_block
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_message_send_to_slack_list_failure_block(self):
"""test_post_message_send_to_slack_list_failure_block"""
res = post_message(['test', 'test 2'], block=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_message_send_to_slack_list_failure_block
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_success_send_to_slack_string_success_width(self):
"""test_post_success_send_to_slack_string_success_width"""
res = post_success('test', full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_success_send_to_slack_string_success_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_success_send_to_slack_string_failure_width(self):
"""test_post_success_send_to_slack_string_failure_width"""
res = post_success('test', full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_success_send_to_slack_string_failure_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_failure_send_to_slack_string_success_width(self):
"""test_post_failure_send_to_slack_string_success_width"""
res = post_failure('test', full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_failure_send_to_slack_string_success_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_failure_send_to_slack_string_failure_width(self):
"""test_post_failure_send_to_slack_string_failure_width"""
res = post_failure('test', full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_failure_send_to_slack_string_failure_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_message_send_to_slack_string_success_width(self):
"""test_post_message_send_to_slack_string_success_width"""
res = post_message('test', full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_message_send_to_slack_string_success_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_message_send_to_slack_string_failure_width(self):
"""test_post_message_send_to_slack_string_failure_width"""
res = post_message('test', full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_message_send_to_slack_string_failure_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_success_send_to_slack_dict_success_width(self):
"""test_post_success_send_to_slack_dict_success_width"""
res = post_success({'test': 'value'}, full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_success_send_to_slack_dict_success_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_success_send_to_slack_dict_failure_width(self):
"""test_post_success_send_to_slack_dict_failure_width"""
res = post_success({'test': 'value'}, full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_success_send_to_slack_dict_failure_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_failure_send_to_slack_dict_success_width(self):
"""test_post_failure_send_to_slack_dict_success_width"""
res = post_failure({'test': 'value'}, full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_failure_send_to_slack_dict_success_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_failure_send_to_slack_dict_failure_width(self):
"""test_post_failure_send_to_slack_dict_failure_width"""
res = post_failure({'test': 'value'}, full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_failure_send_to_slack_dict_failure_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_message_send_to_slack_dict_success_width(self):
"""test_post_message_send_to_slack_dict_success_width"""
res = post_message({'test': 'value'}, full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_message_send_to_slack_dict_success_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_message_send_to_slack_dict_failure_width(self):
"""test_post_message_send_to_slack_dict_failure_width"""
res = post_message({'test': 'value'}, full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_message_send_to_slack_dict_failure_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_success_send_to_slack_list_success_width(self):
"""test_post_success_send_to_slack_list_success_width"""
res = post_success(['test', 'test 2'], full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_success_send_to_slack_list_success_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_success_send_to_slack_list_failure_width(self):
"""test_post_success_send_to_slack_list_failure_width"""
res = post_success(['test', 'test 2'], full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_success_send_to_slack_list_failure_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_failure_send_to_slack_list_success_width(self):
"""test_post_failure_send_to_slack_list_success_width"""
res = post_failure(['test', 'test 2'], full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_failure_send_to_slack_list_success_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_failure_send_to_slack_list_failure_width(self):
"""test_post_failure_send_to_slack_list_failure_width"""
res = post_failure(['test', 'test 2'], full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_failure_send_to_slack_list_failure_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_message_send_to_slack_list_success_width(self):
"""test_post_message_send_to_slack_list_success_width"""
res = post_message(['test', 'test 2'], full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_message_send_to_slack_list_success_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_message_send_to_slack_list_failure_width(self):
"""test_post_message_send_to_slack_list_failure_width"""
res = post_message(['test', 'test 2'], full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_message_send_to_slack_list_failure_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_success_send_to_slack_string_success_jupyter_block(self):
"""test_post_success_send_to_slack_string_success_jupyter_block"""
res = post_success('test', jupyter=True, block=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_success_send_to_slack_string_success_jupyter_block
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_success_send_to_slack_string_failure_jupyter_block(self):
"""test_post_success_send_to_slack_string_failure_jupyter_block"""
res = post_success('test', jupyter=True, block=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_success_send_to_slack_string_failure_jupyter_block
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_failure_send_to_slack_string_success_jupyter_block(self):
"""test_post_failure_send_to_slack_string_success_jupyter_block"""
res = post_failure('test', jupyter=True, block=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_failure_send_to_slack_string_success_jupyter_block
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_failure_send_to_slack_string_failure_jupyter_block(self):
"""test_post_failure_send_to_slack_string_failure_jupyter_block"""
res = post_failure('test', jupyter=True, block=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_failure_send_to_slack_string_failure_jupyter_block
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_message_send_to_slack_string_success_jupyter_block(self):
"""test_post_message_send_to_slack_string_success_jupyter_block"""
res = post_message('test', jupyter=True, block=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_message_send_to_slack_string_success_jupyter_block
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_message_send_to_slack_string_failure_jupyter_block(self):
"""test_post_message_send_to_slack_string_failure_jupyter_block"""
res = post_message('test', jupyter=True, block=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_message_send_to_slack_string_failure_jupyter_block
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_success_send_to_slack_dict_success_jupyter_block(self):
"""test_post_success_send_to_slack_dict_success_jupyter_block"""
res = post_success({'test': 'value'}, jupyter=True, block=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_success_send_to_slack_dict_success_jupyter_block
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_success_send_to_slack_dict_failure_jupyter_block(self):
"""test_post_success_send_to_slack_dict_failure_jupyter_block"""
res = post_success({'test': 'value'}, jupyter=True, block=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_success_send_to_slack_dict_failure_jupyter_block
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_failure_send_to_slack_dict_success_jupyter_block(self):
"""test_post_failure_send_to_slack_dict_success_jupyter_block"""
res = post_failure({'test': 'value'}, jupyter=True, block=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_failure_send_to_slack_dict_success_jupyter_block
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_failure_send_to_slack_dict_failure_jupyter_block(self):
"""test_post_failure_send_to_slack_dict_failure_jupyter_block"""
res = post_failure({'test': 'value'}, jupyter=True, block=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_failure_send_to_slack_dict_failure_jupyter_block
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_message_send_to_slack_dict_success_jupyter_block(self):
"""test_post_message_send_to_slack_dict_success_jupyter_block"""
res = post_message({'test': 'value'}, jupyter=True, block=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_message_send_to_slack_dict_success_jupyter_block
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_message_send_to_slack_dict_failure_jupyter_block(self):
"""test_post_message_send_to_slack_dict_failure_jupyter_block"""
res = post_message({'test': 'value'}, jupyter=True, block=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_message_send_to_slack_dict_failure_jupyter_block
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_success_send_to_slack_list_success_jupyter_block(self):
"""test_post_success_send_to_slack_list_success_jupyter_block"""
res = post_success(['test', 'test 2'], jupyter=True, block=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_success_send_to_slack_list_success_jupyter_block
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_success_send_to_slack_list_failure_jupyter_block(self):
"""test_post_success_send_to_slack_list_failure_jupyter_block"""
res = post_success(['test', 'test 2'], jupyter=True, block=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_success_send_to_slack_list_failure_jupyter_block
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_failure_send_to_slack_list_success_jupyter_block(self):
"""test_post_failure_send_to_slack_list_success_jupyter_block"""
res = post_failure(['test', 'test 2'], jupyter=True, block=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_failure_send_to_slack_list_success_jupyter_block
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_failure_send_to_slack_list_failure_jupyter_block(self):
"""test_post_failure_send_to_slack_list_failure_jupyter_block"""
res = post_failure(['test', 'test 2'], jupyter=True, block=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_failure_send_to_slack_list_failure_jupyter_block
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_message_send_to_slack_list_success_jupyter_block(self):
"""test_post_message_send_to_slack_list_success_jupyter_block"""
res = post_message(['test', 'test 2'], jupyter=True, block=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_message_send_to_slack_list_success_jupyter_block
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_message_send_to_slack_list_failure_jupyter_block(self):
"""test_post_message_send_to_slack_list_failure_jupyter_block"""
res = post_message(['test', 'test 2'], jupyter=True, block=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_message_send_to_slack_list_failure_jupyter_block
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_success_send_to_slack_string_success_jupyter_width(self):
"""test_post_success_send_to_slack_string_success_jupyter_width"""
res = post_success('test', jupyter=True, full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_success_send_to_slack_string_success_jupyter_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_success_send_to_slack_string_failure_jupyter_width(self):
"""test_post_success_send_to_slack_string_failure_jupyter_width"""
res = post_success('test', jupyter=True, full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_success_send_to_slack_string_failure_jupyter_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_failure_send_to_slack_string_success_jupyter_width(self):
"""test_post_failure_send_to_slack_string_success_jupyter_width"""
res = post_failure('test', jupyter=True, full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_failure_send_to_slack_string_success_jupyter_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_failure_send_to_slack_string_failure_jupyter_width(self):
"""test_post_failure_send_to_slack_string_failure_jupyter_width"""
res = post_failure('test', jupyter=True, full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_failure_send_to_slack_string_failure_jupyter_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_message_send_to_slack_string_success_jupyter_width(self):
"""test_post_message_send_to_slack_string_success_jupyter_width"""
res = post_message('test', jupyter=True, full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_message_send_to_slack_string_success_jupyter_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_message_send_to_slack_string_failure_jupyter_width(self):
"""test_post_message_send_to_slack_string_failure_jupyter_width"""
res = post_message('test', jupyter=True, full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_message_send_to_slack_string_failure_jupyter_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_success_send_to_slack_dict_success_jupyter_width(self):
"""test_post_success_send_to_slack_dict_success_jupyter_width"""
res = post_success({'test': 'value'}, jupyter=True, full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_success_send_to_slack_dict_success_jupyter_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_success_send_to_slack_dict_failure_jupyter_width(self):
"""test_post_success_send_to_slack_dict_failure_jupyter_width"""
res = post_success({'test': 'value'}, jupyter=True, full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_success_send_to_slack_dict_failure_jupyter_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_failure_send_to_slack_dict_success_jupyter_width(self):
"""test_post_failure_send_to_slack_dict_success_jupyter_width"""
res = post_failure({'test': 'value'}, jupyter=True, full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_failure_send_to_slack_dict_success_jupyter_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_failure_send_to_slack_dict_failure_jupyter_width(self):
"""test_post_failure_send_to_slack_dict_failure_jupyter_width"""
res = post_failure({'test': 'value'}, jupyter=True, full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_failure_send_to_slack_dict_failure_jupyter_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_message_send_to_slack_dict_success_jupyter_width(self):
"""test_post_message_send_to_slack_dict_success_jupyter_width"""
res = post_message({'test': 'value'}, jupyter=True, full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_message_send_to_slack_dict_success_jupyter_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_message_send_to_slack_dict_failure_jupyter_width(self):
"""test_post_message_send_to_slack_dict_failure_jupyter_width"""
res = post_message({'test': 'value'}, jupyter=True, full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_message_send_to_slack_dict_failure_jupyter_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_success_send_to_slack_list_success_jupyter_width(self):
"""test_post_success_send_to_slack_list_success_jupyter_width"""
res = post_success(['test', 'test 2'], jupyter=True, full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_success_send_to_slack_list_success_jupyter_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_success_send_to_slack_list_failure_jupyter_width(self):
"""test_post_success_send_to_slack_list_failure_jupyter_width"""
res = post_success(['test', 'test 2'], jupyter=True, full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_success_send_to_slack_list_failure_jupyter_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_failure_send_to_slack_list_success_jupyter_width(self):
"""test_post_failure_send_to_slack_list_success_jupyter_width"""
res = post_failure(['test', 'test 2'], jupyter=True, full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_failure_send_to_slack_list_success_jupyter_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_failure_send_to_slack_list_failure_jupyter_width(self):
"""test_post_failure_send_to_slack_list_failure_jupyter_width"""
res = post_failure(['test', 'test 2'], jupyter=True, full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_failure_send_to_slack_list_failure_jupyter_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_message_send_to_slack_list_success_jupyter_width(self):
"""test_post_message_send_to_slack_list_success_jupyter_width"""
res = post_message(['test', 'test 2'], jupyter=True, full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_message_send_to_slack_list_success_jupyter_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_message_send_to_slack_list_failure_jupyter_width(self):
"""test_post_message_send_to_slack_list_failure_jupyter_width"""
res = post_message(['test', 'test 2'], jupyter=True, full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_message_send_to_slack_list_failure_jupyter_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_success_send_to_slack_string_success_block_width(self):
"""test_post_success_send_to_slack_string_success_block_width"""
res = post_success('test', block=True, full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_success_send_to_slack_string_success_block_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_success_send_to_slack_string_failure_block_width(self):
"""test_post_success_send_to_slack_string_failure_block_width"""
res = post_success('test', block=True, full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_success_send_to_slack_string_failure_block_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_failure_send_to_slack_string_success_block_width(self):
"""test_post_failure_send_to_slack_string_success_block_width"""
res = post_failure('test', block=True, full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_failure_send_to_slack_string_success_block_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_failure_send_to_slack_string_failure_block_width(self):
"""test_post_failure_send_to_slack_string_failure_block_width"""
res = post_failure('test', block=True, full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_failure_send_to_slack_string_failure_block_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_message_send_to_slack_string_success_block_width(self):
"""test_post_message_send_to_slack_string_success_block_width"""
res = post_message('test', block=True, full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_message_send_to_slack_string_success_block_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_message_send_to_slack_string_failure_block_width(self):
"""test_post_message_send_to_slack_string_failure_block_width"""
res = post_message('test', block=True, full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_message_send_to_slack_string_failure_block_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_success_send_to_slack_dict_success_block_width(self):
"""test_post_success_send_to_slack_dict_success_block_width"""
res = post_success({'test': 'value'}, block=True, full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_success_send_to_slack_dict_success_block_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_success_send_to_slack_dict_failure_block_width(self):
"""test_post_success_send_to_slack_dict_failure_block_width"""
res = post_success({'test': 'value'}, block=True, full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_success_send_to_slack_dict_failure_block_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_failure_send_to_slack_dict_success_block_width(self):
"""test_post_failure_send_to_slack_dict_success_block_width"""
res = post_failure({'test': 'value'}, block=True, full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_failure_send_to_slack_dict_success_block_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_failure_send_to_slack_dict_failure_block_width(self):
"""test_post_failure_send_to_slack_dict_failure_block_width"""
res = post_failure({'test': 'value'}, block=True, full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_failure_send_to_slack_dict_failure_block_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_message_send_to_slack_dict_success_block_width(self):
"""test_post_message_send_to_slack_dict_success_block_width"""
res = post_message({'test': 'value'}, block=True, full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_message_send_to_slack_dict_success_block_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_message_send_to_slack_dict_failure_block_width(self):
"""test_post_message_send_to_slack_dict_failure_block_width"""
res = post_message({'test': 'value'}, block=True, full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_message_send_to_slack_dict_failure_block_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_success_send_to_slack_list_success_block_width(self):
"""test_post_success_send_to_slack_list_success_block_width"""
res = post_success(['test', 'test 2'], block=True, full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_success_send_to_slack_list_success_block_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_success_send_to_slack_list_failure_block_width(self):
"""test_post_success_send_to_slack_list_failure_block_width"""
res = post_success(['test', 'test 2'], block=True, full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_success_send_to_slack_list_failure_block_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_failure_send_to_slack_list_success_block_width(self):
"""test_post_failure_send_to_slack_list_success_block_width"""
res = post_failure(['test', 'test 2'], block=True, full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_failure_send_to_slack_list_success_block_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_failure_send_to_slack_list_failure_block_width(self):
"""test_post_failure_send_to_slack_list_failure_block_width"""
res = post_failure(['test', 'test 2'], block=True, full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_failure_send_to_slack_list_failure_block_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_message_send_to_slack_list_success_block_width(self):
"""test_post_message_send_to_slack_list_success_block_width"""
res = post_message(['test', 'test 2'], block=True, full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_message_send_to_slack_list_success_block_width
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_message_send_to_slack_list_failure_block_width(self):
"""test_post_message_send_to_slack_list_failure_block_width"""
res = post_message(['test', 'test 2'], block=True, full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_message_send_to_slack_list_failure_block_width
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_success_send_to_slack_string_success_all(self):
"""test_post_success_send_to_slack_string_success_all"""
res = post_success('test',
jupyter=True,
block=True,
full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_success_send_to_slack_string_success_all
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_success_send_to_slack_string_failure_all(self):
"""test_post_success_send_to_slack_string_failure_all"""
res = post_success('test',
jupyter=True,
block=True,
full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_success_send_to_slack_string_failure_all
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_failure_send_to_slack_string_success_all(self):
"""test_post_failure_send_to_slack_string_success_all"""
res = post_failure('test',
jupyter=True,
block=True,
full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_failure_send_to_slack_string_success_all
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_failure_send_to_slack_string_failure_all(self):
"""test_post_failure_send_to_slack_string_failure_all"""
res = post_failure('test',
jupyter=True,
block=True,
full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_failure_send_to_slack_string_failure_all
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_message_send_to_slack_string_success_all(self):
"""test_post_message_send_to_slack_string_success_all"""
res = post_message('test',
jupyter=True,
block=True,
full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_message_send_to_slack_string_success_all
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_message_send_to_slack_string_failure_all(self):
"""test_post_message_send_to_slack_string_failure_all"""
res = post_message('test',
jupyter=True,
block=True,
full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_message_send_to_slack_string_failure_all
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_success_send_to_slack_dict_success_all(self):
"""test_post_success_send_to_slack_dict_success_all"""
res = post_success({'test': 'value'},
jupyter=True,
block=True,
full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_success_send_to_slack_dict_success_all
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_success_send_to_slack_dict_failure_all(self):
"""test_post_success_send_to_slack_dict_failure_all"""
res = post_success({'test': 'value'},
jupyter=True,
block=True,
full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_success_send_to_slack_dict_failure_all
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_failure_send_to_slack_dict_success_all(self):
"""test_post_failure_send_to_slack_dict_success_all"""
res = post_failure({'test': 'value'},
jupyter=True,
block=True,
full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_failure_send_to_slack_dict_success_all
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_failure_send_to_slack_dict_failure_all(self):
"""test_post_failure_send_to_slack_dict_failure_all"""
res = post_failure({'test': 'value'},
jupyter=True,
block=True,
full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_failure_send_to_slack_dict_failure_all
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_message_send_to_slack_dict_success_all(self):
"""test_post_message_send_to_slack_dict_success_all"""
res = post_message({'test': 'value'},
jupyter=True,
block=True,
full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_message_send_to_slack_dict_success_all
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_message_send_to_slack_dict_failure_all(self):
"""test_post_message_send_to_slack_dict_failure_all"""
res = post_message({'test': 'value'},
jupyter=True,
block=True,
full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_message_send_to_slack_dict_failure_all
@mock.patch('requests.post', new=mock_request_success_result)
def test_post_success_send_to_slack_list_success_all(self):
    """post_success with a list and every option enabled reports SUCCESS."""
    result = post_success(
        ['test', 'test 2'],
        jupyter=True,
        block=True,
        full_width=True)
    self.assertEqual(result['status'], SUCCESS)
# end of test_post_success_send_to_slack_list_success_all
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_success_send_to_slack_list_failure_all(self):
"""test_post_success_send_to_slack_list_failure_all"""
res = post_success(['test', 'test 2'],
jupyter=True,
block=True,
full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_success_send_to_slack_list_failure_all
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_failure_send_to_slack_list_success_all(self):
"""test_post_failure_send_to_slack_list_success_all"""
res = post_failure(['test', 'test 2'],
jupyter=True,
block=True,
full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_failure_send_to_slack_list_success_all
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_failure_send_to_slack_list_failure_all(self):
"""test_post_failure_send_to_slack_list_failure_all"""
res = post_failure(['test', 'test 2'],
jupyter=True,
block=True,
full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_failure_send_to_slack_list_failure_all
@mock.patch(
'requests.post',
new=mock_request_success_result)
def test_post_message_send_to_slack_list_success_all(self):
"""test_post_message_send_to_slack_list_success_all"""
res = post_message(['test', 'test 2'],
jupyter=True,
block=True,
full_width=True)
self.assertTrue(res['status'] == SUCCESS)
# end of test_post_message_send_to_slack_list_success_all
@mock.patch(
'requests.post',
new=mock_request_failure_result)
def test_post_message_send_to_slack_list_failure_all(self):
"""test_post_message_send_to_slack_list_failure_all"""
res = post_message(['test', 'test 2'],
jupyter=True,
block=True,
full_width=True)
self.assertTrue(res['status'] == FAILED)
# end of test_post_message_send_to_slack_list_failure_all
@mock.patch('requests.post', new=mock_request_success_result)
def test_post_plot_send_to_slack_success(self):
    """post_plot with a matplotlib handle reports SUCCESS."""
    result = post_plot(plt)
    self.assertEqual(result['status'], SUCCESS)
# end of test_post_plot_send_to_slack_success
@mock.patch('requests.post', new=mock_request_failure_result)
def test_post_plot_send_to_slack_failure(self):
    """post_plot with a matplotlib handle reports FAILED on a bad response."""
    result = post_plot(plt)
    self.assertEqual(result['status'], FAILED)
# end of test_post_plot_send_to_slack_failure
# end of TestSendToSlack
| 42.957212
| 77
| 0.704908
| 8,191
| 62,245
| 4.828348
| 0.010499
| 0.067056
| 0.122936
| 0.077524
| 0.980505
| 0.974386
| 0.969203
| 0.96493
| 0.956636
| 0.910794
| 0
| 0.001084
| 0.199422
| 62,245
| 1,448
| 78
| 42.986878
| 0.792563
| 0.272022
| 0
| 0.807574
| 0
| 0
| 0.095309
| 0.003042
| 0
| 0
| 0
| 0
| 0.149437
| 1
| 0.153531
| false
| 0
| 0.011259
| 0
| 0.170931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
747c8d162a4857182ca978b7748ae78c5a6a4567
| 139
|
py
|
Python
|
game_errors.py
|
kevxiao/cow-egg-game
|
73ca6ec452a281f4bf0c2d189fae445380b2cc3d
|
[
"Apache-2.0"
] | null | null | null |
game_errors.py
|
kevxiao/cow-egg-game
|
73ca6ec452a281f4bf0c2d189fae445380b2cc3d
|
[
"Apache-2.0"
] | null | null | null |
game_errors.py
|
kevxiao/cow-egg-game
|
73ca6ec452a281f4bf0c2d189fae445380b2cc3d
|
[
"Apache-2.0"
] | null | null | null |
class MoveError(object):
    """Value object carrying a single move-rejection message.

    NOTE(review): despite the name this is a plain object, not an
    ``Exception`` subclass, so it cannot be raised — confirm with
    callers before changing the base class.
    """

    def __init__(self, msg):
        # Human-readable description of why the move was rejected.
        self.error_msg = msg

    def get_error(self):
        """Return the stored error message (kept for caller compatibility)."""
        return self.error_msg

    def __repr__(self):
        # Debug-friendly representation, e.g. MoveError('bad move').
        return '{}({!r})'.format(type(self).__name__, self.error_msg)
| 19.857143
| 29
| 0.640288
| 19
| 139
| 4.315789
| 0.526316
| 0.219512
| 0.292683
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.266187
| 139
| 7
| 29
| 19.857143
| 0.803922
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
776604fda95cec58c5b7b30853e7ba059e5e71ea
| 15,120
|
py
|
Python
|
diagan-pkg/diagan/models/topk_models.py
|
lee-jinhee/self-diagnosing-gan
|
da87dd1ef10f2d630d6904ced63ae8805b5db356
|
[
"Apache-2.0"
] | 16
|
2021-02-25T06:48:51.000Z
|
2022-03-04T13:08:12.000Z
|
diagan-pkg/diagan/models/topk_models.py
|
lee-jinhee/self-diagnosing-gan
|
da87dd1ef10f2d630d6904ced63ae8805b5db356
|
[
"Apache-2.0"
] | null | null | null |
diagan-pkg/diagan/models/topk_models.py
|
lee-jinhee/self-diagnosing-gan
|
da87dd1ef10f2d630d6904ced63ae8805b5db356
|
[
"Apache-2.0"
] | 4
|
2021-03-02T02:03:17.000Z
|
2022-03-24T03:31:12.000Z
|
import torch
import torch.nn.functional as F
from torch_mimicry.nets import sngan
from torch_mimicry.nets import infomax_gan
from torch_mimicry.nets import ssgan
def _bce_loss_with_logits(output, labels, **kwargs):
r"""
Wrapper for BCE loss with logits.
"""
return F.binary_cross_entropy_with_logits(output, labels, reduction='none', **kwargs)
class TopKGenerator:
    """Mixin implementing the top-k training trick for GAN generators.

    When enabled, only the k highest discriminator scores in a batch
    contribute to the generator loss; the kept fraction decays from the
    full batch down to ``min_topk_rate`` as training proceeds.
    """

    def __init__(self, use_topk=False, decay_steps=2000):
        self.use_topk = use_topk
        # Fraction of the batch kept by get_topk(); starts at 1 (keep all).
        self.topk_rate = 1
        # Per-"epoch" multiplicative decay applied to topk_rate.
        self.decay_rate = 0.99
        # Steps per epoch used by decay_topk_rate() when epoch_steps is
        # omitted. Bug fix: this was hard-coded to 2000, silently
        # ignoring the constructor argument.
        self.decay_steps = decay_steps
        # topk_rate never decays below this floor.
        self.min_topk_rate = 0.5

    def decay_topk_rate(self, step, epoch_steps=None):
        """Recompute ``topk_rate`` from the global training step.

        Args:
            step (int): Current global training step.
            epoch_steps (int, optional): Steps per epoch; falls back to
                ``self.decay_steps`` when omitted or zero.
        """
        assert self.use_topk  # decaying is meaningless when top-k is off
        if epoch_steps:
            epoch = step // epoch_steps
        else:
            epoch = step // self.decay_steps
        self.topk_rate = max(self.decay_rate ** epoch, self.min_topk_rate)

    def get_topk(self, x, return_index=False):
        """Keep the top ``topk_rate`` fraction of ``x`` along dim 0.

        Args:
            x (Tensor): Scores with the batch on dimension 0.
            return_index (bool): If True, also return the selected indices.

        Returns:
            Tensor of the k largest entries along dim 0 (descending),
            optionally paired with their indices.
        """
        N = x.size(0)
        k = int(self.topk_rate * N)
        x, idx = torch.topk(x, k=k, dim=0)
        if return_index:
            return x, idx
        else:
            return x
class TopkSNGANGenerator32(sngan.SNGANGenerator32, TopKGenerator):
    """SNGAN 32x32 generator with optional top-k gradient filtering."""

    def __init__(self, topk=False, **kwargs):
        TopKGenerator.__init__(self, use_topk=topk)
        sngan.SNGANGenerator32.__init__(self, **kwargs)
        print(f"Load SNGAN32 model topk: {topk} loss: {self.loss_type}")

    def _topk_generator_loss(self, netD, batch_size, device):
        # Generate fakes, score them with D, keep only the top-k logits,
        # and compute the generator loss on those. Extracted so the plain
        # and AMP training paths share one implementation.
        fake_images = self.generate_images(num_images=batch_size,
                                           device=device)
        output = netD(fake_images)
        output = self.get_topk(output)
        return self.compute_gan_loss(output=output)

    def train_step(self,
                   real_batch,
                   netD,
                   optG,
                   log_data,
                   device=None,
                   global_step=None,
                   scaler=None,
                   **kwargs):
        r"""
        Takes one training step for G.

        Args:
            real_batch (Tensor): A batch of real images of shape (N, C, H, W).
                Used for obtaining current batch size.
            netD (nn.Module): Discriminator model for obtaining losses.
            optG (Optimizer): Optimizer for updating generator's parameters.
            log_data (dict): A dict mapping name to values for logging uses.
            device (torch.device): Device to use for running the model.
            global_step (int): Variable to sync training, logging and
                checkpointing. Useful for dynamic changes to model amidst
                training.
            scaler (GradScaler, optional): When given, the forward pass runs
                under autocast and gradients are scaled for mixed precision.

        Returns:
            Returns MetricLog object containing updated logging variables
            after 1 training step.
        """
        self.zero_grad()

        # Get only batch size from real batch.
        batch_size = real_batch[0].shape[0]

        if scaler is None:
            errG = self._topk_generator_loss(netD, batch_size, device)
            # Backprop and update gradients.
            errG.backward()
            optG.step()
        else:
            with torch.cuda.amp.autocast():
                errG = self._topk_generator_loss(netD, batch_size, device)
            # Backprop with scaled gradients and update.
            scaler.scale(errG).backward()
            scaler.step(optG)
            scaler.update()

        # Log statistics.
        log_data.add_metric('errG', errG, group='loss')

        return log_data
class TopkSNGANGenerator64(sngan.SNGANGenerator64, TopKGenerator):
    """SNGAN 64x64 generator with optional top-k gradient filtering."""

    def __init__(self, topk=False, **kwargs):
        TopKGenerator.__init__(self, use_topk=topk)
        sngan.SNGANGenerator64.__init__(self, **kwargs)
        print(f"Load SNGAN64 model topk: {topk} loss: {self.loss_type}")

    def _topk_generator_loss(self, netD, num_images, device):
        # Generate fakes, score them with D, then keep only the top-k scores
        # before computing the generator loss.
        fakes = self.generate_images(num_images=num_images, device=device)
        scores = self.get_topk(netD(fakes))
        return self.compute_gan_loss(output=scores)

    def train_step(self,
                   real_batch,
                   netD,
                   optG,
                   log_data,
                   device=None,
                   global_step=None,
                   scaler=None,
                   **kwargs):
        r"""
        Runs a single generator update.

        Args:
            real_batch (Tensor): Batch of real images; only its size is used.
            netD (nn.Module): Discriminator scoring the generated images.
            optG (Optimizer): Optimizer for the generator parameters.
            log_data (MetricLog): Logging object updated with the G loss.
            device (torch.device): Device used for image generation.
            global_step (int): Unused here; kept for API compatibility.
            scaler (GradScaler): When given, mixed precision (AMP) is used.

        Returns:
            The updated ``log_data`` object.
        """
        self.zero_grad()
        num_images = real_batch[0].shape[0]
        if scaler is None:
            errG = self._topk_generator_loss(netD, num_images, device)
            errG.backward()
            optG.step()
        else:
            # Forward pass under autocast; scaled backward + step via the scaler.
            with torch.cuda.amp.autocast():
                errG = self._topk_generator_loss(netD, num_images, device)
            scaler.scale(errG).backward()
            scaler.step(optG)
            scaler.update()
        log_data.add_metric('errG', errG, group='loss')
        return log_data
class TopkInfoMaxGANGenerator32(infomax_gan.InfoMaxGANGenerator32, TopKGenerator):
    """InfoMax-GAN 32x32 generator with optional top-k gradient filtering."""

    def __init__(self, topk=False, **kwargs):
        TopKGenerator.__init__(self, use_topk=topk)
        infomax_gan.InfoMaxGANGenerator32.__init__(self, **kwargs)
        print(f"Load InfoMaxGANGenerator32 model topk: {topk} loss: {self.loss_type}")

    def train_step(self,
                   real_batch,
                   netD,
                   optG,
                   log_data,
                   device=None,
                   global_step=None,
                   **kwargs):
        """Single generator step: GAN loss on the top-k scored fakes plus the
        discriminator's InfoMax (mutual-information) loss on that same subset.

        Returns the updated ``log_data`` object.
        """
        self.zero_grad()
        real_images, _, _ = real_batch
        fakes = self.generate_images(num_images=real_images.shape[0],
                                     device=device)
        # Score all fakes, then keep only the top-k indices for every output.
        scores, local_feat, global_feat = netD(fakes)
        _, keep = self.get_topk(scores, return_index=True)
        keep = keep.view(-1)
        scores = scores[keep]
        local_feat, global_feat = netD.project_features(
            local_feat=local_feat[keep], global_feat=global_feat[keep])
        errG = self.compute_gan_loss(scores)
        errG_IM = netD.compute_infomax_loss(local_feat=local_feat,
                                            global_feat=global_feat,
                                            scale=self.infomax_loss_scale)
        # Joint backward over both loss terms, then one optimizer step.
        (errG + errG_IM).backward()
        optG.step()
        log_data.add_metric('errG', errG, group='loss')
        log_data.add_metric('errG_IM', errG_IM, group='loss_IM')
        return log_data
class TopkInfoMaxGANGenerator64(infomax_gan.InfoMaxGANGenerator64, TopKGenerator):
    """InfoMax-GAN 64x64 generator with optional top-k gradient filtering."""

    def __init__(self, topk=False, **kwargs):
        TopKGenerator.__init__(self, use_topk=topk)
        infomax_gan.InfoMaxGANGenerator64.__init__(self, **kwargs)
        print(f"Load InfoMaxGANGenerator64 model topk: {topk} loss: {self.loss_type}")

    def train_step(self,
                   real_batch,
                   netD,
                   optG,
                   log_data,
                   device=None,
                   global_step=None,
                   **kwargs):
        """Single generator step: GAN loss on the top-k scored fakes plus the
        discriminator's InfoMax (mutual-information) loss on that same subset.

        Returns the updated ``log_data`` object.
        """
        self.zero_grad()
        real_images, _, _ = real_batch
        fakes = self.generate_images(num_images=real_images.shape[0],
                                     device=device)
        # Score all fakes, then keep only the top-k indices for every output.
        scores, local_feat, global_feat = netD(fakes)
        _, keep = self.get_topk(scores, return_index=True)
        keep = keep.view(-1)
        scores = scores[keep]
        local_feat, global_feat = netD.project_features(
            local_feat=local_feat[keep], global_feat=global_feat[keep])
        errG = self.compute_gan_loss(scores)
        errG_IM = netD.compute_infomax_loss(local_feat=local_feat,
                                            global_feat=global_feat,
                                            scale=self.infomax_loss_scale)
        # Joint backward over both loss terms, then one optimizer step.
        (errG + errG_IM).backward()
        optG.step()
        log_data.add_metric('errG', errG, group='loss')
        log_data.add_metric('errG_IM', errG_IM, group='loss_IM')
        return log_data
class TopkSSGANGenerator32(ssgan.SSGANGenerator32, TopKGenerator):
    """SSGAN 32x32 generator with optional top-k gradient filtering."""

    def __init__(self, topk=False, **kwargs):
        TopKGenerator.__init__(self, use_topk=topk)
        ssgan.SSGANGenerator32.__init__(self, **kwargs)
        print(f"Load SSGANGenerator32 model topk: {topk} loss: {self.loss_type}")

    def train_step(self,
                   real_batch,
                   netD,
                   optG,
                   log_data,
                   device=None,
                   global_step=None,
                   **kwargs):
        r"""
        Runs a single generator update.

        GAN loss is computed on the top-k scored (upright) fakes only; the
        self-supervised rotation loss sees the full fake batch.

        Args:
            real_batch (Tensor): Batch of real images; only its size is used.
            netD (nn.Module): Discriminator providing GAN and SS losses.
            optG (Optimizer): Optimizer for the generator parameters.
            log_data (MetricLog): Logging object updated with both losses.
            device (torch.device): Device used for image generation.
            global_step (int): Unused here; kept for API compatibility.

        Returns:
            MetricLog: The updated ``log_data`` object.
        """
        self.zero_grad()
        fakes = self.generate_images(num_images=real_batch[0].shape[0],
                                     device=device)
        scores, _ = netD(fakes)
        errG = self.compute_gan_loss(self.get_topk(scores))
        errG_SS, _ = netD.compute_ss_loss(images=fakes,
                                          scale=self.ss_loss_scale)
        # Joint backward over both loss terms, then one optimizer step.
        (errG + errG_SS).backward()
        optG.step()
        log_data.add_metric('errG', errG, group='loss')
        log_data.add_metric('errG_SS', errG_SS, group='loss_SS')
        return log_data
class TopkSSGANGenerator64(ssgan.SSGANGenerator64, TopKGenerator):
    """SSGAN 64x64 generator with optional top-k gradient filtering."""

    def __init__(self, topk=False, **kwargs):
        TopKGenerator.__init__(self, use_topk=topk)
        ssgan.SSGANGenerator64.__init__(self, **kwargs)
        print(f"Load SSGANGenerator64 model topk: {topk} loss: {self.loss_type}")

    def train_step(self,
                   real_batch,
                   netD,
                   optG,
                   log_data,
                   device=None,
                   global_step=None,
                   **kwargs):
        r"""
        Runs a single generator update.

        GAN loss is computed on the top-k scored (upright) fakes only; the
        self-supervised rotation loss sees the full fake batch.

        Args:
            real_batch (Tensor): Batch of real images; only its size is used.
            netD (nn.Module): Discriminator providing GAN and SS losses.
            optG (Optimizer): Optimizer for the generator parameters.
            log_data (MetricLog): Logging object updated with both losses.
            device (torch.device): Device used for image generation.
            global_step (int): Unused here; kept for API compatibility.

        Returns:
            MetricLog: The updated ``log_data`` object.
        """
        self.zero_grad()
        fakes = self.generate_images(num_images=real_batch[0].shape[0],
                                     device=device)
        scores, _ = netD(fakes)
        errG = self.compute_gan_loss(self.get_topk(scores))
        errG_SS, _ = netD.compute_ss_loss(images=fakes,
                                          scale=self.ss_loss_scale)
        # Joint backward over both loss terms, then one optimizer step.
        (errG + errG_SS).backward()
        optG.step()
        log_data.add_metric('errG', errG, group='loss')
        log_data.add_metric('errG_SS', errG_SS, group='loss_SS')
        return log_data
| 35.081206
| 107
| 0.579894
| 1,702
| 15,120
| 4.915394
| 0.106933
| 0.021755
| 0.018647
| 0.019125
| 0.870667
| 0.861344
| 0.844131
| 0.844131
| 0.844131
| 0.844131
| 0
| 0.008085
| 0.345569
| 15,120
| 430
| 108
| 35.162791
| 0.837393
| 0.263426
| 0
| 0.786325
| 0
| 0
| 0.044984
| 0.003953
| 0
| 0
| 0
| 0
| 0.004274
| 1
| 0.068376
| false
| 0
| 0.021368
| 0
| 0.15812
| 0.025641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
77a48ade34d7c3b60583b7faefb6266bb58b24ee
| 44,272
|
py
|
Python
|
Method/helper_classes/experiment.py
|
dice-group/LearnALCLengths
|
cb019ba234092a323f3785517d1cc6152a5ef7a4
|
[
"MIT"
] | null | null | null |
Method/helper_classes/experiment.py
|
dice-group/LearnALCLengths
|
cb019ba234092a323f3785517d1cc6152a5ef7a4
|
[
"MIT"
] | null | null | null |
Method/helper_classes/experiment.py
|
dice-group/LearnALCLengths
|
cb019ba234092a323f3785517d1cc6152a5ef7a4
|
[
"MIT"
] | null | null | null |
import numpy as np, copy
import torch, random
from collections import Counter, defaultdict
from sklearn.utils import resample
from torch.utils.data import DataLoader
import sys, os, json
base_path = os.path.dirname(os.path.realpath(__file__)).split('helper_classes')[0]
sys.path.append(base_path)
from util.weightedloss import WeightedMSELoss
from helper_classes.dataloader import HeadAndRelationBatchLoader, CLPDataLoader
from concept_length_predictors.helper_classes import ConceptLengthPredictor
from torch.optim.lr_scheduler import ExponentialLR
from torch.nn import CrossEntropyLoss
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
from sklearn.metrics import f1_score, accuracy_score
import time
class Experiment:
def __init__(self, kwargs):
    """Store the experiment config and build the concept-length predictor.

    Args:
        kwargs (dict): Experiment configuration; must contain 'decay_rate'.
    """
    self.kwargs = kwargs
    self.decay_rate = self.kwargs['decay_rate']
    self.clp = ConceptLengthPredictor(self.kwargs)
def get_data_idxs(self, data):
data_idxs = [(self.clp.dataloader.entity_to_idx[t[0]], self.clp.dataloader.relation_to_idx[t[1]], self.clp.dataloader.entity_to_idx[t[2]]) for t in data]
return data_idxs
@staticmethod
def get_er_vocab(data):
# head entity and relation
er_vocab = defaultdict(list)
for triple in data:
er_vocab[(triple[0], triple[1])].append(triple[2])
return er_vocab
def get_batch(self, x, y, batch_size, shuffle=True):
random.seed(self.kwargs['seed'])
if shuffle:
indx = list(range(x.shape[0]))
random.shuffle(indx)
x, y = x[indx], y[indx]
if len(x) >= batch_size:
for i in range(0, x.shape[0]-batch_size+1, batch_size):
yield x[i:i+batch_size], y[i:i+batch_size]
else:
yield x, y
def get_optimizer(self, length_predictor, optimizer='Adam', embedding_model=None):
if embedding_model is not None:
if optimizer == 'Adam':
return torch.optim.Adam(list(length_predictor.parameters())+list(embedding_model.parameters()), lr=self.kwargs['learning_rate'])
elif optimizer == 'SGD':
return torch.optim.SGD(list(length_predictor.parameters())+list(embedding_model.parameters()), lr=self.kwargs['learning_rate'])
elif optimizer == 'RMSprop':
return torch.optim.RMSprop(list(length_predictor.parameters())+list(embedding_model.parameters()), lr=self.kwargs['learning_rate'])
else:
raise ValueError
print('Unsupported optimizer')
else:
if optimizer == 'Adam':
return torch.optim.Adam(length_predictor.parameters(), lr=self.kwargs['learning_rate'])
elif optimizer == 'SGD':
return torch.optim.SGD(length_predictor.parameters(), lr=self.kwargs['learning_rate'])
elif optimizer == 'RMSprop':
return torch.optim.RMSprop(length_predictor.parameters(), lr=self.kwargs['learning_rate'])
else:
raise ValueError
print('Unsupported optimizer')
@staticmethod
def remove_minority_problem_types(data:list, label='target concept length'):
"""
Function for removing class expressions whose lengths are under-represented
"""
length_counts = Counter(v[label] for _, v in data)
mean_length_count = sum(length_counts.values()) // len(length_counts)
return list(filter(lambda item: length_counts[item[1][label]] >= mean_length_count/5, data))
# def upsample_and_balance(self, data:list, label="target concept length"):
# np.random.seed(1)
# data = self.remove_minority_problem_types(data, label)
# upsampled_data = []
# length_counts = sorted(Counter(v[label] for _, v in data).items(), key=lambda item: item[1])
# lengths = [key for key, _ in length_counts]
# majority_length, max_length_count = length_counts.pop()
# upsampled_data.extend(list(filter(lambda pbm: pbm[1][label]==majority_length, data)))
# for l, _ in length_counts:
# filt_data = list(filter(lambda pbm: pbm[1][label]==l, data))
# upsampled_data.extend(resample(filt_data, replace=True, n_samples=max_length_count, random_state=123))
# random.shuffle(upsampled_data)
# return upsampled_data
def show_num_learnable_params(self):
print("*"*20+"Trainable model size"+"*"*20)
size = sum([p.numel() for p in self.clp.length_predictor.parameters()])
size_ = 0
print("Length predictor: ", size)
if self.clp.embedding_model is not None:
size_ += sum([p.numel() for p in self.clp.embedding_model.parameters()])
size += size_
print("Embedding model: ", size_)
print("Total: ", size)
print("*"*20+"Trainable model size"+"*"*20)
print()
def train(self, data_train, data_test, epochs=200, clp_batch_size=64, tc_batch_size=512, kf_n_splits=10, test=False, save_model = False, include_embedding_loss=False, optimizer = 'Adam', tc_label_smoothing=0.9, record_runtime=False):
    """Train the concept-length predictor, optionally jointly with the
    knowledge-graph embedding model.

    Two training paths exist: when ``self.clp.embedding_model`` is None,
    pre-computed embeddings are used and batches come from ``get_batch``;
    otherwise embeddings are recomputed each epoch and batches come from
    ``self.clp.dataloader.load``. The epoch with the best training
    accuracy provides the final weights. Results/weights/runtime are
    saved as JSON / .pt files next to the triples directory.

    NOTE(review): ``kf_n_splits`` is accepted but unused here (it is used
    by ``cross_validate``); ``data_test`` is only read when ``test=True``.

    Returns:
        (Train_acc, Train_loss): per-epoch accuracy and mean-loss lists.
    """
    # Inverse-sqrt class-frequency weights for the loss, keyed by length.
    if self.clp.embedding_model is not None:
        Weights = {int(l): 1./torch.sqrt(torch.tensor(c)) for l,c in Counter([v["target concept length"] for _,v in data_train]).items()}
    else:
        Weights = {int(l): 1./torch.sqrt(torch.tensor(c)) for l,c in Counter(data_train[1].tolist()).items()}
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Loss weights: ", Weights)
    if self.clp.length_predictor.as_classification:
        # Dense weight vector indexed by length; zero weight for lengths
        # absent from the training data.
        W = []
        for l in range(max(Weights.keys())+1):
            if not l in Weights:
                W.append(torch.tensor(0.))
            else:
                W.append(Weights[l])
        self.loss = CrossEntropyLoss(weight=torch.Tensor(W).to(device))
    else:
        self.loss = WeightedMSELoss(Weights)
    self.show_num_learnable_params()
    if self.clp.embedding_model is not None and include_embedding_loss:
        # Auxiliary triple-classification batches for the embedding loss.
        triple_data_idxs = self.get_data_idxs(self.clp.dataloader.data)
        head_to_relation_batch = list(DataLoader(
            HeadAndRelationBatchLoader(er_vocab=self.get_er_vocab(triple_data_idxs), num_e=len(self.clp.dataloader.entities)),
            batch_size=tc_batch_size, num_workers=12, shuffle=True))
    embeddings = None
    if self.clp.embedding_model is None:
        # Pre-computed embeddings: no embedding model is trained.
        embeddings = self.clp.get_embedding(embedding_model=None)
    train_on_gpu = torch.cuda.is_available()
    if not train_on_gpu:
        print("Training on CPU, it may take long...")
    else:
        print("GPU available !")
    best_performance = 0.
    print()
    print("#"*50)
    print()
    print("{} starts training on {} data set \n".format(self.clp.length_predictor.name, self.kwargs['path_to_triples'].split("/")[-3]))
    print("#"*50, "\n")
    # Train on deep copies so self.clp keeps its original weights.
    length_predictor = copy.deepcopy(self.clp.length_predictor)
    if not os.path.exists(self.kwargs['path_to_triples'].split("Triples")[0]+"Model_weights/"):
        os.mkdir(self.kwargs['path_to_triples'].split("Triples")[0]+"Model_weights/")
    if self.clp.embedding_model is not None:
        embedding_model = copy.deepcopy(self.clp.embedding_model)
    if train_on_gpu:
        length_predictor.cuda()
        if embeddings is None:
            embedding_model.cuda()
    if embeddings is None:
        # Jointly optimise predictor and embedding model.
        opt = self.get_optimizer(length_predictor=length_predictor, optimizer=optimizer, embedding_model=embedding_model)
    else:
        opt = self.get_optimizer(length_predictor=length_predictor, optimizer=optimizer)
    if self.decay_rate:
        self.scheduler = ExponentialLR(opt, self.decay_rate)
    train_losses = []
    Train_loss = []
    Train_acc = []
    if include_embedding_loss:
        tc_iterator = 0
    if record_runtime:
        t0 = time.time()
    Emb = embeddings if embeddings is not None else self.clp.get_embedding(embedding_model)
    if self.clp.embedding_model is None:
        # --- Path 1: fixed, pre-computed embeddings ---
        for e in range(epochs):
            tr_preds, tr_targets = [], []
            for x, y in self.get_batch(data_train[0], data_train[1], batch_size=clp_batch_size):
                if self.clp.learner_name == "MLP":
                    # MLP consumes the mean over the sequence dimension.
                    x = x.mean(1)
                if length_predictor.as_classification:
                    y = y.to(torch.long)
                tr_targets.extend(y.tolist())
                if(train_on_gpu):
                    x, y = x.cuda(), y.cuda()
                #ipdb.set_trace()
                y_hat = length_predictor(x)
                if length_predictor.as_classification:
                    tr_preds.extend(y_hat.cpu().detach().argmax(1).tolist())
                    clp_loss = self.loss(y_hat, y)
                else:
                    # Regression: round predictions to the nearest length.
                    tr_preds.extend(torch.round(y_hat.cpu().detach()).squeeze().tolist())
                    clp_loss = self.loss(y_hat.squeeze(), y)
                # calculate the loss and perform backprop
                train_losses.append(clp_loss.item())
                opt.zero_grad()
                clp_loss.backward()
                opt.step()
                if self.decay_rate:
                    self.scheduler.step()
            tr_acc = 100*accuracy_score(tr_preds, tr_targets)
            Train_loss.append(np.mean(train_losses))
            Train_acc.append(tr_acc)
            print("Epoch: {}/{}...".format(e+1, epochs),
                  "Train loss: {:.4f}...".format(np.mean(train_losses)),
                  "Train acc: {:.2f}%...".format(tr_acc))
            train_losses = []
            # Track the weights of the best-so-far epoch (by train accuracy).
            weights_clp = copy.deepcopy(length_predictor.state_dict())
            if embeddings is None:
                weights_emb = copy.deepcopy(embedding_model.state_dict())
            if Train_acc and Train_acc[-1] > best_performance:
                best_performance = Train_acc[-1]
                best_weights_clp = weights_clp
                if embeddings is None:
                    best_weights_emb = weights_emb
    else:
        # --- Path 2: embeddings come from a trainable embedding model ---
        for e in range(epochs):
            tr_preds, tr_targets = [], []
            for x, y in self.clp.dataloader.load(Emb, data=data_train, batch_size=clp_batch_size, shuffle=True):
                if self.clp.learner_name == "MLP":
                    x = x.mean(1)
                if include_embedding_loss:
                    # Cycle through the pre-built triple-classification batches.
                    head_batch = head_to_relation_batch[tc_iterator%len(head_to_relation_batch)]
                    tc_iterator += 1
                    e1_idx, r_idx, tc_targets = head_batch
                    if train_on_gpu:
                        tc_targets = tc_targets.cuda()
                        r_idx = r_idx.cuda()
                        e1_idx = e1_idx.cuda()
                    if tc_label_smoothing:
                        tc_targets = ((1.0 - tc_label_smoothing) * tc_targets) + (1.0 / tc_targets.size(1))
                    tc_loss = embedding_model.forward_head_and_loss(e1_idx, r_idx, tc_targets)
                if length_predictor.as_classification:
                    y = y.to(torch.long)
                tr_targets.extend(y.tolist())
                if(train_on_gpu):
                    x, y = x.cuda(), y.cuda()
                #ipdb.set_trace()
                y_hat = length_predictor(x)
                if length_predictor.as_classification:
                    tr_preds.extend(y_hat.cpu().detach().argmax(1).tolist())
                    clp_loss = self.loss(y_hat, y)
                else:
                    tr_preds.extend(torch.round(y_hat.cpu().detach()).squeeze().tolist())
                    clp_loss = self.loss(y_hat.squeeze(), y)
                # calculate the loss and perform backprop
                if include_embedding_loss:
                    # Equal-weight combination of predictor and embedding losses.
                    tclp_loss = 0.5*clp_loss + 0.5*tc_loss
                else:
                    tclp_loss = clp_loss
                train_losses.append(tclp_loss.item())
                opt.zero_grad()
                tclp_loss.backward()
                opt.step()
                if self.decay_rate:
                    self.scheduler.step()
            # Refresh embeddings after each epoch of embedding-model updates.
            Emb = self.clp.get_embedding(embedding_model)
            tr_acc = 100*accuracy_score(tr_preds, tr_targets)
            Train_loss.append(np.mean(train_losses))
            Train_acc.append(tr_acc)
            print("Epoch: {}/{}...".format(e+1, epochs),
                  "Train loss: {:.4f}...".format(np.mean(train_losses)),
                  "Train acc: {:.2f}%...".format(tr_acc))
            train_losses = []
            weights_clp = copy.deepcopy(length_predictor.state_dict())
            if embeddings is None:
                weights_emb = copy.deepcopy(embedding_model.state_dict())
            if Train_acc and Train_acc[-1] > best_performance:
                best_performance = Train_acc[-1]
                best_weights_clp = weights_clp
                if embeddings is None:
                    best_weights_emb = weights_emb
    # Restore the best-performing weights before testing/saving.
    # NOTE(review): best_weights_clp is unbound if no epoch improves on 0 —
    # only possible when every epoch has 0% accuracy.
    length_predictor.load_state_dict(best_weights_clp)
    if embeddings is None:
        embedding_model.load_state_dict(best_weights_emb)
    results_dict = dict()
    if test:
        print()
        print("#"*50)
        print("Testing the model ....................")
        print()
        length_predictor.eval()
        if embeddings is None:
            embedding_model.eval()
        preds, targets = [],[]
        if self.clp.embedding_model is None:
            for x, y in self.get_batch(data_test[0], data_test[1], batch_size=clp_batch_size, shuffle=False):
                if length_predictor.as_classification:
                    y = y.to(torch.long)
                if self.clp.learner_name == "MLP":
                    x = x.mean(1)
                targets.extend(y.tolist())
                if train_on_gpu:
                    x, y = x.cuda(), y.cuda()
                y_hat = length_predictor(x)
                if length_predictor.as_classification:
                    preds.extend(y_hat.cpu().detach().argmax(1).tolist())
                else:
                    preds.extend(torch.round(y_hat.cpu().detach()).squeeze().tolist())
        else:
            for x, y in self.clp.dataloader.load(Emb, data=data_test, batch_size=clp_batch_size, shuffle=False):
                if length_predictor.as_classification:
                    y = y.to(torch.long)
                if self.clp.learner_name == "MLP":
                    x = x.mean(1)
                targets.extend(y.tolist())
                if train_on_gpu:
                    x, y = x.cuda(), y.cuda()
                y_hat = length_predictor(x)
                if length_predictor.as_classification:
                    preds.extend(y_hat.cpu().detach().argmax(1).tolist())
                else:
                    preds.extend(torch.round(y_hat.cpu().detach()).squeeze().tolist())
        test_acc = 100.*accuracy_score(preds, targets)
        print("Test for {}:".format(length_predictor.name))
        print("Test accuracy: ", test_acc)
        f1 = 100.*f1_score(preds, targets, average='macro')
        results_dict.update({"Test acc":test_acc, "Test f1": f1})
        print("Test f1 score: ", f1)
    print("Train accuracy: {}".format(max(Train_acc)))
    print()
    results_dict.update({"Train Max Acc": max(Train_acc), "Train Min Loss": min(Train_loss)})
    # Persist results and (optionally) model weights next to the triples dir.
    if not os.path.exists(self.kwargs['path_to_triples'].split("Triples")[0]+"Results/"):
        os.mkdir(self.kwargs['path_to_triples'].split("Triples")[0]+"Results/")
    if not os.path.exists(self.kwargs['path_to_triples'].split("Triples")[0]+"Model_weights/"):
        os.mkdir(self.kwargs['path_to_triples'].split("Triples")[0]+"Model_weights/")
    if embeddings is None:
        with open(self.kwargs['path_to_triples'].split("Triples")[0]+"Results/"+"Train_Results_"+embedding_model.name+'_'+length_predictor.name+"_final.json", "w") as file:
            json.dump(results_dict, file, indent=3)
    else:
        with open(self.kwargs['path_to_triples'].split("Triples")[0]+"Results/"+"Train_Results_"+length_predictor.name+"_final.json", "w") as file:
            json.dump(results_dict, file, indent=3)
    if save_model:
        torch.save(length_predictor, self.kwargs['path_to_triples'].split("Triples")[0]+"Model_weights/"+length_predictor.name+"_final.pt")
        if embeddings is None:
            torch.save(embedding_model, self.kwargs['path_to_triples'].split("Triples")[0]+"Model_weights/"+embedding_model.name+'_'+length_predictor.name+"_final.pt")
        print("{} saved".format(length_predictor.name))
        print()
    if record_runtime:
        duration = time.time()-t0
        runtime_info = {"Length Learner": length_predictor.name,
                        "Number of Epochs": epochs, "Runtime (s)": duration}
        if not os.path.exists(self.kwargs['path_to_triples'].split("Triples")[0]+"Runtime"):
            os.mkdir(self.kwargs['path_to_triples'].split("Triples")[0]+"Runtime")
        if embeddings is None:
            with open(self.kwargs['path_to_triples'].split("Triples")[0]+"Runtime/"+"Runtime_"+embedding_model.name+'_'+length_predictor.name+".json", "w") as file:
                json.dump(runtime_info, file, indent=3)
        else:
            with open(self.kwargs['path_to_triples'].split("Triples")[0]+"Runtime/"+"Runtime_"+length_predictor.name+".json", "w") as file:
                json.dump(runtime_info, file, indent=3)
    return Train_acc, Train_loss
def cross_validate(self, data_train, data_test, epochs=200, clp_batch_size=64, tc_batch_size=512, kf_n_splits=10, test=False, save_model = False, include_embedding_loss=False, optimizer = 'Adam', tc_label_smoothing=0.9):
if self.clp.embedding_model is not None:
Weights = {int(l): 1./torch.sqrt(torch.tensor(c)) for l,c in Counter([v["target concept length"] for _,v in data_train]).items()}
else:
Weights = {int(l): 1./torch.sqrt(torch.tensor(c)) for l,c in Counter(data_train[1].tolist()).items()}
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Loss weights: ", Weights)
if self.clp.length_predictor.as_classification:
W = []
for l in range(max(Weights.keys())+1):
if not l in Weights:
W.append(torch.tensor(0.))
else:
W.append(Weights[l])
self.loss = CrossEntropyLoss(weight=torch.Tensor(W).to(device))
else:
self.loss = WeightedMSELoss(Weights)
if self.clp.embedding_model is not None and include_embedding_loss:
triple_data_idxs = self.get_data_idxs(self.clp.dataloader.data)
head_to_relation_batch = list(DataLoader(
HeadAndRelationBatchLoader(er_vocab=self.get_er_vocab(triple_data_idxs), num_e=len(self.clp.dataloader.entities)),batch_size=tc_batch_size, num_workers=12, shuffle=True))
embeddings = None
if self.clp.embedding_model is None:
embeddings = self.clp.get_embedding(embedding_model=None)
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
print("Training on CPU, it may take long...")
else:
print("GPU available !")
best_performance = 0.
print()
print("#"*50)
print()
print("{} starts training on {} data set \n".format(self.clp.length_predictor.name, self.kwargs['path_to_triples'].split("/")[-3]))
print("#"*50, "\n")
from sklearn.model_selection import KFold
Kf = KFold(n_splits=kf_n_splits, shuffle=True, random_state=142)
fold = 0
All_losses = defaultdict(lambda: [])
All_acc = defaultdict(lambda: [])
iterable = data_train if self.clp.embedding_model is not None else list(range(len(data_train[1])))
for train_index, valid_index in Kf.split(iterable):
self.show_num_learnable_params()
length_predictor = copy.deepcopy(self.clp.length_predictor)
embedding_model = None
if self.clp.embedding_model is not None:
embedding_model = copy.deepcopy(self.clp.embedding_model)
if train_on_gpu:
length_predictor.cuda()
if embeddings is None:
embedding_model.cuda()
if embeddings is None:
opt = self.get_optimizer(length_predictor=length_predictor, optimizer=optimizer, embedding_model=embedding_model)
else:
opt = self.get_optimizer(length_predictor=length_predictor, optimizer=optimizer)
if self.decay_rate:
self.scheduler = ExponentialLR(opt, self.decay_rate)
if self.clp.embedding_model is None:
x_train, x_valid = data_train[0][train_index], data_train[0][valid_index]
y_train, y_valid = data_train[1][train_index], data_train[1][valid_index]
else:
d_train, d_valid = np.array(data_train,dtype=object)[train_index], np.array(data_train,dtype=object)[valid_index]
fold += 1
print("*"*50)
print("Fold {}/{}:\n".format(fold, kf_n_splits))
print("*"*50, "\n")
train_losses = []
Train_losses = []
Val_losses = []
Train_acc = []
Val_acc = []
if self.clp.embedding_model is not None and include_embedding_loss:
tc_iterator = 0
Emb = embeddings if embeddings is not None else self.clp.get_embedding(embedding_model)
if self.clp.embedding_model is None:
for e in range(epochs):
tr_preds, tr_targets = [], []
for x, y in self.get_batch(x_train, y_train, batch_size=clp_batch_size):
if self.clp.learner_name == "MLP":
x = x.mean(1)
if length_predictor.as_classification:
y = y.to(torch.long)
tr_targets.extend(y.tolist())
if(train_on_gpu):
x, y = x.cuda(), y.cuda()
y_hat = length_predictor(x)
if length_predictor.as_classification:
tr_preds.extend(y_hat.cpu().detach().argmax(1).tolist())
clp_loss = self.loss(y_hat, y)
else:
tr_preds.extend(torch.round(y_hat.cpu().detach()).squeeze().tolist())
clp_loss = self.loss(y_hat.squeeze(), y)
# calculate the loss and perform backprop
train_losses.append(clp_loss.item())
opt.zero_grad()
clp_loss.backward()
opt.step()
if self.decay_rate:
self.scheduler.step()
tr_acc = 100*accuracy_score(tr_preds, tr_targets)
# Get validation loss
val_losses = []
length_predictor.eval()
if embeddings is None:
embedding_model.eval()
val_targets, val_preds = [], []
for x, y in self.get_batch(x_valid, y_valid, batch_size=clp_batch_size):
if length_predictor.as_classification:
y = y.to(torch.long)
val_targets.extend(y.tolist())
if self.clp.learner_name == "MLP":
x = x.mean(1)
if(train_on_gpu):
x, y = x.cuda(), y.cuda()
y_hat = length_predictor(x)
if length_predictor.as_classification:
val_preds.extend(y_hat.cpu().detach().argmax(1).tolist())
val_loss = self.loss(y_hat, y)
else:
val_preds.extend(torch.round(y_hat.cpu().detach()).squeeze().tolist())
val_loss = self.loss(y_hat.squeeze(), y)
val_losses.append(val_loss.item())
length_predictor.train() # reset to train mode after iterationg through validation data
if embeddings is None:
embedding_model.train()
Train_losses.append(np.mean(train_losses))
Val_losses.append(np.mean(val_losses))
val_acc = 100*accuracy_score(val_preds, val_targets)
Val_acc.append(val_acc)
Train_acc.append(tr_acc)
print("Epoch: {}/{}...".format(e+1, epochs),
"Train loss: {:.4f}...".format(np.mean(train_losses)),
"Val loss: {:.4f}...".format(np.mean(val_losses)),
"Train acc: {:.2f}%...".format(tr_acc),
"Val acc: {:.2f}%".format(val_acc))
train_losses = []
weights_clp = copy.deepcopy(length_predictor.state_dict())
if embeddings is None:
weights_emb = copy.deepcopy(embedding_model.state_dict())
if Val_acc and Val_acc[-1] > best_performance:
best_performance = Val_acc[-1]
best_weights_clp = weights_clp
if embeddings is None:
best_weights_emb = weights_emb
All_losses["train"].append(Train_losses)
All_losses["val"].append(Val_losses)
All_acc["train"].append(Train_acc)
All_acc["val"].append(Val_acc)
else:
for e in range(epochs):
tr_targets, tr_preds = [], []
for x, y in self.clp.dataloader.load(Emb, data=d_train, batch_size=clp_batch_size, shuffle=True):
if self.clp.learner_name == "MLP":
x = x.mean(1)
if self.clp.embedding_model is not None and include_embedding_loss:
head_batch = head_to_relation_batch[tc_iterator%len(head_to_relation_batch)]
tc_iterator += 1
e1_idx, r_idx, tc_targets = head_batch
if train_on_gpu:
tc_targets = tc_targets.cuda()
r_idx = r_idx.cuda()
e1_idx = e1_idx.cuda()
if tc_label_smoothing:
tc_targets = ((1.0 - tc_label_smoothing) * tc_targets) + (1.0 / tc_targets.size(1))
tc_loss = embedding_model.forward_head_and_loss(e1_idx, r_idx, tc_targets)
if length_predictor.as_classification:
y = y.to(torch.long)
tr_targets.extend(y.tolist())
if(train_on_gpu):
x, y = x.cuda(), y.cuda()
y_hat = length_predictor(x)
tr_total_dpoints_before_eval += len(y)
if length_predictor.as_classification:
tr_preds.extend(y_hat.cpu().detach().argmax(1).tolist())
clp_loss = self.loss(y_hat, y)
else:
tr_preds.extend(torch.round(y_hat.cpu().detach()).squeeze().tolist())
clp_loss = self.loss(y_hat.squeeze(), y)
# calculate the loss and perform backprop
if self.clp.embedding_model is not None and include_embedding_loss:
tclp_loss = 0.5*clp_loss + 0.5*tc_loss
else:
tclp_loss = clp_loss
train_losses.append(tclp_loss.item())
opt.zero_grad()
tclp_loss.backward()
opt.step()
if self.decay_rate:
self.scheduler.step()
if self.clp.embedding_model is not None:
Emb = embeddings if embeddings is not None else self.clp.get_embedding(embedding_model)
tr_acc = 100*accuracy_score(tr_preds, tr_targets)
# Get validation loss
val_losses = []
length_predictor.eval()
if embeddings is None:
embedding_model.eval()
val_preds, val_targets = [], []
for x, y in self.clp.dataloader.load(Emb, data=d_valid, batch_size=clp_batch_size, shuffle=False):
if length_predictor.as_classification:
y = y.to(torch.long)
if self.clp.learner_name == "MLP":
x = x.mean(1)
val_targets.extend(y.tolist())
if(train_on_gpu):
x, y = x.cuda(), y.cuda()
if self.clp.embedding_model is not None and include_embedding_loss:
head_batch = random.choice(head_to_relation_batch)
e1_idx, r_idx, tc_targets = head_batch
if train_on_gpu:
tc_targets = tc_targets.cuda()
r_idx = r_idx.cuda()
e1_idx = e1_idx.cuda()
if tc_label_smoothing:
tc_targets = ((1.0 - tc_label_smoothing) * tc_targets) + (1.0 / tc_targets.size(1))
tc_loss = embedding_model.forward_head_and_loss(e1_idx, r_idx, tc_targets)
y_hat = length_predictor(x)
if length_predictor.as_classification:
val_preds.extend(y_hat.cpu().detach().argmax(1).tolist())
val_loss = self.loss(y_hat, y)
else:
val_preds.extend(torch.round(y_hat.cpu().detach()).squeeze().tolist())
val_loss = self.loss(y_hat.squeeze(), y)
if self.clp.embedding_model is not None and include_embedding_loss:
tclp_loss = 0.5*val_loss + 0.5*tc_loss
else:
tclp_loss = val_loss
val_losses.append(tclp_loss.item())
length_predictor.train() # reset to train mode after iterationg through validation data
if embeddings is None:
embedding_model.train()
Train_losses.append(np.mean(train_losses))
Val_losses.append(np.mean(val_losses))
val_acc = 100.*accuracy_score(val_preds, val_targets)
Val_acc.append(val_acc)
Train_acc.append(tr_acc)
print("Epoch: {}/{}...".format(e+1, epochs),
"Train loss: {:.4f}...".format(np.mean(train_losses)),
"Val loss: {:.4f}...".format(np.mean(val_losses)),
"Train acc: {:.2f}%...".format(tr_acc),
"Val acc: {:.2f}%".format(val_acc))
train_losses = []
tr_total_dpoints_before_eval = 0.
weights_clp = copy.deepcopy(length_predictor.state_dict())
if embeddings is None:
weights_emb = copy.deepcopy(embedding_model.state_dict())
if Val_acc and Val_acc[-1] > best_performance:
best_performance = Val_acc[-1]
best_weights_clp = weights_clp
if embeddings is None:
best_weights_emb = weights_emb
All_losses["train"].append(Train_losses)
All_losses["val"].append(Val_losses)
All_acc["train"].append(Train_acc)
All_acc["val"].append(Val_acc)
min_num_steps = min(min([len(l) for l in All_losses['train']]), min([len(l) for l in All_losses['val']]))
train_l = np.array([l[:min_num_steps] for l in All_losses["train"]]).mean(0)
val_l = np.array([l[:min_num_steps] for l in All_losses["val"]]).mean(0)
t_acc = np.array([l[:min_num_steps] for l in All_acc["train"]]).mean(0)
v_acc = np.array([l[:min_num_steps] for l in All_acc["val"]]).mean(0)
del All_losses, All_acc
length_predictor.load_state_dict(best_weights_clp)
if embeddings is None:
embedding_model.load_state_dict(best_weights_emb)
results_dict = dict()
if test:
print()
print("#"*50)
print("Testing the model ....................")
print()
length_predictor.eval()
if embeddings is None:
embedding_model.eval()
preds, targets = [], []
if self.clp.embedding_model is None:
for x, y in self.get_batch(data_test[0], data_test[1], batch_size=clp_batch_size, shuffle=False):
if train_on_gpu:
x = x.cuda()
if self.clp.learner_name == "MLP":
x = x.mean(1)
targets.extend(y.tolist())
y_hat = length_predictor(x)
if length_predictor.as_classification:
preds.extend(y_hat.cpu().detach().argmax(1).tolist())
else:
preds.extend(torch.round(y_hat.cpu().detach()).squeeze().tolist())
else:
for x, y in self.clp.dataloader.load(Emb, data=data_test, batch_size=clp_batch_size, shuffle=False):
if length_predictor.as_classification:
y = y.to(torch.long)
if train_on_gpu:
x, y = x.cuda(), y.cuda()
if self.clp.learner_name == "MLP":
x = x.mean(1)
y_hat = length_predictor(x)
if length_predictor.as_classification:
preds.extend(y_hat.cpu().detach().argmax(1).tolist())
else:
preds.extend(torch.round(y_hat.cpu().detach()).squeeze().tolist())
test_acc = 100.*accuracy_score(preds, targets)
print("Test for {}:".format(length_predictor.name))
print("Test accuracy: ", test_acc)
f1 = 100*f1_score(preds, targets, average='macro')
results_dict.update({"Test acc":test_acc, "Test f1": f1})
print("Test f1 score: ", f1)
print("Train avg acc: {}, Val avg acc: {}".format(max(t_acc), max(v_acc)))
print()
results_dict.update({"Train Avg Acc": max(t_acc), "Train Avg Loss": min(train_l), "Val Avg Acc": max(v_acc), "Val Avg Loss": min(val_l)})
if not os.path.exists(self.kwargs['path_to_triples'].split("Triples")[0]+"Results/"):
os.mkdir(self.kwargs['path_to_triples'].split("Triples")[0]+"Results/")
if not os.path.exists(self.kwargs['path_to_triples'].split("Triples")[0]+"Model_weights/"):
os.mkdir(self.kwargs['path_to_triples'].split("Triples")[0]+"Model_weights/")
if embeddings is None:
with open(self.kwargs['path_to_triples'].split("Triples")[0]+"Results/"+"Train_Results_"+embedding_model.name+'_'+length_predictor.name+".json", "w") as file:
json.dump(results_dict, file, indent=3)
else:
with open(self.kwargs['path_to_triples'].split("Triples")[0]+"Results/"+"Train_Results_"+length_predictor.name+".json", "w") as file:
json.dump(results_dict, file, indent=3)
if save_model:
torch.save(length_predictor, self.kwargs['path_to_triples'].split("Triples")[0]+"Model_weights/"+length_predictor.name+".pt")
if embeddings is None:
torch.save(embedding_model, self.kwargs['path_to_triples'].split("Triples")[0]+"Model_weights/"+embedding_model.name+'_'+length_predictor.name+".pt")
print("{} saved".format(length_predictor.name))
return t_acc, v_acc, train_l, val_l
def train_and_eval(self, data_train, data_test, epochs=200, clp_batch_size=64, tc_batch_size=512, kf_n_splits=10, cross_validate=False, test=False, save_model = False, include_embedding_loss=False, optimizer = 'Adam', tc_label_smoothing=0.9, record_runtime=False):
"""
function for training a concept length learner in DL KGs
key args
-> cll_batch_size: batch_size for the concept learner training (clp: concept length predictor)
-> tc_batch_size: batch_size for the training the embedding model (tc: triple classification)
key args
"""
if cross_validate:
return self.cross_validate(data_train, data_test, epochs, clp_batch_size, tc_batch_size,
kf_n_splits, test, save_model, include_embedding_loss, optimizer, tc_label_smoothing)
else:
return self.train(data_train, data_test, epochs, clp_batch_size, tc_batch_size,
kf_n_splits, test, save_model, include_embedding_loss, optimizer, tc_label_smoothing, record_runtime)
    def train_all_nets(self, List_nets, data_train, data_test, epochs=200, clp_batch_size=64, tc_batch_size=512, kf_n_splits=10, cross_validate=False, test=False, save_model = False, include_embedding_loss=False, optimizer = 'Adam', tc_label_smoothing=0.9, record_runtime=False):
        """Train and evaluate every concept length predictor named in List_nets.

        Each network is re-initialised via ``self.clp.refresh()`` and trained
        with ``train_and_eval``; per-network accuracy/loss curves are
        accumulated, dumped as JSON under ``<triples dir>/Plot_data/`` and,
        when cross-validating, also plotted into
        ``<triples dir>/Training_curves/``.
        """
        if self.clp.embedding_model is None:
            # No trainable embedding model: fetch pre-trained embeddings and
            # vectorise train/validation data once, up front.
            embeddings = self.clp.get_embedding(embedding_model=None)
            print("Loading train and validate data\n")
            data_train = self.clp.dataloader.load(embeddings, data=data_train, shuffle=True)
            data_test = self.clp.dataloader.load(embeddings, data=data_test, shuffle=False)
            print("Done loading train and validate data\n")
        Training_data = dict()    # per-network training curves, keys "acc"/"loss"
        Validation_data = dict()  # per-network validation curves, keys "acc"/"loss"
        if not os.path.exists(self.kwargs['path_to_triples'].split("Triples")[0]+"Training_curves"):
            os.mkdir(self.kwargs['path_to_triples'].split("Triples")[0]+"Training_curves")
        if cross_validate:
            for net in List_nets:
                self.clp.learner_name = net
                # Rebuild the predictor for this architecture.
                self.clp.refresh()
                t_acc, v_acc, train_l, val_l = self.train_and_eval(data_train, data_test, epochs, clp_batch_size, tc_batch_size, kf_n_splits, cross_validate, test, save_model, include_embedding_loss, optimizer, tc_label_smoothing, record_runtime)
                Training_data.setdefault("acc", []).append(list(t_acc))
                Training_data.setdefault("loss", []).append(list(train_l))
                Validation_data.setdefault("acc", []).append(list(v_acc))
                Validation_data.setdefault("loss", []).append(list(val_l))
            if not os.path.exists(self.kwargs['path_to_triples'].split("Triples")[0]+"Plot_data/"):
                os.mkdir(self.kwargs['path_to_triples'].split("Triples")[0]+"Plot_data/")
            # NOTE(review): only Training_data is serialised to JSON here;
            # Validation_data is plotted below but never written — confirm intended.
            if len(List_nets) > 1:
                with open(self.kwargs['path_to_triples'].split("Triples")[0]+"Plot_data/plot_data_with_val.json", "w") as plot_file:
                    json.dump(Training_data, plot_file, indent=3)
            else:
                with open(self.kwargs['path_to_triples'].split("Triples")[0]+"Plot_data/plot_data_with_val_single.json", "w") as plot_file:
                    json.dump(Training_data, plot_file, indent=3)
            # One curve per network on shared axes; save and reset each figure.
            for crv in Training_data['acc']:
                plt.plot(crv)
            plt.legend(tuple(List_nets))
            plt.xlabel("Number of epochs")
            plt.ylabel("Accuracy (%)")
            plt.savefig(self.kwargs['path_to_triples'].split("Triples")[0]+"Training_curves/tr_acc.png")
            plt.close()
            for crv in Training_data['loss']:
                plt.plot(crv)
            plt.legend(tuple(List_nets))
            plt.xlabel("Number of epochs")
            plt.ylabel("Loss")
            plt.savefig(self.kwargs['path_to_triples'].split("Triples")[0]+"Training_curves/tr_loss.png")
            plt.close()
            for crv in Validation_data['acc']:
                plt.plot(crv)
            plt.legend(tuple(List_nets))
            plt.xlabel("Number of epochs")
            plt.ylabel("Accuracy (%)")
            plt.savefig(self.kwargs['path_to_triples'].split("Triples")[0]+"Training_curves/val_acc.png")
            plt.close()
            for crv in Validation_data['loss']:
                plt.plot(crv)
            plt.legend(tuple(List_nets))
            plt.xlabel("Number of epochs")
            plt.ylabel("Loss")
            plt.savefig(self.kwargs['path_to_triples'].split("Triples")[0]+"Training_curves/val_loss.png")
            plt.close()
        else:
            for net in List_nets:
                self.clp.learner_name = net
                self.clp.refresh()
                # NOTE(review): two values are unpacked here while the
                # cross-validation branch above unpacks four — confirm the
                # return arity of self.train on the non-cross-validation path.
                t_acc, train_l = self.train_and_eval(data_train, data_test, epochs, clp_batch_size, tc_batch_size, kf_n_splits, cross_validate, test, save_model, include_embedding_loss, optimizer, tc_label_smoothing, record_runtime)
                Training_data.setdefault("acc", []).append(t_acc)
                Training_data.setdefault("loss", []).append(train_l)
            if not os.path.exists(self.kwargs['path_to_triples'].split("Triples")[0]+"Plot_data/"):
                os.mkdir(self.kwargs['path_to_triples'].split("Triples")[0]+"Plot_data/")
            if len(List_nets) > 1:
                with open(self.kwargs['path_to_triples'].split("Triples")[0]+"Plot_data/plot_data_no_val.json", "w") as plot_file:
                    json.dump(Training_data, plot_file, indent=3)
            else:
                with open(self.kwargs['path_to_triples'].split("Triples")[0]+"Plot_data/plot_data_no_val_single.json", "w") as plot_file:
                    json.dump(Training_data, plot_file, indent=3)
            # for crv in Training_data['acc']:
            # plt.plot(crv)
            # plt.legend(tuple(List_nets))
            # plt.xlabel("Number of epochs")
            # plt.ylabel("Accuracy (%)")
            # plt.savefig(self.kwargs['path_to_triples'].split("Triples")[0]+"Training_curves/no_val_tr_acc.png")
            # plt.close()
            #
            # for crv in Training_data['loss']:
            # plt.plot(crv)
            # plt.legend(tuple(List_nets))
            # plt.xlabel("Number of epochs")
            # plt.ylabel("Loss")
            # plt.savefig(self.kwargs['path_to_triples'].split("Triples")[0]+"Training_curves/no_val_tr_loss.png")
            # plt.close()
            #
| 55.34
| 279
| 0.551161
| 5,240
| 44,272
| 4.412595
| 0.062595
| 0.055791
| 0.024219
| 0.027679
| 0.852046
| 0.838812
| 0.818614
| 0.796038
| 0.779128
| 0.776274
| 0
| 0.010653
| 0.334207
| 44,272
| 799
| 280
| 55.409262
| 0.773782
| 0.048993
| 0
| 0.763713
| 0
| 0
| 0.074062
| 0.005957
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015471
| false
| 0
| 0.021097
| 0
| 0.056259
| 0.075949
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7acafb271ca92b2cbf6c978c41a5d5bf5fdddb6d
| 784
|
py
|
Python
|
ex018.py
|
lucivaniairis/primeiro-reposit-rio-
|
394fab574304c1a3456c3985de17cae1dc1c9f37
|
[
"MIT"
] | null | null | null |
ex018.py
|
lucivaniairis/primeiro-reposit-rio-
|
394fab574304c1a3456c3985de17cae1dc1c9f37
|
[
"MIT"
] | null | null | null |
ex018.py
|
lucivaniairis/primeiro-reposit-rio-
|
394fab574304c1a3456c3985de17cae1dc1c9f37
|
[
"MIT"
] | null | null | null |
# Exercise 18: read an angle in degrees and print its sine, cosine and tangent.
# (The earlier commented-out duplicate implementation using `import math` was
# removed; it was identical dead code.)
from math import sin, cos, tan, radians

angulo = float(input('Digite um Ângulo:'))
# math trig functions expect radians, so convert the user's degrees first.
seno = sin(radians(angulo))
print('O ângulo de {} tem o seno de {:.2f}'.format(angulo, seno))
cosseno = cos(radians(angulo))
print('O angulo de {} tem o cosseno de {:.2f}'.format(angulo, cosseno))
tangente = tan(radians(angulo))
print('O ângulo de {} tem a tangente de {:.2f}'.format(angulo, tangente))
| 43.555556
| 75
| 0.686224
| 123
| 784
| 4.373984
| 0.186992
| 0.169145
| 0.200743
| 0.211896
| 0.869888
| 0.862454
| 0.862454
| 0.736059
| 0.72119
| 0.72119
| 0
| 0.008759
| 0.126276
| 784
| 17
| 76
| 46.117647
| 0.776642
| 0.47449
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0.375
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
bb0c473228711638fc9f26fbaf0f29d75930cc8d
| 155
|
py
|
Python
|
formaldict/__init__.py
|
Opus10/formaldict
|
eb1208e3f4b15dfae199c579e64db699ee093ab9
|
[
"BSD-3-Clause"
] | 8
|
2020-06-24T16:01:15.000Z
|
2020-06-29T17:30:42.000Z
|
formaldict/__init__.py
|
Opus10/formaldict
|
eb1208e3f4b15dfae199c579e64db699ee093ab9
|
[
"BSD-3-Clause"
] | null | null | null |
formaldict/__init__.py
|
Opus10/formaldict
|
eb1208e3f4b15dfae199c579e64db699ee093ab9
|
[
"BSD-3-Clause"
] | 1
|
2020-06-29T01:32:13.000Z
|
2020-06-29T01:32:13.000Z
|
from formaldict.core import Errors
from formaldict.core import FormalDict
from formaldict.core import Schema
__all__ = ['Errors', 'FormalDict', 'Schema']
| 25.833333
| 44
| 0.793548
| 19
| 155
| 6.263158
| 0.368421
| 0.352941
| 0.453782
| 0.605042
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116129
| 155
| 5
| 45
| 31
| 0.868613
| 0
| 0
| 0
| 0
| 0
| 0.141935
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
bb487084046a34e5bbd74ce731dcb38b685ed616
| 84
|
py
|
Python
|
data_store/__init__.py
|
scaramallion/pydicom-data
|
bbb723879690bb77e077a6d57657930998e92bd5
|
[
"MIT"
] | null | null | null |
data_store/__init__.py
|
scaramallion/pydicom-data
|
bbb723879690bb77e077a6d57657930998e92bd5
|
[
"MIT"
] | null | null | null |
data_store/__init__.py
|
scaramallion/pydicom-data
|
bbb723879690bb77e077a6d57657930998e92bd5
|
[
"MIT"
] | null | null | null |
from data_store._version import __version__
from data_store.utils import DataStore
| 21
| 43
| 0.869048
| 12
| 84
| 5.5
| 0.583333
| 0.242424
| 0.393939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 84
| 3
| 44
| 28
| 0.88
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
bb54f3fc2c4061183c232d1932341662d7b45438
| 113,702
|
py
|
Python
|
QCA4020_SDK/target/sectools/qdn/sectools/features/isc/iot/cfgparser/auto_gen_xml_config.py
|
r8d8/lastlock
|
78c02e5fbb129b1bc4147bd55eec2882267d7e87
|
[
"Apache-2.0"
] | null | null | null |
QCA4020_SDK/target/sectools/qdn/sectools/features/isc/iot/cfgparser/auto_gen_xml_config.py
|
r8d8/lastlock
|
78c02e5fbb129b1bc4147bd55eec2882267d7e87
|
[
"Apache-2.0"
] | null | null | null |
QCA4020_SDK/target/sectools/qdn/sectools/features/isc/iot/cfgparser/auto_gen_xml_config.py
|
r8d8/lastlock
|
78c02e5fbb129b1bc4147bd55eec2882267d7e87
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated by generateDS.py version 2.6b.
#
import sys
import getopt
import re as re_
# Select an ElementTree implementation at import time, preferring lxml and
# falling back through progressively older stdlib / standalone packages.
# etree_ holds the chosen module; XMLParser_import_library records which one.
etree_ = None
Verbose_import_ = False
( XMLParser_import_none, XMLParser_import_lxml,
    XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
    # lxml
    from lxml import etree as etree_
    XMLParser_import_library = XMLParser_import_lxml
    if Verbose_import_:
        print("running with lxml.etree")
except ImportError:
    try:
        # cElementTree from Python 2.5+
        import xml.etree.cElementTree as etree_
        XMLParser_import_library = XMLParser_import_elementtree
        if Verbose_import_:
            print("running with cElementTree on Python 2.5+")
    except ImportError:
        try:
            # ElementTree from Python 2.5+
            import xml.etree.ElementTree as etree_
            XMLParser_import_library = XMLParser_import_elementtree
            if Verbose_import_:
                print("running with ElementTree on Python 2.5+")
        except ImportError:
            try:
                # normal cElementTree install
                import cElementTree as etree_
                XMLParser_import_library = XMLParser_import_elementtree
                if Verbose_import_:
                    print("running with cElementTree")
            except ImportError:
                try:
                    # normal ElementTree install
                    import elementtree.ElementTree as etree_
                    XMLParser_import_library = XMLParser_import_elementtree
                    if Verbose_import_:
                        print("running with ElementTree")
                except ImportError:
                    raise ImportError("Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
    """Parse an XML document with the ElementTree library selected at import time."""
    using_lxml = XMLParser_import_library == XMLParser_import_lxml
    if using_lxml and 'parser' not in kwargs:
        # lxml's ETCompat parser ignores comments, matching stdlib ElementTree.
        kwargs['parser'] = etree_.ETCompatXMLParser()
    return etree_.parse(*args, **kwargs)
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
    from generatedssuper import GeneratedsSuper
except ImportError:
    # Fall back to the built-in helper base class when no user-supplied
    # generatedssuper module exists.  The original generated code used the
    # Python-2-only "except ImportError, exp:" form (and the bound name was
    # never used); the bare form below is valid on Python 2 and 3 alike.
    class GeneratedsSuper(object):
        """Formatting and validation helpers shared by all generated classes.

        gds_format_* render typed values for XML export; gds_validate_*
        sanity-check parsed text (delegating to raise_parse_error on bad
        input) and return it unchanged.
        """
        def gds_format_string(self, input_data, input_name=''):
            return input_data
        def gds_validate_string(self, input_data, node, input_name=''):
            return input_data
        def gds_format_integer(self, input_data, input_name=''):
            return '%d' % input_data
        def gds_validate_integer(self, input_data, node, input_name=''):
            return input_data
        def gds_format_integer_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_integer_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                try:
                    fvalue = float(value)
                except (TypeError, ValueError):
                    raise_parse_error(node, 'Requires sequence of integers')
            return input_data
        def gds_format_float(self, input_data, input_name=''):
            return '%f' % input_data
        def gds_validate_float(self, input_data, node, input_name=''):
            return input_data
        def gds_format_float_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_float_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                try:
                    fvalue = float(value)
                except (TypeError, ValueError):
                    raise_parse_error(node, 'Requires sequence of floats')
            return input_data
        def gds_format_double(self, input_data, input_name=''):
            return '%e' % input_data
        def gds_validate_double(self, input_data, node, input_name=''):
            return input_data
        def gds_format_double_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_double_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                try:
                    fvalue = float(value)
                except (TypeError, ValueError):
                    raise_parse_error(node, 'Requires sequence of doubles')
            return input_data
        def gds_format_boolean(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_boolean(self, input_data, node, input_name=''):
            return input_data
        def gds_format_boolean_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_boolean_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                if value not in ('true', '1', 'false', '0', ):
                    raise_parse_error(node, 'Requires sequence of booleans ("true", "1", "false", "0")')
            return input_data
        def gds_str_lower(self, instring):
            return instring.lower()
        def get_path_(self, node):
            # Build an absolute /a/b/c style path by walking ancestors.
            path_list = []
            self.get_path_list_(node, path_list)
            path_list.reverse()
            path = '/'.join(path_list)
            return path
        Tag_strip_pattern_ = re_.compile(r'\{.*\}')
        def get_path_list_(self, node, path_list):
            if node is None:
                return
            tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
            if tag:
                path_list.append(tag)
            # NOTE: getparent() requires lxml elements.
            self.get_path_list_(node.getparent(), path_list)
        def get_class_obj_(self, node, default_class=None):
            # Honor an xsi:type attribute by resolving the named class from globals().
            class_obj1 = default_class
            if 'xsi' in node.nsmap:
                classname = node.get('{%s}type' % node.nsmap['xsi'])
                if classname is not None:
                    names = classname.split(':')
                    if len(names) == 2:
                        classname = names[1]
                    class_obj2 = globals().get(classname)
                    if class_obj2 is not None:
                        class_obj1 = class_obj2
            return class_obj1
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
# Encoding applied to exported text via .encode(ExternalEncoding).
ExternalEncoding = 'ascii'
# Splits an ElementTree tag into its optional {namespace} part and local name.
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
# Collapses runs of whitespace/newlines when cleaning string content.
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
# Extracts namespace URI and local name from a '{uri}name' tag.
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
#
# Support/utility functions.
#
def showIndent(outfile, level):
    """Write four spaces of indentation per nesting *level* to *outfile*."""
    # One write() call instead of one per level (same output, fewer tiny writes).
    outfile.write('    ' * level)
def quote_xml(inStr):
    """Escape &, < and > in *inStr* for use as XML text content.

    Non-string input is first rendered with '%s' formatting; falsy input
    (empty string, None, 0) yields ''.
    """
    if not inStr:
        return ''
    # Original generated code checked py2 `basestring`; `str` is equivalent
    # in value terms and works on Python 3.
    s1 = inStr if isinstance(inStr, str) else '%s' % inStr
    # The checked-in copy had no-op replace('&','&')-style calls — the entity
    # text had been lost; restore the standard XML escapes. '&' must go first.
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    return s1
def quote_attrib(inStr):
    """Escape *inStr* and wrap it in quotes for use as an XML attribute value.

    Double quotes are preferred; single quotes are used when the value
    contains double quotes, and &quot; entities when it contains both.
    """
    s1 = inStr if isinstance(inStr, str) else '%s' % inStr
    # Restore the XML entity escapes lost in the checked-in copy
    # (the original had no-op replace calls). '&' must be escaped first.
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    if '"' in s1:
        if "'" in s1:
            # Both quote kinds present: encode double quotes as entities.
            s1 = '"%s"' % s1.replace('"', "&quot;")
        else:
            s1 = "'%s'" % s1
    else:
        s1 = '"%s"' % s1
    return s1
def quote_python(inStr):
    """Return *inStr* as a Python source literal, picking a quote style that
    avoids escaping; triple quotes are used for multi-line values."""
    text = inStr
    if text.find("'") != -1:
        # Apostrophes present: use double-quoted forms, escaping any
        # embedded double quotes.
        if text.find('"') != -1:
            text = text.replace('"', '\\"')
        return '"%s"' % text if text.find('\n') == -1 else '"""%s"""' % text
    # No apostrophes: single-quoted forms suffice.
    return "'%s'" % text if text.find('\n') == -1 else "'''%s'''" % text
def get_all_text_(node):
    """Concatenate *node*'s text with the tail text of each direct child."""
    pieces = [node.text] if node.text is not None else []
    pieces.extend(child.tail for child in node if child.tail is not None)
    return ''.join(pieces)
def find_attr_value_(attr_name, node):
    """Look up attribute *attr_name* on *node*, resolving a 'prefix:name'
    form through node.nsmap (lxml-specific). Returns None when absent."""
    attrs = node.attrib
    parts = attr_name.split(':')
    if len(parts) == 1:
        return attrs.get(attr_name)
    if len(parts) == 2:
        prefix, local = parts
        uri = node.nsmap.get(prefix)
        if uri is not None:
            return attrs.get('{%s}%s' % (uri, local))
    # Malformed name (more than one colon) or unknown prefix.
    return None
class GDSParseError(Exception):
    """Raised (via raise_parse_error) when parsing or validation fails."""
    pass
def raise_parse_error(node, msg):
    """Raise GDSParseError for *node*, including the source line when lxml is in use."""
    if XMLParser_import_library == XMLParser_import_lxml:
        # lxml elements carry a sourceline attribute for better diagnostics.
        detail = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
    else:
        detail = '%s (element %s)' % (msg, node.tag, )
    raise GDSParseError(detail)
class MixedContainer:
    """Holds one piece of mixed XML content: plain text, a simple typed value,
    or a nested complex object, tagged with category/content_type constants."""
    # Constants for category:
    CategoryNone = 0
    CategoryText = 1
    CategorySimple = 2
    CategoryComplex = 3
    # Constants for content_type:
    TypeNone = 0
    TypeText = 1
    TypeString = 2
    TypeInteger = 3
    TypeFloat = 4
    TypeDecimal = 5
    TypeDouble = 6
    TypeBoolean = 7
    def __init__(self, category, content_type, name, value):
        self.category = category
        self.content_type = content_type
        self.name = name
        self.value = value
    def getCategory(self):
        return self.category
    def getContenttype(self, content_type):
        # NOTE(review): the content_type parameter is ignored; the stored
        # value is returned (generated code quirk).
        return self.content_type
    def getValue(self):
        return self.value
    def getName(self):
        return self.name
    def export(self, outfile, level, name, namespace):
        # Serialise this content item as XML, dispatching on category.
        if self.category == MixedContainer.CategoryText:
            # Prevent exporting empty content as empty lines.
            if self.value.strip():
                outfile.write(self.value)
        elif self.category == MixedContainer.CategorySimple:
            self.exportSimple(outfile, level, name)
        else:    # category == MixedContainer.CategoryComplex
            self.value.export(outfile, level, namespace,name)
    def exportSimple(self, outfile, level, name):
        # Render a simple typed value as <name>value</name>, choosing the
        # printf conversion by content_type.
        if self.content_type == MixedContainer.TypeString:
            outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeInteger or \
                self.content_type == MixedContainer.TypeBoolean:
            outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeFloat or \
                self.content_type == MixedContainer.TypeDecimal:
            outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeDouble:
            outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
    def exportLiteral(self, outfile, level, name):
        # Emit Python-literal construction code for this item.
        if self.category == MixedContainer.CategoryText:
            showIndent(outfile, level)
            outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \
                (self.category, self.content_type, self.name, self.value))
        elif self.category == MixedContainer.CategorySimple:
            showIndent(outfile, level)
            outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \
                (self.category, self.content_type, self.name, self.value))
        else:    # category == MixedContainer.CategoryComplex
            showIndent(outfile, level)
            outfile.write('model_.MixedContainer(%d, %d, "%s",\n' % \
                (self.category, self.content_type, self.name,))
            self.value.exportLiteral(outfile, level + 1)
            showIndent(outfile, level)
            outfile.write(')\n')
class MemberSpec_(object):
    """Descriptor for one member of a generated class: its name, data type
    (a single type string or a chain of type names), and container flag."""
    def __init__(self, name='', data_type='', container=0):
        self.name = name
        self.data_type = data_type
        self.container = container
    def set_name(self, name):
        self.name = name
    def get_name(self):
        return self.name
    def set_data_type(self, data_type):
        self.data_type = data_type
    def get_data_type_chain(self):
        return self.data_type
    def get_data_type(self):
        """Return the effective type: the last link of a chain, 'xs:string'
        for an empty chain, or the stored value when it is not a list."""
        chain = self.data_type
        if not isinstance(chain, list):
            return chain
        return chain[-1] if chain else 'xs:string'
    def set_container(self, container):
        self.container = container
    def get_container(self):
        return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class iot(GeneratedsSuper):
    """Generated binding for the root <iot> configuration element, holding
    five optional child objects (metadata, general_properties, parsegen,
    images_list, data_provisioning)."""
    # Hooks allowing a user subclass/superclass to replace the generated one.
    subclass = None
    superclass = None
    def __init__(self, metadata=None, general_properties=None, parsegen=None, images_list=None, data_provisioning=None):
        self.metadata = metadata
        self.general_properties = general_properties
        self.parsegen = parsegen
        self.images_list = images_list
        self.data_provisioning = data_provisioning
    def factory(*args_, **kwargs_):
        # Construct the registered subclass when one is set, else iot itself.
        if iot.subclass:
            return iot.subclass(*args_, **kwargs_)
        else:
            return iot(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_metadata(self): return self.metadata
    def set_metadata(self, metadata): self.metadata = metadata
    def get_general_properties(self): return self.general_properties
    def set_general_properties(self, general_properties): self.general_properties = general_properties
    def get_parsegen(self): return self.parsegen
    def set_parsegen(self, parsegen): self.parsegen = parsegen
    def get_images_list(self): return self.images_list
    def set_images_list(self, images_list): self.images_list = images_list
    def get_data_provisioning(self): return self.data_provisioning
    def set_data_provisioning(self, data_provisioning): self.data_provisioning = data_provisioning
    def export(self, outfile, level, namespace_='tns:', name_='iot', namespacedef_=''):
        # Serialise this element and its children as XML to outfile.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='iot')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            # Self-closing tag when there are no children.
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='tns:', name_='iot'):
        # <iot> declares no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespace_='tns:', name_='iot', fromsubclass_=False):
        # Export each child that is set, in schema order.
        if self.metadata:
            self.metadata.export(outfile, level, namespace_, name_='metadata', )
        if self.general_properties:
            self.general_properties.export(outfile, level, namespace_, name_='general_properties', )
        if self.parsegen:
            self.parsegen.export(outfile, level, namespace_, name_='parsegen', )
        if self.images_list:
            self.images_list.export(outfile, level, namespace_, name_='images_list', )
        if self.data_provisioning:
            self.data_provisioning.export(outfile, level, namespace_, name_='data_provisioning', )
    def hasContent_(self):
        # True when at least one child element is present.
        if (
            self.metadata is not None or
            self.general_properties is not None or
            self.parsegen is not None or
            self.images_list is not None or
            self.data_provisioning is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='iot'):
        # Emit Python-literal construction code for this object.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        if self.metadata is not None:
            showIndent(outfile, level)
            outfile.write('metadata=model_.complex_metadata(\n')
            self.metadata.exportLiteral(outfile, level, name_='metadata')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.general_properties is not None:
            showIndent(outfile, level)
            outfile.write('general_properties=model_.complex_general_properties(\n')
            self.general_properties.exportLiteral(outfile, level, name_='general_properties')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.parsegen is not None:
            showIndent(outfile, level)
            outfile.write('parsegen=model_.complex_parsegen(\n')
            self.parsegen.exportLiteral(outfile, level, name_='parsegen')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.images_list is not None:
            showIndent(outfile, level)
            outfile.write('images_list=model_.complex_images_list(\n')
            self.images_list.exportLiteral(outfile, level, name_='images_list')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.data_provisioning is not None:
            showIndent(outfile, level)
            outfile.write('data_provisioning=model_.complex_data_provisioning(\n')
            self.data_provisioning.exportLiteral(outfile, level, name_='data_provisioning')
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node):
        # Populate this object from a parsed ElementTree node.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Dispatch on the child's local tag name to the matching complex_* class.
        if nodeName_ == 'metadata':
            obj_ = complex_metadata.factory()
            obj_.build(child_)
            self.set_metadata(obj_)
        elif nodeName_ == 'general_properties':
            obj_ = complex_general_properties.factory()
            obj_.build(child_)
            self.set_general_properties(obj_)
        elif nodeName_ == 'parsegen':
            obj_ = complex_parsegen.factory()
            obj_.build(child_)
            self.set_parsegen(obj_)
        elif nodeName_ == 'images_list':
            obj_ = complex_images_list.factory()
            obj_.build(child_)
            self.set_images_list(obj_)
        elif nodeName_ == 'data_provisioning':
            obj_ = complex_data_provisioning.factory()
            obj_.build(child_)
            self.set_data_provisioning(obj_)
# end class iot
class complex_metadata(GeneratedsSuper):
    """Generated binding for the <metadata> element: the target chipset
    name and the configuration file version string."""
    subclass = None
    superclass = None
    def __init__(self, chipset='default', version='1.0'):
        self.chipset = chipset
        self.version = version
    def factory(*args_, **kwargs_):
        # Route construction through a registered subclass when one is installed.
        impl_ = complex_metadata.subclass or complex_metadata
        return impl_(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_chipset(self): return self.chipset
    def set_chipset(self, chipset): self.chipset = chipset
    def get_version(self): return self.version
    def set_version(self, version): self.version = version
    def export(self, outfile, level, namespace_='tns:', name_='complex_metadata', namespacedef_=''):
        """Serialize this element (and any children) as XML to outfile."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        self.exportAttributes(outfile, level, [], namespace_, name_='complex_metadata')
        if not self.hasContent_():
            # Childless element collapses to a self-closing tag.
            outfile.write('/>\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='tns:', name_='complex_metadata'):
        # No XML attributes defined for this element.
        pass
    def exportChildren(self, outfile, level, namespace_='tns:', name_='complex_metadata', fromsubclass_=False):
        # Each populated field becomes one indented child element.
        if self.chipset is not None:
            showIndent(outfile, level)
            outfile.write('<%schipset>%s</%schipset>\n' % (namespace_, self.gds_format_string(quote_xml(self.chipset).encode(ExternalEncoding), input_name='chipset'), namespace_))
        if self.version is not None:
            showIndent(outfile, level)
            outfile.write('<%sversion>%s</%sversion>\n' % (namespace_, self.gds_format_string(quote_xml(self.version).encode(ExternalEncoding), input_name='version'), namespace_))
    def hasContent_(self):
        return (
            self.chipset is not None or
            self.version is not None
        )
    def exportLiteral(self, outfile, level, name_='complex_metadata'):
        """Serialize this element as Python constructor literals."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        if self.chipset is not None:
            showIndent(outfile, level)
            outfile.write('chipset=%s,\n' % quote_python(self.chipset).encode(ExternalEncoding))
        if self.version is not None:
            showIndent(outfile, level)
            outfile.write('version=%s,\n' % quote_python(self.version).encode(ExternalEncoding))
    def build(self, node):
        """Populate this instance from an XML element-tree node."""
        self.buildAttributes(node, node.attrib, [])
        for element_ in node:
            tag_ = Tag_pattern_.match(element_.tag).groups()[-1]
            self.buildChildren(element_, node, tag_)
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Validate and store the text content of recognised children.
        if nodeName_ == 'chipset':
            self.chipset = self.gds_validate_string(child_.text, node, 'chipset')
        elif nodeName_ == 'version':
            self.version = self.gds_validate_string(child_.text, node, 'version')
# end class complex_metadata
class complex_general_properties(GeneratedsSuper):
    """Generated binding for <general_properties>: image-signing
    configuration shared by all images (certificate and key sizes, chained
    cert counts, MSM/OEM identifiers, hash and padding choices, and
    secure-boot options).

    Fixes over the generated original: the Python-2-only
    ``except (TypeError, ValueError), exp`` syntax is replaced by the
    ``as``-form (valid on Python 2.6+ and Python 3), and the repeated
    integer/boolean child-parsing blocks are factored into private helpers.
    The public interface is unchanged.
    """
    subclass = None
    superclass = None
    def __init__(self, selected_cert_config='iot', selected_signer='base', max_cert_size=2048, key_size=2048, exponent=257, num_certs_in_certchain=2, num_root_certs=1, max_num_root_certs=1, msm_part='0x00000000', sw_id='0x0000000000000000', oem_id='0x0000', model_id='0x0000', soc_hw_version='0x00000000', in_use_soc_hw_version=0, use_serial_number_in_signing=0, debug='0x0000000000000000', hash_algorithm='sha256', segment_hash_algorithm='sha256', rsa_padding='pkcs', hmac=True, secboot_version='1.0', oem_sign=True, dsa_type='rsa'):
        self.selected_cert_config = selected_cert_config
        self.selected_signer = selected_signer
        self.max_cert_size = max_cert_size
        self.key_size = key_size
        self.exponent = exponent
        self.num_certs_in_certchain = num_certs_in_certchain
        self.num_root_certs = num_root_certs
        self.max_num_root_certs = max_num_root_certs
        self.msm_part = msm_part
        self.sw_id = sw_id
        self.oem_id = oem_id
        self.model_id = model_id
        self.soc_hw_version = soc_hw_version
        self.in_use_soc_hw_version = in_use_soc_hw_version
        self.use_serial_number_in_signing = use_serial_number_in_signing
        self.debug = debug
        self.hash_algorithm = hash_algorithm
        self.segment_hash_algorithm = segment_hash_algorithm
        self.rsa_padding = rsa_padding
        self.hmac = hmac
        self.secboot_version = secboot_version
        self.oem_sign = oem_sign
        self.dsa_type = dsa_type
    def factory(*args_, **kwargs_):
        if complex_general_properties.subclass:
            return complex_general_properties.subclass(*args_, **kwargs_)
        else:
            return complex_general_properties(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_selected_cert_config(self): return self.selected_cert_config
    def set_selected_cert_config(self, selected_cert_config): self.selected_cert_config = selected_cert_config
    def get_selected_signer(self): return self.selected_signer
    def set_selected_signer(self, selected_signer): self.selected_signer = selected_signer
    def get_max_cert_size(self): return self.max_cert_size
    def set_max_cert_size(self, max_cert_size): self.max_cert_size = max_cert_size
    def get_key_size(self): return self.key_size
    def set_key_size(self, key_size): self.key_size = key_size
    def get_exponent(self): return self.exponent
    def set_exponent(self, exponent): self.exponent = exponent
    def get_num_certs_in_certchain(self): return self.num_certs_in_certchain
    def set_num_certs_in_certchain(self, num_certs_in_certchain): self.num_certs_in_certchain = num_certs_in_certchain
    def get_num_root_certs(self): return self.num_root_certs
    def set_num_root_certs(self, num_root_certs): self.num_root_certs = num_root_certs
    def get_max_num_root_certs(self): return self.max_num_root_certs
    def set_max_num_root_certs(self, max_num_root_certs): self.max_num_root_certs = max_num_root_certs
    def get_msm_part(self): return self.msm_part
    def set_msm_part(self, msm_part): self.msm_part = msm_part
    def get_sw_id(self): return self.sw_id
    def set_sw_id(self, sw_id): self.sw_id = sw_id
    def get_oem_id(self): return self.oem_id
    def set_oem_id(self, oem_id): self.oem_id = oem_id
    def get_model_id(self): return self.model_id
    def set_model_id(self, model_id): self.model_id = model_id
    def get_soc_hw_version(self): return self.soc_hw_version
    def set_soc_hw_version(self, soc_hw_version): self.soc_hw_version = soc_hw_version
    def get_in_use_soc_hw_version(self): return self.in_use_soc_hw_version
    def set_in_use_soc_hw_version(self, in_use_soc_hw_version): self.in_use_soc_hw_version = in_use_soc_hw_version
    def get_use_serial_number_in_signing(self): return self.use_serial_number_in_signing
    def set_use_serial_number_in_signing(self, use_serial_number_in_signing): self.use_serial_number_in_signing = use_serial_number_in_signing
    def get_debug(self): return self.debug
    def set_debug(self, debug): self.debug = debug
    def get_hash_algorithm(self): return self.hash_algorithm
    def set_hash_algorithm(self, hash_algorithm): self.hash_algorithm = hash_algorithm
    def get_segment_hash_algorithm(self): return self.segment_hash_algorithm
    def set_segment_hash_algorithm(self, segment_hash_algorithm): self.segment_hash_algorithm = segment_hash_algorithm
    def get_rsa_padding(self): return self.rsa_padding
    def set_rsa_padding(self, rsa_padding): self.rsa_padding = rsa_padding
    def get_hmac(self): return self.hmac
    def set_hmac(self, hmac): self.hmac = hmac
    def get_secboot_version(self): return self.secboot_version
    def set_secboot_version(self, secboot_version): self.secboot_version = secboot_version
    def get_oem_sign(self): return self.oem_sign
    def set_oem_sign(self, oem_sign): self.oem_sign = oem_sign
    def get_dsa_type(self): return self.dsa_type
    def set_dsa_type(self, dsa_type): self.dsa_type = dsa_type
    def export(self, outfile, level, namespace_='tns:', name_='complex_general_properties', namespacedef_=''):
        """Write this element and all populated children as XML to outfile."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='complex_general_properties')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='tns:', name_='complex_general_properties'):
        # No XML attributes defined for this element.
        pass
    def exportChildren(self, outfile, level, namespace_='tns:', name_='complex_general_properties', fromsubclass_=False):
        # Each populated field is written as one indented child element.
        if self.selected_cert_config is not None:
            showIndent(outfile, level)
            outfile.write('<%sselected_cert_config>%s</%sselected_cert_config>\n' % (namespace_, self.gds_format_string(quote_xml(self.selected_cert_config).encode(ExternalEncoding), input_name='selected_cert_config'), namespace_))
        if self.selected_signer is not None:
            showIndent(outfile, level)
            outfile.write('<%sselected_signer>%s</%sselected_signer>\n' % (namespace_, self.gds_format_string(quote_xml(self.selected_signer).encode(ExternalEncoding), input_name='selected_signer'), namespace_))
        if self.max_cert_size is not None:
            showIndent(outfile, level)
            outfile.write('<%smax_cert_size>%s</%smax_cert_size>\n' % (namespace_, self.gds_format_integer(self.max_cert_size, input_name='max_cert_size'), namespace_))
        if self.key_size is not None:
            showIndent(outfile, level)
            outfile.write('<%skey_size>%s</%skey_size>\n' % (namespace_, self.gds_format_integer(self.key_size, input_name='key_size'), namespace_))
        if self.exponent is not None:
            showIndent(outfile, level)
            outfile.write('<%sexponent>%s</%sexponent>\n' % (namespace_, self.gds_format_integer(self.exponent, input_name='exponent'), namespace_))
        if self.num_certs_in_certchain is not None:
            showIndent(outfile, level)
            outfile.write('<%snum_certs_in_certchain>%s</%snum_certs_in_certchain>\n' % (namespace_, self.gds_format_integer(self.num_certs_in_certchain, input_name='num_certs_in_certchain'), namespace_))
        if self.num_root_certs is not None:
            showIndent(outfile, level)
            outfile.write('<%snum_root_certs>%s</%snum_root_certs>\n' % (namespace_, self.gds_format_integer(self.num_root_certs, input_name='num_root_certs'), namespace_))
        if self.max_num_root_certs is not None:
            showIndent(outfile, level)
            outfile.write('<%smax_num_root_certs>%s</%smax_num_root_certs>\n' % (namespace_, self.gds_format_integer(self.max_num_root_certs, input_name='max_num_root_certs'), namespace_))
        if self.msm_part is not None:
            showIndent(outfile, level)
            outfile.write('<%smsm_part>%s</%smsm_part>\n' % (namespace_, self.gds_format_string(quote_xml(self.msm_part).encode(ExternalEncoding), input_name='msm_part'), namespace_))
        if self.sw_id is not None:
            showIndent(outfile, level)
            outfile.write('<%ssw_id>%s</%ssw_id>\n' % (namespace_, self.gds_format_string(quote_xml(self.sw_id).encode(ExternalEncoding), input_name='sw_id'), namespace_))
        if self.oem_id is not None:
            showIndent(outfile, level)
            outfile.write('<%soem_id>%s</%soem_id>\n' % (namespace_, self.gds_format_string(quote_xml(self.oem_id).encode(ExternalEncoding), input_name='oem_id'), namespace_))
        if self.model_id is not None:
            showIndent(outfile, level)
            outfile.write('<%smodel_id>%s</%smodel_id>\n' % (namespace_, self.gds_format_string(quote_xml(self.model_id).encode(ExternalEncoding), input_name='model_id'), namespace_))
        if self.soc_hw_version is not None:
            showIndent(outfile, level)
            outfile.write('<%ssoc_hw_version>%s</%ssoc_hw_version>\n' % (namespace_, self.gds_format_string(quote_xml(self.soc_hw_version).encode(ExternalEncoding), input_name='soc_hw_version'), namespace_))
        if self.in_use_soc_hw_version is not None:
            showIndent(outfile, level)
            outfile.write('<%sin_use_soc_hw_version>%s</%sin_use_soc_hw_version>\n' % (namespace_, self.gds_format_integer(self.in_use_soc_hw_version, input_name='in_use_soc_hw_version'), namespace_))
        if self.use_serial_number_in_signing is not None:
            showIndent(outfile, level)
            outfile.write('<%suse_serial_number_in_signing>%s</%suse_serial_number_in_signing>\n' % (namespace_, self.gds_format_integer(self.use_serial_number_in_signing, input_name='use_serial_number_in_signing'), namespace_))
        if self.debug is not None:
            showIndent(outfile, level)
            outfile.write('<%sdebug>%s</%sdebug>\n' % (namespace_, self.gds_format_string(quote_xml(self.debug).encode(ExternalEncoding), input_name='debug'), namespace_))
        if self.hash_algorithm is not None:
            showIndent(outfile, level)
            outfile.write('<%shash_algorithm>%s</%shash_algorithm>\n' % (namespace_, self.gds_format_string(quote_xml(self.hash_algorithm).encode(ExternalEncoding), input_name='hash_algorithm'), namespace_))
        if self.segment_hash_algorithm is not None:
            showIndent(outfile, level)
            outfile.write('<%ssegment_hash_algorithm>%s</%ssegment_hash_algorithm>\n' % (namespace_, self.gds_format_string(quote_xml(self.segment_hash_algorithm).encode(ExternalEncoding), input_name='segment_hash_algorithm'), namespace_))
        if self.rsa_padding is not None:
            showIndent(outfile, level)
            outfile.write('<%srsa_padding>%s</%srsa_padding>\n' % (namespace_, self.gds_format_string(quote_xml(self.rsa_padding).encode(ExternalEncoding), input_name='rsa_padding'), namespace_))
        if self.hmac is not None:
            showIndent(outfile, level)
            outfile.write('<%shmac>%s</%shmac>\n' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.hmac)), input_name='hmac'), namespace_))
        if self.secboot_version is not None:
            showIndent(outfile, level)
            outfile.write('<%ssecboot_version>%s</%ssecboot_version>\n' % (namespace_, self.gds_format_string(quote_xml(self.secboot_version).encode(ExternalEncoding), input_name='secboot_version'), namespace_))
        if self.oem_sign is not None:
            showIndent(outfile, level)
            outfile.write('<%soem_sign>%s</%soem_sign>\n' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.oem_sign)), input_name='oem_sign'), namespace_))
        if self.dsa_type is not None:
            showIndent(outfile, level)
            outfile.write('<%sdsa_type>%s</%sdsa_type>\n' % (namespace_, self.gds_format_string(quote_xml(self.dsa_type).encode(ExternalEncoding), input_name='dsa_type'), namespace_))
    def hasContent_(self):
        if (
            self.selected_cert_config is not None or
            self.selected_signer is not None or
            self.max_cert_size is not None or
            self.key_size is not None or
            self.exponent is not None or
            self.num_certs_in_certchain is not None or
            self.num_root_certs is not None or
            self.max_num_root_certs is not None or
            self.msm_part is not None or
            self.sw_id is not None or
            self.oem_id is not None or
            self.model_id is not None or
            self.soc_hw_version is not None or
            self.in_use_soc_hw_version is not None or
            self.use_serial_number_in_signing is not None or
            self.debug is not None or
            self.hash_algorithm is not None or
            self.segment_hash_algorithm is not None or
            self.rsa_padding is not None or
            self.hmac is not None or
            self.secboot_version is not None or
            self.oem_sign is not None or
            self.dsa_type is not None
        ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='complex_general_properties'):
        """Serialize this element as Python constructor literals."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        if self.selected_cert_config is not None:
            showIndent(outfile, level)
            outfile.write('selected_cert_config=%s,\n' % quote_python(self.selected_cert_config).encode(ExternalEncoding))
        if self.selected_signer is not None:
            showIndent(outfile, level)
            outfile.write('selected_signer=%s,\n' % quote_python(self.selected_signer).encode(ExternalEncoding))
        if self.max_cert_size is not None:
            showIndent(outfile, level)
            outfile.write('max_cert_size=%d,\n' % self.max_cert_size)
        if self.key_size is not None:
            showIndent(outfile, level)
            outfile.write('key_size=%d,\n' % self.key_size)
        if self.exponent is not None:
            showIndent(outfile, level)
            outfile.write('exponent=%d,\n' % self.exponent)
        if self.num_certs_in_certchain is not None:
            showIndent(outfile, level)
            outfile.write('num_certs_in_certchain=%d,\n' % self.num_certs_in_certchain)
        if self.num_root_certs is not None:
            showIndent(outfile, level)
            outfile.write('num_root_certs=%d,\n' % self.num_root_certs)
        if self.max_num_root_certs is not None:
            showIndent(outfile, level)
            outfile.write('max_num_root_certs=%d,\n' % self.max_num_root_certs)
        if self.msm_part is not None:
            showIndent(outfile, level)
            outfile.write('msm_part=%s,\n' % quote_python(self.msm_part).encode(ExternalEncoding))
        if self.sw_id is not None:
            showIndent(outfile, level)
            outfile.write('sw_id=%s,\n' % quote_python(self.sw_id).encode(ExternalEncoding))
        if self.oem_id is not None:
            showIndent(outfile, level)
            outfile.write('oem_id=%s,\n' % quote_python(self.oem_id).encode(ExternalEncoding))
        if self.model_id is not None:
            showIndent(outfile, level)
            outfile.write('model_id=%s,\n' % quote_python(self.model_id).encode(ExternalEncoding))
        if self.soc_hw_version is not None:
            showIndent(outfile, level)
            outfile.write('soc_hw_version=%s,\n' % quote_python(self.soc_hw_version).encode(ExternalEncoding))
        if self.in_use_soc_hw_version is not None:
            showIndent(outfile, level)
            outfile.write('in_use_soc_hw_version=%d,\n' % self.in_use_soc_hw_version)
        if self.use_serial_number_in_signing is not None:
            showIndent(outfile, level)
            outfile.write('use_serial_number_in_signing=%d,\n' % self.use_serial_number_in_signing)
        if self.debug is not None:
            showIndent(outfile, level)
            outfile.write('debug=%s,\n' % quote_python(self.debug).encode(ExternalEncoding))
        if self.hash_algorithm is not None:
            showIndent(outfile, level)
            outfile.write('hash_algorithm=%s,\n' % quote_python(self.hash_algorithm).encode(ExternalEncoding))
        if self.segment_hash_algorithm is not None:
            showIndent(outfile, level)
            outfile.write('segment_hash_algorithm=%s,\n' % quote_python(self.segment_hash_algorithm).encode(ExternalEncoding))
        if self.rsa_padding is not None:
            showIndent(outfile, level)
            outfile.write('rsa_padding=%s,\n' % quote_python(self.rsa_padding).encode(ExternalEncoding))
        if self.hmac is not None:
            showIndent(outfile, level)
            outfile.write('hmac=%s,\n' % self.hmac)
        if self.secboot_version is not None:
            showIndent(outfile, level)
            outfile.write('secboot_version=%s,\n' % quote_python(self.secboot_version).encode(ExternalEncoding))
        if self.oem_sign is not None:
            showIndent(outfile, level)
            outfile.write('oem_sign=%s,\n' % self.oem_sign)
        if self.dsa_type is not None:
            showIndent(outfile, level)
            outfile.write('dsa_type=%s,\n' % quote_python(self.dsa_type).encode(ExternalEncoding))
    def build(self, node):
        """Populate this instance from an XML element-tree node."""
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def _parse_int_child_(self, child_, node, name_):
        # Shared parser for integer-valued child elements; raises a parse
        # error via raise_parse_error on non-numeric text.
        sval_ = child_.text
        try:
            ival_ = int(sval_)
        except (TypeError, ValueError) as exp:
            raise_parse_error(child_, 'requires integer: %s' % exp)
        return self.gds_validate_integer(ival_, node, name_)
    def _parse_bool_child_(self, child_, node, name_):
        # Shared parser for boolean-valued child elements; accepts only
        # 'true'/'1'/'false'/'0', anything else is a parse error.
        sval_ = child_.text
        if sval_ in ('true', '1'):
            ival_ = True
        elif sval_ in ('false', '0'):
            ival_ = False
        else:
            raise_parse_error(child_, 'requires boolean')
        return self.gds_validate_boolean(ival_, node, name_)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Populate the field named nodeName_ from one child XML element."""
        if nodeName_ == 'selected_cert_config':
            self.selected_cert_config = self.gds_validate_string(child_.text, node, 'selected_cert_config')
        elif nodeName_ == 'selected_signer':
            self.selected_signer = self.gds_validate_string(child_.text, node, 'selected_signer')
        elif nodeName_ == 'max_cert_size':
            self.max_cert_size = self._parse_int_child_(child_, node, 'max_cert_size')
        elif nodeName_ == 'key_size':
            self.key_size = self._parse_int_child_(child_, node, 'key_size')
        elif nodeName_ == 'exponent':
            self.exponent = self._parse_int_child_(child_, node, 'exponent')
        elif nodeName_ == 'num_certs_in_certchain':
            self.num_certs_in_certchain = self._parse_int_child_(child_, node, 'num_certs_in_certchain')
        elif nodeName_ == 'num_root_certs':
            self.num_root_certs = self._parse_int_child_(child_, node, 'num_root_certs')
        elif nodeName_ == 'max_num_root_certs':
            self.max_num_root_certs = self._parse_int_child_(child_, node, 'max_num_root_certs')
        elif nodeName_ == 'msm_part':
            self.msm_part = self.gds_validate_string(child_.text, node, 'msm_part')
        elif nodeName_ == 'sw_id':
            self.sw_id = self.gds_validate_string(child_.text, node, 'sw_id')
        elif nodeName_ == 'oem_id':
            self.oem_id = self.gds_validate_string(child_.text, node, 'oem_id')
        elif nodeName_ == 'model_id':
            self.model_id = self.gds_validate_string(child_.text, node, 'model_id')
        elif nodeName_ == 'soc_hw_version':
            self.soc_hw_version = self.gds_validate_string(child_.text, node, 'soc_hw_version')
        elif nodeName_ == 'in_use_soc_hw_version':
            self.in_use_soc_hw_version = self._parse_int_child_(child_, node, 'in_use_soc_hw_version')
        elif nodeName_ == 'use_serial_number_in_signing':
            self.use_serial_number_in_signing = self._parse_int_child_(child_, node, 'use_serial_number_in_signing')
        elif nodeName_ == 'debug':
            self.debug = self.gds_validate_string(child_.text, node, 'debug')
        elif nodeName_ == 'hash_algorithm':
            self.hash_algorithm = self.gds_validate_string(child_.text, node, 'hash_algorithm')
        elif nodeName_ == 'segment_hash_algorithm':
            self.segment_hash_algorithm = self.gds_validate_string(child_.text, node, 'segment_hash_algorithm')
        elif nodeName_ == 'rsa_padding':
            self.rsa_padding = self.gds_validate_string(child_.text, node, 'rsa_padding')
        elif nodeName_ == 'hmac':
            self.hmac = self._parse_bool_child_(child_, node, 'hmac')
        elif nodeName_ == 'secboot_version':
            self.secboot_version = self.gds_validate_string(child_.text, node, 'secboot_version')
        elif nodeName_ == 'oem_sign':
            self.oem_sign = self._parse_bool_child_(child_, node, 'oem_sign')
        elif nodeName_ == 'dsa_type':
            self.dsa_type = self.gds_validate_string(child_.text, node, 'dsa_type')
# end class complex_general_properties
class complex_parsegen(GeneratedsSuper):
    """Generated binding for the <parsegen> element: a thin wrapper around
    the image_types_list child element."""
    subclass = None
    superclass = None
    def __init__(self, image_types_list=None):
        self.image_types_list = image_types_list
    def factory(*args_, **kwargs_):
        # Route construction through a registered subclass when one is installed.
        impl_ = complex_parsegen.subclass or complex_parsegen
        return impl_(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_image_types_list(self): return self.image_types_list
    def set_image_types_list(self, image_types_list): self.image_types_list = image_types_list
    def export(self, outfile, level, namespace_='tns:', name_='complex_parsegen', namespacedef_=''):
        """Serialize this element (and any children) as XML to outfile."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        self.exportAttributes(outfile, level, [], namespace_, name_='complex_parsegen')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='tns:', name_='complex_parsegen'):
        # No XML attributes defined for this element.
        pass
    def exportChildren(self, outfile, level, namespace_='tns:', name_='complex_parsegen', fromsubclass_=False):
        if self.image_types_list:
            self.image_types_list.export(outfile, level, namespace_, name_='image_types_list')
    def hasContent_(self):
        return self.image_types_list is not None
    def exportLiteral(self, outfile, level, name_='complex_parsegen'):
        """Serialize this element as Python constructor literals."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        if self.image_types_list is not None:
            showIndent(outfile, level)
            outfile.write('image_types_list=model_.complex_image_types_list(\n')
            self.image_types_list.exportLiteral(outfile, level, name_='image_types_list')
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node):
        """Populate this instance from an XML element-tree node."""
        self.buildAttributes(node, node.attrib, [])
        for element_ in node:
            tag_ = Tag_pattern_.match(element_.tag).groups()[-1]
            self.buildChildren(element_, node, tag_)
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'image_types_list':
            obj_ = complex_image_types_list.factory()
            obj_.build(child_)
            self.set_image_types_list(obj_)
# end class complex_parsegen
class complex_image_types_list(GeneratedsSuper):
    """Generated binding for <image_types_list>: an ordered collection of
    image_type child elements."""
    subclass = None
    superclass = None
    def __init__(self, image_type=None):
        # Default to a fresh list so instances never share mutable state.
        self.image_type = [] if image_type is None else image_type
    def factory(*args_, **kwargs_):
        # Route construction through a registered subclass when one is installed.
        impl_ = complex_image_types_list.subclass or complex_image_types_list
        return impl_(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_image_type(self): return self.image_type
    def set_image_type(self, image_type): self.image_type = image_type
    def add_image_type(self, value): self.image_type.append(value)
    def insert_image_type(self, index, value): self.image_type[index] = value
    def export(self, outfile, level, namespace_='tns:', name_='complex_image_types_list', namespacedef_=''):
        """Serialize this element (and any children) as XML to outfile."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        self.exportAttributes(outfile, level, [], namespace_, name_='complex_image_types_list')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='tns:', name_='complex_image_types_list'):
        # No XML attributes defined for this element.
        pass
    def exportChildren(self, outfile, level, namespace_='tns:', name_='complex_image_types_list', fromsubclass_=False):
        for item_ in self.image_type:
            item_.export(outfile, level, namespace_, name_='image_type')
    def hasContent_(self):
        return bool(self.image_type)
    def exportLiteral(self, outfile, level, name_='complex_image_types_list'):
        """Serialize this element as Python constructor literals."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('image_type=[\n')
        level += 1
        for item_ in self.image_type:
            showIndent(outfile, level)
            outfile.write('model_.complex_image_type(\n')
            item_.exportLiteral(outfile, level, name_='complex_image_type')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this instance from an XML element-tree node."""
        self.buildAttributes(node, node.attrib, [])
        for element_ in node:
            tag_ = Tag_pattern_.match(element_.tag).groups()[-1]
            self.buildChildren(element_, node, tag_)
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Accumulate every image_type child in document order.
        if nodeName_ == 'image_type':
            obj_ = complex_image_type.factory()
            obj_.build(child_)
            self.image_type.append(obj_)
# end class complex_image_types_list
class complex_image_type(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, id=None, file_type='elf', mbn_properties=None, elf_properties=None):
self.id = _cast(None, id)
self.file_type = file_type
self.mbn_properties = mbn_properties
self.elf_properties = elf_properties
def factory(*args_, **kwargs_):
if complex_image_type.subclass:
return complex_image_type.subclass(*args_, **kwargs_)
else:
return complex_image_type(*args_, **kwargs_)
factory = staticmethod(factory)
def get_file_type(self): return self.file_type
def set_file_type(self, file_type): self.file_type = file_type
def get_mbn_properties(self): return self.mbn_properties
def set_mbn_properties(self, mbn_properties): self.mbn_properties = mbn_properties
def get_elf_properties(self): return self.elf_properties
def set_elf_properties(self, elf_properties): self.elf_properties = elf_properties
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, outfile, level, namespace_='tns:', name_='complex_image_type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='complex_image_type')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='tns:', name_='complex_image_type'):
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
def exportChildren(self, outfile, level, namespace_='tns:', name_='complex_image_type', fromsubclass_=False):
if self.file_type is not None:
showIndent(outfile, level)
outfile.write('<%sfile_type>%s</%sfile_type>\n' % (namespace_, self.gds_format_string(quote_xml(self.file_type).encode(ExternalEncoding), input_name='file_type'), namespace_))
if self.mbn_properties:
self.mbn_properties.export(outfile, level, namespace_, name_='mbn_properties')
if self.elf_properties:
self.elf_properties.export(outfile, level, namespace_, name_='elf_properties')
def hasContent_(self):
if (
self.file_type is not None or
self.mbn_properties is not None or
self.elf_properties is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='complex_image_type'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
showIndent(outfile, level)
outfile.write('id = "%s",\n' % (self.id,))
    def exportLiteralChildren(self, outfile, level, name_):
        # Write each populated child as a Python keyword-argument line;
        # nested complex children are rendered as model_ constructor calls.
        if self.file_type is not None:
            showIndent(outfile, level)
            outfile.write('file_type=%s,\n' % quote_python(self.file_type).encode(ExternalEncoding))
        if self.mbn_properties is not None:
            showIndent(outfile, level)
            outfile.write('mbn_properties=model_.complex_mbn_properties(\n')
            self.mbn_properties.exportLiteral(outfile, level, name_='mbn_properties')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.elf_properties is not None:
            showIndent(outfile, level)
            outfile.write('elf_properties=model_.complex_elf_properties(\n')
            self.elf_properties.exportLiteral(outfile, level, name_='elf_properties')
            showIndent(outfile, level)
            outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.append('id')
self.id = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Dispatch on the child tag name and populate the matching member;
        # complex children are built recursively via their factory.
        if nodeName_ == 'file_type':
            file_type_ = child_.text
            file_type_ = self.gds_validate_string(file_type_, node, 'file_type')
            self.file_type = file_type_
        elif nodeName_ == 'mbn_properties':
            obj_ = complex_mbn_properties.factory()
            obj_.build(child_)
            self.set_mbn_properties(obj_)
        elif nodeName_ == 'elf_properties':
            obj_ = complex_elf_properties.factory()
            obj_.build(child_)
            self.set_elf_properties(obj_)
# end class complex_image_type
class complex_mbn_properties(GeneratedsSuper):
    """Generated binding for the ``complex_mbn_properties`` XML type.

    Holds the single optional ``header_size`` child element (an integer).

    Fix: the integer-parse handler used the Python-2-only
    ``except (...), exp`` syntax; it now uses ``except ... as exp``,
    which is valid on Python 2.6+ and Python 3.
    """
    subclass = None
    superclass = None
    def __init__(self, header_size=None):
        # header_size: optional int child element.
        self.header_size = header_size
    def factory(*args_, **kwargs_):
        # Build an instance of the registered subclass when one is installed.
        if complex_mbn_properties.subclass:
            return complex_mbn_properties.subclass(*args_, **kwargs_)
        else:
            return complex_mbn_properties(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_header_size(self): return self.header_size
    def set_header_size(self, header_size): self.header_size = header_size
    def export(self, outfile, level, namespace_='tns:', name_='complex_mbn_properties', namespacedef_=''):
        # Serialize as XML: open tag, children (if any), close tag.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='complex_mbn_properties')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='tns:', name_='complex_mbn_properties'):
        # This type declares no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespace_='tns:', name_='complex_mbn_properties', fromsubclass_=False):
        # Emit the header_size child when set.
        if self.header_size is not None:
            showIndent(outfile, level)
            outfile.write('<%sheader_size>%s</%sheader_size>\n' % (namespace_, self.gds_format_integer(self.header_size, input_name='header_size'), namespace_))
    def hasContent_(self):
        """Return True when header_size is set."""
        if (
            self.header_size is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='complex_mbn_properties'):
        # Write a Python-literal rendering of this object.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        if self.header_size is not None:
            showIndent(outfile, level)
            outfile.write('header_size=%d,\n' % self.header_size)
    def build(self, node):
        """Populate this object from an ElementTree *node*."""
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Parse the header_size child; reject non-integer text.
        if nodeName_ == 'header_size':
            sval_ = child_.text
            try:
                ival_ = int(sval_)
            except (TypeError, ValueError) as exp:  # fixed: was Py2-only ', exp'
                raise_parse_error(child_, 'requires integer: %s' % exp)
            ival_ = self.gds_validate_integer(ival_, node, 'header_size')
            self.header_size = ival_
# end class complex_mbn_properties
class complex_elf_properties(GeneratedsSuper):
    """Generated binding for the ``complex_elf_properties`` XML type.

    Describes ELF-image handling options: segment limit, hash-segment
    placement, image type and address-validation flags.

    Fix: the two integer-parse handlers used the Python-2-only
    ``except (...), exp`` syntax; they now use ``except ... as exp``,
    which is valid on Python 2.6+ and Python 3.
    """
    subclass = None
    superclass = None
    def __init__(self, max_elf_segments=None, hash_seg_placement=None, image_type=0, has_hash_table=True, validate_ph_addrs=True, validate_vir_addrs=False):
        self.max_elf_segments = max_elf_segments
        self.hash_seg_placement = hash_seg_placement
        self.image_type = image_type
        self.has_hash_table = has_hash_table
        self.validate_ph_addrs = validate_ph_addrs
        self.validate_vir_addrs = validate_vir_addrs
    def factory(*args_, **kwargs_):
        # Build an instance of the registered subclass when one is installed.
        if complex_elf_properties.subclass:
            return complex_elf_properties.subclass(*args_, **kwargs_)
        else:
            return complex_elf_properties(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_max_elf_segments(self): return self.max_elf_segments
    def set_max_elf_segments(self, max_elf_segments): self.max_elf_segments = max_elf_segments
    def get_hash_seg_placement(self): return self.hash_seg_placement
    def set_hash_seg_placement(self, hash_seg_placement): self.hash_seg_placement = hash_seg_placement
    def get_image_type(self): return self.image_type
    def set_image_type(self, image_type): self.image_type = image_type
    def get_has_hash_table(self): return self.has_hash_table
    def set_has_hash_table(self, has_hash_table): self.has_hash_table = has_hash_table
    def get_validate_ph_addrs(self): return self.validate_ph_addrs
    def set_validate_ph_addrs(self, validate_ph_addrs): self.validate_ph_addrs = validate_ph_addrs
    def get_validate_vir_addrs(self): return self.validate_vir_addrs
    def set_validate_vir_addrs(self, validate_vir_addrs): self.validate_vir_addrs = validate_vir_addrs
    def export(self, outfile, level, namespace_='tns:', name_='complex_elf_properties', namespacedef_=''):
        # Serialize as XML: open tag, children (if any), close tag.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='complex_elf_properties')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='tns:', name_='complex_elf_properties'):
        # This type declares no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespace_='tns:', name_='complex_elf_properties', fromsubclass_=False):
        # Emit children in schema order; unset members are skipped.
        if self.max_elf_segments is not None:
            showIndent(outfile, level)
            outfile.write('<%smax_elf_segments>%s</%smax_elf_segments>\n' % (namespace_, self.gds_format_integer(self.max_elf_segments, input_name='max_elf_segments'), namespace_))
        if self.hash_seg_placement is not None:
            showIndent(outfile, level)
            outfile.write('<%shash_seg_placement>%s</%shash_seg_placement>\n' % (namespace_, self.gds_format_string(quote_xml(self.hash_seg_placement).encode(ExternalEncoding), input_name='hash_seg_placement'), namespace_))
        if self.image_type is not None:
            showIndent(outfile, level)
            outfile.write('<%simage_type>%s</%simage_type>\n' % (namespace_, self.gds_format_integer(self.image_type, input_name='image_type'), namespace_))
        if self.has_hash_table is not None:
            showIndent(outfile, level)
            outfile.write('<%shas_hash_table>%s</%shas_hash_table>\n' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.has_hash_table)), input_name='has_hash_table'), namespace_))
        if self.validate_ph_addrs is not None:
            showIndent(outfile, level)
            outfile.write('<%svalidate_ph_addrs>%s</%svalidate_ph_addrs>\n' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.validate_ph_addrs)), input_name='validate_ph_addrs'), namespace_))
        if self.validate_vir_addrs is not None:
            showIndent(outfile, level)
            outfile.write('<%svalidate_vir_addrs>%s</%svalidate_vir_addrs>\n' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.validate_vir_addrs)), input_name='validate_vir_addrs'), namespace_))
    def hasContent_(self):
        """Return True when any child element is set."""
        if (
            self.max_elf_segments is not None or
            self.hash_seg_placement is not None or
            self.image_type is not None or
            self.has_hash_table is not None or
            self.validate_ph_addrs is not None or
            self.validate_vir_addrs is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='complex_elf_properties'):
        # Write a Python-literal rendering of this object.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        if self.max_elf_segments is not None:
            showIndent(outfile, level)
            outfile.write('max_elf_segments=%d,\n' % self.max_elf_segments)
        if self.hash_seg_placement is not None:
            showIndent(outfile, level)
            outfile.write('hash_seg_placement=%s,\n' % quote_python(self.hash_seg_placement).encode(ExternalEncoding))
        if self.image_type is not None:
            showIndent(outfile, level)
            outfile.write('image_type=%d,\n' % self.image_type)
        if self.has_hash_table is not None:
            showIndent(outfile, level)
            outfile.write('has_hash_table=%s,\n' % self.has_hash_table)
        if self.validate_ph_addrs is not None:
            showIndent(outfile, level)
            outfile.write('validate_ph_addrs=%s,\n' % self.validate_ph_addrs)
        if self.validate_vir_addrs is not None:
            showIndent(outfile, level)
            outfile.write('validate_vir_addrs=%s,\n' % self.validate_vir_addrs)
    def build(self, node):
        """Populate this object from an ElementTree *node*."""
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Dispatch on the child tag name; integers must parse, booleans must
        # be one of 'true'/'1'/'false'/'0'.  raise_parse_error is expected to
        # raise on bad input (otherwise ival_ would be unbound) -- TODO confirm.
        if nodeName_ == 'max_elf_segments':
            sval_ = child_.text
            try:
                ival_ = int(sval_)
            except (TypeError, ValueError) as exp:  # fixed: was Py2-only ', exp'
                raise_parse_error(child_, 'requires integer: %s' % exp)
            ival_ = self.gds_validate_integer(ival_, node, 'max_elf_segments')
            self.max_elf_segments = ival_
        elif nodeName_ == 'hash_seg_placement':
            hash_seg_placement_ = child_.text
            hash_seg_placement_ = self.gds_validate_string(hash_seg_placement_, node, 'hash_seg_placement')
            self.hash_seg_placement = hash_seg_placement_
        elif nodeName_ == 'image_type':
            sval_ = child_.text
            try:
                ival_ = int(sval_)
            except (TypeError, ValueError) as exp:  # fixed: was Py2-only ', exp'
                raise_parse_error(child_, 'requires integer: %s' % exp)
            ival_ = self.gds_validate_integer(ival_, node, 'image_type')
            self.image_type = ival_
        elif nodeName_ == 'has_hash_table':
            sval_ = child_.text
            if sval_ in ('true', '1'):
                ival_ = True
            elif sval_ in ('false', '0'):
                ival_ = False
            else:
                raise_parse_error(child_, 'requires boolean')
            ival_ = self.gds_validate_boolean(ival_, node, 'has_hash_table')
            self.has_hash_table = ival_
        elif nodeName_ == 'validate_ph_addrs':
            sval_ = child_.text
            if sval_ in ('true', '1'):
                ival_ = True
            elif sval_ in ('false', '0'):
                ival_ = False
            else:
                raise_parse_error(child_, 'requires boolean')
            ival_ = self.gds_validate_boolean(ival_, node, 'validate_ph_addrs')
            self.validate_ph_addrs = ival_
        elif nodeName_ == 'validate_vir_addrs':
            sval_ = child_.text
            if sval_ in ('true', '1'):
                ival_ = True
            elif sval_ in ('false', '0'):
                ival_ = False
            else:
                raise_parse_error(child_, 'requires boolean')
            ival_ = self.gds_validate_boolean(ival_, node, 'validate_vir_addrs')
            self.validate_vir_addrs = ival_
# end class complex_elf_properties
class complex_data_provisioning(GeneratedsSuper):
    """Generated binding for the ``complex_data_provisioning`` XML type.

    Carries a single optional ``base_path`` string child element.
    """
    subclass = None
    superclass = None
    def __init__(self, base_path='./../../resources/data_prov_assets/'):
        self.base_path = base_path
    def factory(*args_, **kwargs_):
        # Prefer the registered subclass when one has been installed.
        target_cls = complex_data_provisioning.subclass or complex_data_provisioning
        return target_cls(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_base_path(self): return self.base_path
    def set_base_path(self, base_path): self.base_path = base_path
    def export(self, outfile, level, namespace_='tns:', name_='complex_data_provisioning', namespacedef_=''):
        """Serialize this object as an XML element on *outfile*."""
        showIndent(outfile, level)
        ns_extra = namespacedef_ and ' ' + namespacedef_ or ''
        outfile.write('<%s%s%s' % (namespace_, name_, ns_extra))
        self.exportAttributes(outfile, level, [], namespace_, name_='complex_data_provisioning')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='tns:', name_='complex_data_provisioning'):
        # No XML attributes on this type.
        pass
    def exportChildren(self, outfile, level, namespace_='tns:', name_='complex_data_provisioning', fromsubclass_=False):
        if self.base_path is None:
            return
        showIndent(outfile, level)
        outfile.write('<%sbase_path>%s</%sbase_path>\n' % (namespace_, self.gds_format_string(quote_xml(self.base_path).encode(ExternalEncoding), input_name='base_path'), namespace_))
    def hasContent_(self):
        """Return True when base_path is set."""
        return self.base_path is not None
    def exportLiteral(self, outfile, level, name_='complex_data_provisioning'):
        """Write a Python-literal rendering of this object to *outfile*."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        if self.base_path is None:
            return
        showIndent(outfile, level)
        outfile.write('base_path=%s,\n' % quote_python(self.base_path).encode(ExternalEncoding))
    def build(self, node):
        """Populate this object from an ElementTree *node*."""
        self.buildAttributes(node, node.attrib, [])
        for child_node in node:
            tag_name = Tag_pattern_.match(child_node.tag).groups()[-1]
            self.buildChildren(child_node, node, tag_name)
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'base_path':
            text_value = child_.text
            self.base_path = self.gds_validate_string(text_value, node, 'base_path')
# end class complex_data_provisioning
class complex_images_list(GeneratedsSuper):
    """Generated binding for the ``complex_images_list`` XML type.

    A container holding zero or more ``image`` child elements.
    """
    subclass = None
    superclass = None
    def __init__(self, image=None):
        # A fresh list is created when no image list is supplied.
        self.image = [] if image is None else image
    def factory(*args_, **kwargs_):
        # Prefer the registered subclass when one has been installed.
        target_cls = complex_images_list.subclass or complex_images_list
        return target_cls(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_image(self): return self.image
    def set_image(self, image): self.image = image
    def add_image(self, value): self.image.append(value)
    def insert_image(self, index, value): self.image[index] = value
    def export(self, outfile, level, namespace_='tns:', name_='complex_images_list', namespacedef_=''):
        """Serialize this list as an XML element on *outfile*."""
        showIndent(outfile, level)
        ns_extra = namespacedef_ and ' ' + namespacedef_ or ''
        outfile.write('<%s%s%s' % (namespace_, name_, ns_extra))
        self.exportAttributes(outfile, level, [], namespace_, name_='complex_images_list')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='tns:', name_='complex_images_list'):
        # No XML attributes on this type.
        pass
    def exportChildren(self, outfile, level, namespace_='tns:', name_='complex_images_list', fromsubclass_=False):
        # Each contained image serializes itself as an <image> element.
        for image_entry in self.image:
            image_entry.export(outfile, level, namespace_, name_='image')
    def hasContent_(self):
        """Return True when at least one image is present."""
        return bool(self.image)
    def exportLiteral(self, outfile, level, name_='complex_images_list'):
        """Write a Python-literal rendering of this object to *outfile*."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        # Render the image list as a literal list of constructor calls.
        showIndent(outfile, level)
        outfile.write('image=[\n')
        level += 1
        for image_entry in self.image:
            showIndent(outfile, level)
            outfile.write('model_.complex_image(\n')
            image_entry.exportLiteral(outfile, level, name_='complex_image')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this object from an ElementTree *node*."""
        self.buildAttributes(node, node.attrib, [])
        for child_node in node:
            tag_name = Tag_pattern_.match(child_node.tag).groups()[-1]
            self.buildChildren(child_node, node, tag_name)
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'image':
            new_image = complex_image.factory()
            new_image.build(child_)
            self.image.append(new_image)
# end class complex_images_list
class complex_image(GeneratedsSuper):
    """Generated binding for the ``complex_image`` XML type.

    Carries five string XML attributes (image_type, sign_id,
    output_file_name, cert_config, name) and one optional complex child,
    general_properties_overrides.
    """
    subclass = None
    superclass = None
    # Attribute names in schema order; drives export/build loops below.
    _ATTR_NAMES = ('image_type', 'sign_id', 'output_file_name', 'cert_config', 'name')
    def __init__(self, image_type=None, sign_id=None, output_file_name=None, cert_config=None, name=None, general_properties_overrides=None):
        self.image_type = _cast(None, image_type)
        self.sign_id = _cast(None, sign_id)
        self.output_file_name = _cast(None, output_file_name)
        self.cert_config = _cast(None, cert_config)
        self.name = _cast(None, name)
        self.general_properties_overrides = general_properties_overrides
    def factory(*args_, **kwargs_):
        # Prefer the registered subclass when one has been installed.
        target_cls = complex_image.subclass or complex_image
        return target_cls(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_general_properties_overrides(self): return self.general_properties_overrides
    def set_general_properties_overrides(self, general_properties_overrides): self.general_properties_overrides = general_properties_overrides
    def get_image_type(self): return self.image_type
    def set_image_type(self, image_type): self.image_type = image_type
    def get_sign_id(self): return self.sign_id
    def set_sign_id(self, sign_id): self.sign_id = sign_id
    def get_output_file_name(self): return self.output_file_name
    def set_output_file_name(self, output_file_name): self.output_file_name = output_file_name
    def get_cert_config(self): return self.cert_config
    def set_cert_config(self, cert_config): self.cert_config = cert_config
    def get_name(self): return self.name
    def set_name(self, name): self.name = name
    def export(self, outfile, level, namespace_='tns:', name_='complex_image', namespacedef_=''):
        """Serialize this object as an XML element on *outfile*."""
        showIndent(outfile, level)
        ns_extra = namespacedef_ and ' ' + namespacedef_ or ''
        outfile.write('<%s%s%s' % (namespace_, name_, ns_extra))
        self.exportAttributes(outfile, level, [], namespace_, name_='complex_image')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='tns:', name_='complex_image'):
        # Emit each set XML attribute once, in schema order.
        for attr_name in self._ATTR_NAMES:
            attr_value = getattr(self, attr_name)
            if attr_value is None or attr_name in already_processed:
                continue
            already_processed.append(attr_name)
            formatted = self.gds_format_string(quote_attrib(attr_value).encode(ExternalEncoding), input_name=attr_name)
            outfile.write(' %s=%s' % (attr_name, formatted))
    def exportChildren(self, outfile, level, namespace_='tns:', name_='complex_image', fromsubclass_=False):
        if self.general_properties_overrides:
            self.general_properties_overrides.export(outfile, level, namespace_, name_='general_properties_overrides', )
    def hasContent_(self):
        """Return True when the general_properties_overrides child is set."""
        return self.general_properties_overrides is not None
    def exportLiteral(self, outfile, level, name_='complex_image'):
        """Write a Python-literal rendering of this object to *outfile*."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        # Render each set attribute as a keyword-argument line, in schema order.
        for attr_name in self._ATTR_NAMES:
            attr_value = getattr(self, attr_name)
            if attr_value is None or attr_name in already_processed:
                continue
            already_processed.append(attr_name)
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (attr_name, attr_value))
    def exportLiteralChildren(self, outfile, level, name_):
        if self.general_properties_overrides is not None:
            showIndent(outfile, level)
            outfile.write('general_properties_overrides=model_.complex_general_properties_overrides(\n')
            self.general_properties_overrides.exportLiteral(outfile, level, name_='general_properties_overrides')
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node):
        """Populate this object from an ElementTree *node*."""
        self.buildAttributes(node, node.attrib, [])
        for child_node in node:
            tag_name = Tag_pattern_.match(child_node.tag).groups()[-1]
            self.buildChildren(child_node, node, tag_name)
    def buildAttributes(self, node, attrs, already_processed):
        # Read each declared XML attribute from the node, in schema order.
        for attr_name in self._ATTR_NAMES:
            value = find_attr_value_(attr_name, node)
            if value is not None and attr_name not in already_processed:
                already_processed.append(attr_name)
                setattr(self, attr_name, value)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'general_properties_overrides':
            overrides = complex_general_properties_overrides.factory()
            overrides.build(child_)
            self.set_general_properties_overrides(overrides)
# end class complex_image
class complex_general_properties_overrides(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, selected_cert_config=None, selected_signer=None, max_cert_size=None, key_size=None, exponent=None, num_certs_in_certchain=None, num_root_certs=None, max_num_root_certs=None, msm_part=None, sw_id=None, oem_id=None, model_id=None, soc_hw_version=None, in_use_soc_hw_version=None, use_serial_number_in_signing=None, debug=None, hash_algorithm=None, segment_hash_algorithm=None, rsa_padding=None, hmac=None, secboot_version=None, oem_sign=None, dsa_type=None):
self.selected_cert_config = selected_cert_config
self.selected_signer = selected_signer
self.max_cert_size = max_cert_size
self.key_size = key_size
self.exponent = exponent
self.num_certs_in_certchain = num_certs_in_certchain
self.num_root_certs = num_root_certs
self.max_num_root_certs = max_num_root_certs
self.msm_part = msm_part
self.sw_id = sw_id
self.oem_id = oem_id
self.model_id = model_id
self.soc_hw_version = soc_hw_version
self.in_use_soc_hw_version = in_use_soc_hw_version
self.use_serial_number_in_signing = use_serial_number_in_signing
self.debug = debug
self.hash_algorithm = hash_algorithm
self.segment_hash_algorithm = segment_hash_algorithm
self.rsa_padding = rsa_padding
self.hmac = hmac
self.secboot_version = secboot_version
self.oem_sign = oem_sign
self.dsa_type = dsa_type
def factory(*args_, **kwargs_):
if complex_general_properties_overrides.subclass:
return complex_general_properties_overrides.subclass(*args_, **kwargs_)
else:
return complex_general_properties_overrides(*args_, **kwargs_)
factory = staticmethod(factory)
def get_selected_cert_config(self): return self.selected_cert_config
def set_selected_cert_config(self, selected_cert_config): self.selected_cert_config = selected_cert_config
def get_selected_signer(self): return self.selected_signer
def set_selected_signer(self, selected_signer): self.selected_signer = selected_signer
def get_max_cert_size(self): return self.max_cert_size
def set_max_cert_size(self, max_cert_size): self.max_cert_size = max_cert_size
def get_key_size(self): return self.key_size
def set_key_size(self, key_size): self.key_size = key_size
def get_exponent(self): return self.exponent
def set_exponent(self, exponent): self.exponent = exponent
def get_num_certs_in_certchain(self): return self.num_certs_in_certchain
def set_num_certs_in_certchain(self, num_certs_in_certchain): self.num_certs_in_certchain = num_certs_in_certchain
def get_num_root_certs(self): return self.num_root_certs
def set_num_root_certs(self, num_root_certs): self.num_root_certs = num_root_certs
def get_max_num_root_certs(self): return self.max_num_root_certs
def set_max_num_root_certs(self, max_num_root_certs): self.max_num_root_certs = max_num_root_certs
def get_msm_part(self): return self.msm_part
def set_msm_part(self, msm_part): self.msm_part = msm_part
def get_sw_id(self): return self.sw_id
def set_sw_id(self, sw_id): self.sw_id = sw_id
def get_oem_id(self): return self.oem_id
def set_oem_id(self, oem_id): self.oem_id = oem_id
def get_model_id(self): return self.model_id
def set_model_id(self, model_id): self.model_id = model_id
def get_soc_hw_version(self): return self.soc_hw_version
def set_soc_hw_version(self, soc_hw_version): self.soc_hw_version = soc_hw_version
def get_in_use_soc_hw_version(self): return self.in_use_soc_hw_version
def set_in_use_soc_hw_version(self, in_use_soc_hw_version): self.in_use_soc_hw_version = in_use_soc_hw_version
def get_use_serial_number_in_signing(self): return self.use_serial_number_in_signing
def set_use_serial_number_in_signing(self, use_serial_number_in_signing): self.use_serial_number_in_signing = use_serial_number_in_signing
def get_debug(self): return self.debug
def set_debug(self, debug): self.debug = debug
def get_hash_algorithm(self): return self.hash_algorithm
def set_hash_algorithm(self, hash_algorithm): self.hash_algorithm = hash_algorithm
def get_segment_hash_algorithm(self): return self.segment_hash_algorithm
def set_segment_hash_algorithm(self, segment_hash_algorithm): self.segment_hash_algorithm = segment_hash_algorithm
def get_rsa_padding(self): return self.rsa_padding
def set_rsa_padding(self, rsa_padding): self.rsa_padding = rsa_padding
def get_hmac(self): return self.hmac
def set_hmac(self, hmac): self.hmac = hmac
def get_secboot_version(self): return self.secboot_version
def set_secboot_version(self, secboot_version): self.secboot_version = secboot_version
def get_oem_sign(self): return self.oem_sign
def set_oem_sign(self, oem_sign): self.oem_sign = oem_sign
def get_dsa_type(self): return self.dsa_type
def set_dsa_type(self, dsa_type): self.dsa_type = dsa_type
def export(self, outfile, level, namespace_='tns:', name_='complex_general_properties_overrides', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='complex_general_properties_overrides')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='tns:', name_='complex_general_properties_overrides'):
pass
def exportChildren(self, outfile, level, namespace_='tns:', name_='complex_general_properties_overrides', fromsubclass_=False):
if self.selected_cert_config is not None:
showIndent(outfile, level)
outfile.write('<%sselected_cert_config>%s</%sselected_cert_config>\n' % (namespace_, self.gds_format_string(quote_xml(self.selected_cert_config).encode(ExternalEncoding), input_name='selected_cert_config'), namespace_))
if self.selected_signer is not None:
showIndent(outfile, level)
outfile.write('<%sselected_signer>%s</%sselected_signer>\n' % (namespace_, self.gds_format_string(quote_xml(self.selected_signer).encode(ExternalEncoding), input_name='selected_signer'), namespace_))
if self.max_cert_size is not None:
showIndent(outfile, level)
outfile.write('<%smax_cert_size>%s</%smax_cert_size>\n' % (namespace_, self.gds_format_integer(self.max_cert_size, input_name='max_cert_size'), namespace_))
if self.key_size is not None:
showIndent(outfile, level)
outfile.write('<%skey_size>%s</%skey_size>\n' % (namespace_, self.gds_format_integer(self.key_size, input_name='key_size'), namespace_))
if self.exponent is not None:
showIndent(outfile, level)
outfile.write('<%sexponent>%s</%sexponent>\n' % (namespace_, self.gds_format_integer(self.exponent, input_name='exponent'), namespace_))
if self.num_certs_in_certchain is not None:
showIndent(outfile, level)
outfile.write('<%snum_certs_in_certchain>%s</%snum_certs_in_certchain>\n' % (namespace_, self.gds_format_integer(self.num_certs_in_certchain, input_name='num_certs_in_certchain'), namespace_))
if self.num_root_certs is not None:
showIndent(outfile, level)
outfile.write('<%snum_root_certs>%s</%snum_root_certs>\n' % (namespace_, self.gds_format_integer(self.num_root_certs, input_name='num_root_certs'), namespace_))
if self.max_num_root_certs is not None:
showIndent(outfile, level)
outfile.write('<%smax_num_root_certs>%s</%smax_num_root_certs>\n' % (namespace_, self.gds_format_integer(self.max_num_root_certs, input_name='max_num_root_certs'), namespace_))
if self.msm_part is not None:
showIndent(outfile, level)
outfile.write('<%smsm_part>%s</%smsm_part>\n' % (namespace_, self.gds_format_string(quote_xml(self.msm_part).encode(ExternalEncoding), input_name='msm_part'), namespace_))
if self.sw_id is not None:
showIndent(outfile, level)
outfile.write('<%ssw_id>%s</%ssw_id>\n' % (namespace_, self.gds_format_string(quote_xml(self.sw_id).encode(ExternalEncoding), input_name='sw_id'), namespace_))
if self.oem_id is not None:
showIndent(outfile, level)
outfile.write('<%soem_id>%s</%soem_id>\n' % (namespace_, self.gds_format_string(quote_xml(self.oem_id).encode(ExternalEncoding), input_name='oem_id'), namespace_))
if self.model_id is not None:
showIndent(outfile, level)
outfile.write('<%smodel_id>%s</%smodel_id>\n' % (namespace_, self.gds_format_string(quote_xml(self.model_id).encode(ExternalEncoding), input_name='model_id'), namespace_))
if self.soc_hw_version is not None:
showIndent(outfile, level)
outfile.write('<%ssoc_hw_version>%s</%ssoc_hw_version>\n' % (namespace_, self.gds_format_string(quote_xml(self.soc_hw_version).encode(ExternalEncoding), input_name='soc_hw_version'), namespace_))
if self.in_use_soc_hw_version is not None:
showIndent(outfile, level)
outfile.write('<%sin_use_soc_hw_version>%s</%sin_use_soc_hw_version>\n' % (namespace_, self.gds_format_integer(self.in_use_soc_hw_version, input_name='in_use_soc_hw_version'), namespace_))
if self.use_serial_number_in_signing is not None:
showIndent(outfile, level)
outfile.write('<%suse_serial_number_in_signing>%s</%suse_serial_number_in_signing>\n' % (namespace_, self.gds_format_integer(self.use_serial_number_in_signing, input_name='use_serial_number_in_signing'), namespace_))
if self.debug is not None:
showIndent(outfile, level)
outfile.write('<%sdebug>%s</%sdebug>\n' % (namespace_, self.gds_format_string(quote_xml(self.debug).encode(ExternalEncoding), input_name='debug'), namespace_))
if self.hash_algorithm is not None:
showIndent(outfile, level)
outfile.write('<%shash_algorithm>%s</%shash_algorithm>\n' % (namespace_, self.gds_format_string(quote_xml(self.hash_algorithm).encode(ExternalEncoding), input_name='hash_algorithm'), namespace_))
if self.segment_hash_algorithm is not None:
showIndent(outfile, level)
outfile.write('<%ssegment_hash_algorithm>%s</%ssegment_hash_algorithm>\n' % (namespace_, self.gds_format_string(quote_xml(self.segment_hash_algorithm).encode(ExternalEncoding), input_name='segment_hash_algorithm'), namespace_))
if self.rsa_padding is not None:
showIndent(outfile, level)
outfile.write('<%srsa_padding>%s</%srsa_padding>\n' % (namespace_, self.gds_format_string(quote_xml(self.rsa_padding).encode(ExternalEncoding), input_name='rsa_padding'), namespace_))
if self.hmac is not None:
showIndent(outfile, level)
outfile.write('<%shmac>%s</%shmac>\n' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.hmac)), input_name='hmac'), namespace_))
if self.secboot_version is not None:
showIndent(outfile, level)
outfile.write('<%ssecboot_version>%s</%ssecboot_version>\n' % (namespace_, self.gds_format_string(quote_xml(self.secboot_version).encode(ExternalEncoding), input_name='secboot_version'), namespace_))
if self.oem_sign is not None:
showIndent(outfile, level)
outfile.write('<%soem_sign>%s</%soem_sign>\n' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.oem_sign)), input_name='oem_sign'), namespace_))
if self.dsa_type is not None:
showIndent(outfile, level)
outfile.write('<%sdsa_type>%s</%sdsa_type>\n' % (namespace_, self.gds_format_string(quote_xml(self.dsa_type).encode(ExternalEncoding), input_name='dsa_type'), namespace_))
def hasContent_(self):
if (
self.selected_cert_config is not None or
self.selected_signer is not None or
self.max_cert_size is not None or
self.key_size is not None or
self.exponent is not None or
self.num_certs_in_certchain is not None or
self.num_root_certs is not None or
self.max_num_root_certs is not None or
self.msm_part is not None or
self.sw_id is not None or
self.oem_id is not None or
self.model_id is not None or
self.soc_hw_version is not None or
self.in_use_soc_hw_version is not None or
self.use_serial_number_in_signing is not None or
self.debug is not None or
self.hash_algorithm is not None or
self.segment_hash_algorithm is not None or
self.rsa_padding is not None or
self.hmac is not None or
self.secboot_version is not None or
self.oem_sign is not None or
self.dsa_type is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='complex_general_properties_overrides'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.selected_cert_config is not None:
showIndent(outfile, level)
outfile.write('selected_cert_config=%s,\n' % quote_python(self.selected_cert_config).encode(ExternalEncoding))
if self.selected_signer is not None:
showIndent(outfile, level)
outfile.write('selected_signer=%s,\n' % quote_python(self.selected_signer).encode(ExternalEncoding))
if self.max_cert_size is not None:
showIndent(outfile, level)
outfile.write('max_cert_size=%d,\n' % self.max_cert_size)
if self.key_size is not None:
showIndent(outfile, level)
outfile.write('key_size=%d,\n' % self.key_size)
if self.exponent is not None:
showIndent(outfile, level)
outfile.write('exponent=%d,\n' % self.exponent)
if self.num_certs_in_certchain is not None:
showIndent(outfile, level)
outfile.write('num_certs_in_certchain=%d,\n' % self.num_certs_in_certchain)
if self.num_root_certs is not None:
showIndent(outfile, level)
outfile.write('num_root_certs=%d,\n' % self.num_root_certs)
if self.max_num_root_certs is not None:
showIndent(outfile, level)
outfile.write('max_num_root_certs=%d,\n' % self.max_num_root_certs)
if self.msm_part is not None:
showIndent(outfile, level)
outfile.write('msm_part=%s,\n' % quote_python(self.msm_part).encode(ExternalEncoding))
if self.sw_id is not None:
showIndent(outfile, level)
outfile.write('sw_id=%s,\n' % quote_python(self.sw_id).encode(ExternalEncoding))
if self.oem_id is not None:
showIndent(outfile, level)
outfile.write('oem_id=%s,\n' % quote_python(self.oem_id).encode(ExternalEncoding))
if self.model_id is not None:
showIndent(outfile, level)
outfile.write('model_id=%s,\n' % quote_python(self.model_id).encode(ExternalEncoding))
if self.soc_hw_version is not None:
showIndent(outfile, level)
outfile.write('soc_hw_version=%s,\n' % quote_python(self.soc_hw_version).encode(ExternalEncoding))
if self.in_use_soc_hw_version is not None:
showIndent(outfile, level)
outfile.write('in_use_soc_hw_version=%d,\n' % self.in_use_soc_hw_version)
if self.use_serial_number_in_signing is not None:
showIndent(outfile, level)
outfile.write('use_serial_number_in_signing=%d,\n' % self.use_serial_number_in_signing)
if self.debug is not None:
showIndent(outfile, level)
outfile.write('debug=%s,\n' % quote_python(self.debug).encode(ExternalEncoding))
if self.hash_algorithm is not None:
showIndent(outfile, level)
outfile.write('hash_algorithm=%s,\n' % quote_python(self.hash_algorithm).encode(ExternalEncoding))
if self.segment_hash_algorithm is not None:
showIndent(outfile, level)
outfile.write('segment_hash_algorithm=%s,\n' % quote_python(self.segment_hash_algorithm).encode(ExternalEncoding))
if self.rsa_padding is not None:
showIndent(outfile, level)
outfile.write('rsa_padding=%s,\n' % quote_python(self.rsa_padding).encode(ExternalEncoding))
if self.hmac is not None:
showIndent(outfile, level)
outfile.write('hmac=%s,\n' % self.hmac)
if self.secboot_version is not None:
showIndent(outfile, level)
outfile.write('secboot_version=%s,\n' % quote_python(self.secboot_version).encode(ExternalEncoding))
if self.oem_sign is not None:
showIndent(outfile, level)
outfile.write('oem_sign=%s,\n' % self.oem_sign)
if self.dsa_type is not None:
showIndent(outfile, level)
outfile.write('dsa_type=%s,\n' % quote_python(self.dsa_type).encode(ExternalEncoding))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'selected_cert_config':
selected_cert_config_ = child_.text
selected_cert_config_ = self.gds_validate_string(selected_cert_config_, node, 'selected_cert_config')
self.selected_cert_config = selected_cert_config_
elif nodeName_ == 'selected_signer':
selected_signer_ = child_.text
selected_signer_ = self.gds_validate_string(selected_signer_, node, 'selected_signer')
self.selected_signer = selected_signer_
elif nodeName_ == 'max_cert_size':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'max_cert_size')
self.max_cert_size = ival_
elif nodeName_ == 'key_size':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'key_size')
self.key_size = ival_
elif nodeName_ == 'exponent':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'exponent')
self.exponent = ival_
elif nodeName_ == 'num_certs_in_certchain':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'num_certs_in_certchain')
self.num_certs_in_certchain = ival_
elif nodeName_ == 'num_root_certs':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'num_root_certs')
self.num_root_certs = ival_
elif nodeName_ == 'max_num_root_certs':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'max_num_root_certs')
self.max_num_root_certs = ival_
elif nodeName_ == 'msm_part':
msm_part_ = child_.text
msm_part_ = self.gds_validate_string(msm_part_, node, 'msm_part')
self.msm_part = msm_part_
elif nodeName_ == 'sw_id':
sw_id_ = child_.text
sw_id_ = self.gds_validate_string(sw_id_, node, 'sw_id')
self.sw_id = sw_id_
elif nodeName_ == 'oem_id':
oem_id_ = child_.text
oem_id_ = self.gds_validate_string(oem_id_, node, 'oem_id')
self.oem_id = oem_id_
elif nodeName_ == 'model_id':
model_id_ = child_.text
model_id_ = self.gds_validate_string(model_id_, node, 'model_id')
self.model_id = model_id_
elif nodeName_ == 'soc_hw_version':
soc_hw_version_ = child_.text
soc_hw_version_ = self.gds_validate_string(soc_hw_version_, node, 'soc_hw_version')
self.soc_hw_version = soc_hw_version_
elif nodeName_ == 'in_use_soc_hw_version':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'in_use_soc_hw_version')
self.in_use_soc_hw_version = ival_
elif nodeName_ == 'use_serial_number_in_signing':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'use_serial_number_in_signing')
self.use_serial_number_in_signing = ival_
elif nodeName_ == 'debug':
debug_ = child_.text
debug_ = self.gds_validate_string(debug_, node, 'debug')
self.debug = debug_
elif nodeName_ == 'hash_algorithm':
hash_algorithm_ = child_.text
hash_algorithm_ = self.gds_validate_string(hash_algorithm_, node, 'hash_algorithm')
self.hash_algorithm = hash_algorithm_
elif nodeName_ == 'segment_hash_algorithm':
segment_hash_algorithm_ = child_.text
segment_hash_algorithm_ = self.gds_validate_string(segment_hash_algorithm_, node, 'segment_hash_algorithm')
self.segment_hash_algorithm = segment_hash_algorithm_
elif nodeName_ == 'rsa_padding':
rsa_padding_ = child_.text
rsa_padding_ = self.gds_validate_string(rsa_padding_, node, 'rsa_padding')
self.rsa_padding = rsa_padding_
elif nodeName_ == 'hmac':
sval_ = child_.text
if sval_ in ('true', '1'):
ival_ = True
elif sval_ in ('false', '0'):
ival_ = False
else:
raise_parse_error(child_, 'requires boolean')
ival_ = self.gds_validate_boolean(ival_, node, 'hmac')
self.hmac = ival_
elif nodeName_ == 'secboot_version':
secboot_version_ = child_.text
secboot_version_ = self.gds_validate_string(secboot_version_, node, 'secboot_version')
self.secboot_version = secboot_version_
elif nodeName_ == 'oem_sign':
sval_ = child_.text
if sval_ in ('true', '1'):
ival_ = True
elif sval_ in ('false', '0'):
ival_ = False
else:
raise_parse_error(child_, 'requires boolean')
ival_ = self.gds_validate_boolean(ival_, node, 'oem_sign')
self.oem_sign = ival_
elif nodeName_ == 'dsa_type':
dsa_type_ = child_.text
dsa_type_ = self.gds_validate_string(dsa_type_, node, 'dsa_type')
self.dsa_type = dsa_type_
# end class complex_general_properties_overrides
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print USAGE_TEXT
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'iot'
rootClass = iot
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_=rootTag,
namespacedef_='xmlns:tns="http://www.qualcomm.com/iot"')
return rootObj
def parseString(inString):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'iot'
rootClass = iot
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_="iot",
namespacedef_='xmlns:tns="http://www.qualcomm.com/iot"')
return rootObj
def parseLiteral(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'iot'
rootClass = iot
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('#from auto_gen_xml_config import *\n\n')
sys.stdout.write('import auto_gen_xml_config as model_\n\n')
sys.stdout.write('rootObj = model_.rootTag(\n')
rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
sys.stdout.write(')\n')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"complex_data_provisioning",
"complex_elf_properties",
"complex_general_properties",
"complex_general_properties_overrides",
"complex_image",
"complex_image_type",
"complex_image_types_list",
"complex_images_list",
"complex_mbn_properties",
"complex_metadata",
"complex_parsegen",
"iot"
]
| 50.444543
| 534
| 0.655186
| 13,911
| 113,702
| 5.01639
| 0.029689
| 0.055544
| 0.027342
| 0.07231
| 0.847757
| 0.808621
| 0.772896
| 0.736956
| 0.711778
| 0.690526
| 0
| 0.002735
| 0.241209
| 113,702
| 2,253
| 535
| 50.466933
| 0.806099
| 0.015224
| 0
| 0.699436
| 1
| 0
| 0.095589
| 0.043572
| 0
| 0
| 0.000608
| 0
| 0
| 0
| null | null | 0.014581
| 0.016463
| null | null | 0.002822
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
24a170d536a3d9e97d41fe2c76e62dbe7577dd49
| 19,081
|
py
|
Python
|
tests/test_sensor_cron_sensor.py
|
EncoreTechnologies/stackstorm-errors
|
9001fbef2c830d050ed7ccda800b74733ab41fc9
|
[
"Apache-2.0"
] | null | null | null |
tests/test_sensor_cron_sensor.py
|
EncoreTechnologies/stackstorm-errors
|
9001fbef2c830d050ed7ccda800b74733ab41fc9
|
[
"Apache-2.0"
] | 2
|
2021-02-05T16:27:31.000Z
|
2021-09-23T17:58:59.000Z
|
tests/test_sensor_cron_sensor.py
|
EncoreTechnologies/stackstorm-errors
|
9001fbef2c830d050ed7ccda800b74733ab41fc9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2019 Encore Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2tests.base import BaseSensorTestCase
from cron_sensor import CronSensor
from st2reactor.sensor.base import PollingSensor
import mock
import datetime
import pytz
import yaml
from freezegun import freeze_time
__all__ = [
'CronSensorTestCase'
]
class CronSensorTestCase(BaseSensorTestCase):
__test__ = True
sensor_cls = CronSensor
def test_init(self):
sensor = self.get_sensor_instance()
self.assertIsInstance(sensor, CronSensor)
self.assertIsInstance(sensor, PollingSensor)
@mock.patch('cron_sensor.Client')
@mock.patch('cron_sensor.socket')
def test_setup(self, mock_socket, mock_client):
config = yaml.safe_load(self.get_fixture_content('config_good.yaml'))
sensor = self.get_sensor_instance(config)
mock_socket.getfqdn.return_value = "st2_test"
mock_client.return_value = "Client"
sensor.setup()
@freeze_time("2018-10-26 01:00")
def test_poll(self):
sensor = self.get_sensor_instance()
sensor.st2_fqdn = 'st2_test'
sensor.kv_sensor_name = 'test_sensor'
sensor.kv_enforcements = {}
trigger_attributes = {
'type': "core.st2.CronTimer",
'parameters': {
'day_of_week': '*',
'hour': 1,
'minute': 0,
'second': 0,
'timezone': 'UTC'
}
}
mock_rule = mock.Mock(ref='test_rule', trigger=trigger_attributes)
mock_enforcement = mock.Mock(enforced_at='2018-10-26T01:00:00.01Z',
execution_id='test_execution',
rule={'ref': 'test_rule'})
mock_st2_client = mock.MagicMock()
mock_st2_client.rules.query.return_value = [mock_rule]
mock_st2_client.ruleenforcements.query.return_value = [mock_enforcement]
sensor.st2_client = mock_st2_client
sensor.poll()
@freeze_time("2018-10-26 01:00")
def test_poll_no_enforcements(self):
sensor = self.get_sensor_instance()
sensor.st2_fqdn = 'st2_test'
sensor.kv_sensor_name = 'test_sensor'
sensor.kv_enforcements = {}
trigger_attributes = {
'type': "core.st2.CronTimer",
'parameters': {
'day_of_week': '*',
'hour': 1,
'minute': 0,
'second': 0,
'timezone': 'UTC'
}
}
mock_rule = mock.Mock(ref='test_rule', trigger=trigger_attributes)
mock_st2_client = mock.MagicMock()
mock_st2_client.rules.query.return_value = [mock_rule]
mock_st2_client.ruleenforcements.query.return_value = []
sensor.st2_client = mock_st2_client
trigger_payload = {
'st2_rule_name': 'test_rule',
'st2_server': 'st2_test',
'st2_execution_id': '',
'st2_comments': 'Cron job is not running and no enforcements can be found',
'st2_state': 'open'
}
sensor.poll()
self.assertTriggerDispatched(trigger='errors.error_cron_event',
payload=trigger_payload)
self.assertEqual(sensor.kv_enforcements, {'test_rule': 'error without enforcement id'})
def test_check_enforcements_date_error(self):
sensor = self.get_sensor_instance()
sensor.st2_fqdn = 'st2_test'
sensor.kv_enforcements = {}
mock_enforcement = mock.Mock(id='test_id',
enforced_at='2018-10-20T01:00:00.01Z',
execution_id='test_execution',
rule={'ref': 'test_rule'})
test_dict = {
'previous_cron': datetime.datetime(2018, 10, 26, 1, 0).replace(tzinfo=pytz.UTC),
'next_cron': datetime.datetime(2018, 10, 27, 1, 0).replace(tzinfo=pytz.UTC),
'enforcements': [mock_enforcement]
}
trigger_payload = {
'st2_rule_name': 'test_rule',
'st2_server': 'st2_test',
'st2_execution_id': '',
'st2_comments': 'Cron job did not run',
'st2_state': 'open'
}
result_value = sensor.check_enforcements(**test_dict)
self.assertEqual(result_value, False)
self.assertTriggerDispatched(trigger='errors.error_cron_event',
payload=trigger_payload)
self.assertEqual(sensor.kv_enforcements, {'test_rule': 'error without enforcement id'})
def test_check_enforcements_error(self):
sensor = self.get_sensor_instance()
sensor.st2_fqdn = 'st2_test'
sensor.kv_enforcements = {}
mock_enforcement = mock.Mock(id='test_id',
enforced_at='2018-10-26T01:00:00.01Z',
execution_id='test_execution',
rule={'ref': 'test_rule'})
mock_execution = mock.Mock(status='failed')
mock_st2_client = mock.MagicMock()
mock_st2_client.liveactions.get_by_id.return_value = mock_execution
sensor.st2_client = mock_st2_client
test_dict = {
'previous_cron': datetime.datetime(2018, 10, 26, 1, 0).replace(tzinfo=pytz.UTC),
'next_cron': datetime.datetime(2018, 10, 27, 1, 0).replace(tzinfo=pytz.UTC),
'enforcements': [mock_enforcement]
}
trigger_payload = {
'st2_rule_name': 'test_rule',
'st2_server': 'st2_test',
'st2_execution_id': 'test_execution',
'st2_comments': 'Cronjob execution failed',
'st2_state': 'error'
}
result_value = sensor.check_enforcements(**test_dict)
self.assertEqual(result_value, False)
self.assertTriggerDispatched(trigger='errors.error_cron_event',
payload=trigger_payload)
self.assertEqual(sensor.kv_enforcements, {'test_rule': 'test_id'})
def test_check_enforcements_error_no_dispatch(self):
sensor = self.get_sensor_instance()
sensor.st2_fqdn = 'st2_test'
sensor.kv_enforcements = {'test_rule': 'test_id'}
mock_enforcement = mock.Mock(id='test_id',
enforced_at='2018-10-26T01:00:00.01Z',
execution_id='test_execution',
rule={'ref': 'test_rule'})
mock_execution = mock.Mock(status='failed')
mock_st2_client = mock.MagicMock()
mock_st2_client.liveactions.get_by_id.return_value = mock_execution
sensor.st2_client = mock_st2_client
test_dict = {
'previous_cron': datetime.datetime(2018, 10, 26, 1, 0).replace(tzinfo=pytz.UTC),
'next_cron': datetime.datetime(2018, 10, 27, 1, 0).replace(tzinfo=pytz.UTC),
'enforcements': [mock_enforcement]
}
result_value = sensor.check_enforcements(**test_dict)
self.assertEqual(result_value, False)
self.assertEqual(sensor.kv_enforcements, {'test_rule': 'test_id'})
def test_check_enforcements_error_running(self):
sensor = self.get_sensor_instance()
sensor.st2_fqdn = 'st2_test'
sensor.kv_enforcements = {'test_rule': 'test_id'}
mock_enforcement = mock.Mock(id='test_id',
enforced_at='2018-10-26T01:00:00.01Z',
execution_id='test_execution',
rule={'ref': 'test_rule'})
mock_execution = mock.Mock(status='running')
mock_st2_client = mock.MagicMock()
mock_st2_client.liveactions.get_by_id.return_value = mock_execution
sensor.st2_client = mock_st2_client
test_dict = {
'previous_cron': datetime.datetime(2018, 10, 26, 1, 0).replace(tzinfo=pytz.UTC),
'next_cron': datetime.datetime(2018, 10, 27, 1, 0).replace(tzinfo=pytz.UTC),
'enforcements': [mock_enforcement]
}
result_value = sensor.check_enforcements(**test_dict)
self.assertEqual(result_value, False)
self.assertEqual(sensor.kv_enforcements, {'test_rule': 'test_id'})
def test_check_enforcements_success_dispatch(self):
sensor = self.get_sensor_instance()
sensor.st2_fqdn = 'st2_test'
sensor.kv_enforcements = {'test_rule': 'test_id'}
mock_enforcement = mock.Mock(id='test_id',
enforced_at='2018-10-26T01:00:00.01Z',
execution_id='test_execution',
rule={'ref': 'test_rule'})
mock_execution = mock.Mock(status='success')
mock_st2_client = mock.MagicMock()
mock_st2_client.liveactions.get_by_id.return_value = mock_execution
sensor.st2_client = mock_st2_client
test_dict = {
'previous_cron': datetime.datetime(2018, 10, 26, 1, 0).replace(tzinfo=pytz.UTC),
'next_cron': datetime.datetime(2018, 10, 27, 1, 0).replace(tzinfo=pytz.UTC),
'enforcements': [mock_enforcement]
}
trigger_payload = {
'st2_rule_name': 'test_rule',
'st2_server': 'st2_test',
'st2_execution_id': 'test_execution',
'st2_comments': 'Cronjob ran successfully',
'st2_state': 'success'
}
result_value = sensor.check_enforcements(**test_dict)
self.assertEqual(result_value, True)
self.assertEqual(sensor.kv_enforcements, {'test_rule': 'test_id'})
self.assertTriggerDispatched(trigger='errors.error_cron_event',
payload=trigger_payload)
def test_check_enforcements_no_execution(self):
sensor = self.get_sensor_instance()
sensor.st2_fqdn = 'st2_test'
sensor.kv_enforcements = {}
mock_enforcement = mock.Mock(spec=True,
id="test_id",
enforced_at='2018-10-26T01:00:00.01Z',
rule={'ref': 'test_rule'})
mock_rule_enforcement = mock.Mock(id="test_id",
failure_reason="test_failure")
mock_st2_client = mock.MagicMock()
mock_st2_client.ruleenforcements.get_by_id.return_value = mock_rule_enforcement
sensor.st2_client = mock_st2_client
test_dict = {
'previous_cron': datetime.datetime(2018, 10, 26, 1, 0).replace(tzinfo=pytz.UTC),
'next_cron': datetime.datetime(2018, 10, 27, 1, 0).replace(tzinfo=pytz.UTC),
'enforcements': [mock_enforcement]
}
trigger_payload = {
'st2_rule_name': 'test_rule',
'st2_server': 'st2_test',
'st2_execution_id': '',
'st2_comments': 'test_failure',
'st2_state': 'error'
}
result_value = sensor.check_enforcements(**test_dict)
self.assertEqual(result_value, False)
self.assertTriggerDispatched(trigger='errors.error_cron_event',
payload=trigger_payload)
self.assertEqual(sensor.kv_enforcements, {'test_rule': 'test_id'})
def test_check_enforcements_no_execution_jinja_escaping(self):
sensor = self.get_sensor_instance()
sensor.st2_fqdn = 'st2_test'
sensor.kv_enforcements = {}
mock_enforcement = mock.Mock(spec=True,
id="test_id",
enforced_at='2018-10-26T01:00:00.01Z',
rule={'ref': 'test_rule'})
mock_rule_enforcement = mock.Mock(id="test_id",
failure_reason='{{ test_failure }}')
mock_st2_client = mock.MagicMock()
mock_st2_client.ruleenforcements.get_by_id.return_value = mock_rule_enforcement
sensor.st2_client = mock_st2_client
test_dict = {
'previous_cron': datetime.datetime(2018, 10, 26, 1, 0).replace(tzinfo=pytz.UTC),
'next_cron': datetime.datetime(2018, 10, 27, 1, 0).replace(tzinfo=pytz.UTC),
'enforcements': [mock_enforcement]
}
# W605 = invalid escape sequence flake8 error that we want to ignore
trigger_payload = {
'st2_rule_name': 'test_rule',
'st2_server': 'st2_test',
'st2_execution_id': '',
'st2_comments': '\{\{ test_failure \}\}', # noqa: W605
'st2_state': 'error'
}
result_value = sensor.check_enforcements(**test_dict)
self.assertEqual(result_value, False)
self.assertTriggerDispatched(trigger='errors.error_cron_event',
payload=trigger_payload)
self.assertEqual(sensor.kv_enforcements, {'test_rule': 'test_id'})
def test_check_enforcements_true(self):
sensor = self.get_sensor_instance()
sensor.kv_enforcements = {}
mock_enforcement = mock.Mock(enforced_at='2018-10-26T01:00:00.01Z',
execution_id='test',
rule={'ref': 'test_rule'})
mock_execution = mock.Mock(status='succeeded')
mock_st2_client = mock.MagicMock()
mock_st2_client.liveactions.get_by_id.return_value = mock_execution
sensor.st2_client = mock_st2_client
test_dict = {
'previous_cron': datetime.datetime(2018, 10, 26, 1, 0).replace(tzinfo=pytz.UTC),
'next_cron': datetime.datetime(2018, 10, 27, 1, 0).replace(tzinfo=pytz.UTC),
'enforcements': [mock_enforcement]
}
result_value = sensor.check_enforcements(**test_dict)
self.assertEqual(result_value, True)
def test_dispatch_trigger(self):
sensor = self.get_sensor_instance()
sensor.kv_enforcements = {}
test_dict = {
'st2_rule_name': 'test_rule',
'st2_server': 'st2_test',
'st2_execution_id': 'st2_test_execution',
'st2_comments': 'test_comments',
'st2_state': 'open'
}
sensor.dispatch_trigger(**test_dict)
self.assertTriggerDispatched(trigger='errors.error_cron_event',
payload=test_dict)
def test_check_before_dispatch_no_keys(self):
sensor = self.get_sensor_instance()
sensor.kv_enforcements = {}
test_dict = {
'st2_rule_name': 'test_rule',
'st2_enforcement_id': None
}
result_value = sensor.check_before_dispatch(**test_dict)
self.assertEqual(result_value, True)
def test_check_before_dispatch_no_keys_with_enforcement(self):
sensor = self.get_sensor_instance()
sensor.kv_enforcements = {}
test_dict = {
'st2_rule_name': 'test_rule',
'st2_enforcement_id': 'test_enforcement'
}
result_value = sensor.check_before_dispatch(**test_dict)
self.assertEqual(result_value, True)
def test_check_before_dispatch_enforcement_id(self):
sensor = self.get_sensor_instance()
sensor.kv_enforcements = {'test_rule': 'test_enforcement'}
test_dict = {
'st2_rule_name': 'test_rule',
'st2_enforcement_id': 'test_enforcement'
}
result_value = sensor.check_before_dispatch(**test_dict)
self.assertEqual(result_value, False)
def test_check_before_dispatch_no_enforcement_id(self):
sensor = self.get_sensor_instance()
sensor.kv_enforcements = {'test_rule': 'error without enforcement id'}
test_dict = {
'st2_rule_name': 'test_rule',
'st2_enforcement_id': None
}
result_value = sensor.check_before_dispatch(**test_dict)
self.assertEqual(result_value, False)
def test_delete_from_kv(self):
sensor = self.get_sensor_instance()
sensor.kv_enforcements = {'test_rule': 'test_id', 'test_rule_2': 'test_id_2'}
result_value = sensor.delete_from_kv('test_rule')
self.assertEqual(result_value, {'test_rule_2': 'test_id_2'})
def test_get_cron_rules(self):
sensor = self.get_sensor_instance()
first_name_property = mock.PropertyMock(return_value='test1')
mock_rule1 = mock.Mock(trigger={'type': "core.st2.CronTimer"})
type(mock_rule1).name = first_name_property
second_name_property = mock.PropertyMock(return_value='test2')
mock_rule2 = mock.Mock(trigger={'type': "core.st2.CronTimer"})
type(mock_rule2).name = second_name_property
third_name_property = mock.PropertyMock(return_value='test3')
mock_rule3 = mock.Mock(trigger={'type': "not_cron"})
type(mock_rule3).name = third_name_property
mock_st2_client = mock.MagicMock()
mock_st2_client.rules.query.return_value = [mock_rule1, mock_rule2, mock_rule3]
sensor.st2_client = mock_st2_client
result_value = sensor.get_cron_rules()
self.assertEqual(result_value, [mock_rule1, mock_rule2])
def test_convert_to_crontab_all(self):
sensor = self.get_sensor_instance()
test_dict = {
'day_of_week': '*',
'second': '*',
'minute': '*',
'hour': '*',
'day': '*',
'month': '*',
'year': '*'
}
expected_return = '* * * * * * *'
result_value = sensor.convert_to_crontab(test_dict)
self.assertEqual(result_value, expected_return)
def test_convert_to_crontab_missing(self):
sensor = self.get_sensor_instance()
test_dict = {
'day_of_week': '*',
'second': '*',
'minute': '*',
'day': '*',
'year': '*'
}
expected_return = '* * * * * * *'
result_value = sensor.convert_to_crontab(test_dict)
self.assertEqual(result_value, expected_return)
def test_convert_to_crontab_day_convert(self):
sensor = self.get_sensor_instance()
test_dict = {
'day_of_week': 3,
'second': '*',
'minute': '*',
'month': '*',
}
expected_return = '* * * * * 4 *'
result_value = sensor.convert_to_crontab(test_dict)
self.assertEqual(result_value, expected_return)
| 38.940816
| 95
| 0.597243
| 2,092
| 19,081
| 5.114723
| 0.105641
| 0.035327
| 0.038879
| 0.039065
| 0.827944
| 0.819813
| 0.788598
| 0.781215
| 0.775514
| 0.751215
| 0
| 0.036055
| 0.292123
| 19,081
| 489
| 96
| 39.02045
| 0.756126
| 0.034327
| 0
| 0.715385
| 0
| 0
| 0.1566
| 0.019989
| 0
| 0
| 0
| 0
| 0.087179
| 1
| 0.05641
| false
| 0
| 0.020513
| 0
| 0.084615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
24d93e96c5b7c5414103a9b0c1d7d1229a830e4f
| 153
|
py
|
Python
|
odpt2jre/intermediate_components/company.py
|
friuli-jokyo/python-odpt2jre
|
667c3a018d06d7e062f0e984dfd2323490e8dfb9
|
[
"MIT"
] | null | null | null |
odpt2jre/intermediate_components/company.py
|
friuli-jokyo/python-odpt2jre
|
667c3a018d06d7e062f0e984dfd2323490e8dfb9
|
[
"MIT"
] | null | null | null |
odpt2jre/intermediate_components/company.py
|
friuli-jokyo/python-odpt2jre
|
667c3a018d06d7e062f0e984dfd2323490e8dfb9
|
[
"MIT"
] | null | null | null |
from .multi_language_expression import MultiLanguageExpressionWithTable
class CompanyName(MultiLanguageExpressionWithTable, header="Company"):
pass
| 30.6
| 71
| 0.862745
| 12
| 153
| 10.833333
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084967
| 153
| 5
| 72
| 30.6
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
70072ee2abb0c1aaee1017d0d13ae16aaf56dd33
| 3,716
|
py
|
Python
|
forms/migrations/0018_auto_20150416_0855.py
|
digideskio/gmmp
|
d82a4be0787c3a3a9e27dc590d7974f9f884fbb6
|
[
"Apache-2.0"
] | 4
|
2020-01-05T09:14:19.000Z
|
2022-02-17T03:22:09.000Z
|
forms/migrations/0018_auto_20150416_0855.py
|
digideskio/gmmp
|
d82a4be0787c3a3a9e27dc590d7974f9f884fbb6
|
[
"Apache-2.0"
] | 68
|
2019-12-23T02:19:55.000Z
|
2021-04-23T06:13:36.000Z
|
forms/migrations/0018_auto_20150416_0855.py
|
OpenUpSA/gmmp
|
d82a4be0787c3a3a9e27dc590d7974f9f884fbb6
|
[
"Apache-2.0"
] | 2
|
2019-07-25T11:53:10.000Z
|
2020-06-22T02:07:40.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration for the ``forms`` app.

    Re-declares several ``PositiveIntegerField`` columns on the sheet
    models; the visible changes are to ``help_text``/``verbose_name``
    metadata only, so Django records new field state without altering
    the underlying column types.
    """

    # Must be applied on top of migration 0017.
    dependencies = [
        ('forms', '0017_auto_20150331_1815'),
    ]

    operations = [
        migrations.AlterField(
            model_name='internetnewssheet',
            name='webpage_layer_no',
            field=models.PositiveIntegerField(help_text='Webpage Layer Number. Homepage=1, One click away=2, Five clicks away= 5, etc. Note that if a story appears on the front page, code with 1', verbose_name='(1) Webpage Layer Number'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='newspapersheet',
            name='page_number',
            field=models.PositiveIntegerField(help_text='Write in the number of the page on which the story begins. Story appears on first page = 1, Seventh page = 7, etc.', verbose_name='(1) Page Number'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='radiosheet',
            name='item_number',
            field=models.PositiveIntegerField(help_text='Write in the number that describes the position of the story within the newscast. E.g. the first story in the newscast is item 1; the seventh story is item 7.', verbose_name='(1) Item Number'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='radiosheet',
            name='num_female_anchors',
            field=models.PositiveIntegerField(help_text='The anchor (or announcer, or presenter) is the person who introduces the newscast and the individual items within it. <strong>Note: You should only include the anchors/announcers. Do not include reporters or other', verbose_name='Number of female anchors'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='radiosheet',
            name='num_male_anchors',
            field=models.PositiveIntegerField(help_text='The anchor (or announcer, or presenter) is the person who introduces the newscast and the individual items within it. <strong>Note: You should only include the anchors/announcers. Do not include reporters or other journalists</strong>', verbose_name='Number of male anchors'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='televisionsheet',
            name='item_number',
            field=models.PositiveIntegerField(help_text='Write in the number that describes the position of the story within the newscast. E.g. the first story in the newscast is item 1; the seventh story is item 7.', verbose_name='(1) Item Number'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='televisionsheet',
            name='num_female_anchors',
            field=models.PositiveIntegerField(help_text='The anchor (or announcer, or presenter) is the person who introduces the newscast and the individual items within it. <strong>Note: You should only include the anchors/announcers. Do not include reporters or other', verbose_name='Number of female anchors'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='televisionsheet',
            name='num_male_anchors',
            field=models.PositiveIntegerField(help_text='The anchor (or announcer, or presenter) is the person who introduces the newscast and the individual items within it. <strong>Note: You should only include the anchors/announcers. Do not include reporters or other journalists</strong>', verbose_name='Number of male anchors'),
            preserve_default=True,
        ),
    ]
| 58.984127
| 333
| 0.680571
| 454
| 3,716
| 5.453744
| 0.229075
| 0.06462
| 0.080775
| 0.0937
| 0.81664
| 0.800889
| 0.800889
| 0.800889
| 0.779079
| 0.779079
| 0
| 0.010912
| 0.235468
| 3,716
| 62
| 334
| 59.935484
| 0.860612
| 0.005651
| 0
| 0.75
| 0
| 0.142857
| 0.507176
| 0.006228
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.035714
| 0
| 0.089286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
702c96a1de8cf99f2330f37ffbd8f776d4fd3422
| 62,268
|
py
|
Python
|
edge-bootstrap/python/edgectl/test/host/test_dockerclient.py
|
CIPop/iotedge
|
401b6d19effbb2d5f347434ce0dc01599cefe93e
|
[
"MIT"
] | 3
|
2018-12-27T18:15:15.000Z
|
2020-02-12T05:23:09.000Z
|
edge-bootstrap/python/edgectl/test/host/test_dockerclient.py
|
CIPop/iotedge
|
401b6d19effbb2d5f347434ce0dc01599cefe93e
|
[
"MIT"
] | 2
|
2018-12-28T04:48:34.000Z
|
2019-01-15T21:11:30.000Z
|
edge-bootstrap/python/edgectl/test/host/test_dockerclient.py
|
CIPop/iotedge
|
401b6d19effbb2d5f347434ce0dc01599cefe93e
|
[
"MIT"
] | 2
|
2018-11-06T23:54:28.000Z
|
2019-04-03T06:38:47.000Z
|
"""Implementation of tests for module `edgectl.deployment.deploymentdocker.py`."""
from __future__ import print_function
import sys
import unittest
import os
from mock import mock, patch, mock_open, MagicMock, PropertyMock
import docker
import edgectl.errors
from edgectl.host.dockerclient import EdgeDockerClient
if sys.version_info[0] < 3:
OPEN_BUILTIN = '__builtin__.open'
else:
OPEN_BUILTIN = 'builtins.open'
# pylint: disable=invalid-name
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
# pylint: disable=too-many-lines
# pylint: disable=no-self-use
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-arguments
class TestEdgeDockerClientCheckAvailability(unittest.TestCase):
    """Unit tests for API EdgeDockerClient.check_availability"""

    @mock.patch('docker.DockerClient', autospec=True)
    def test_check_availability_valid(self, mock_docker_client):
        """
        check_availability returns True when the docker ``info`` call succeeds.
        """
        edge_client = EdgeDockerClient.create_instance(mock_docker_client)

        is_available = edge_client.check_availability()

        mock_docker_client.info.assert_called_with()
        self.assertTrue(is_available)

    @mock.patch('docker.DockerClient', autospec=True)
    def test_check_availability_invalid(self, mock_docker_client):
        """
        check_availability returns False when the docker ``info`` call raises.
        """
        mock_docker_client.info.side_effect = docker.errors.APIError('docker unavailable')
        edge_client = EdgeDockerClient.create_instance(mock_docker_client)

        is_available = edge_client.check_availability()

        mock_docker_client.info.assert_called_with()
        self.assertFalse(is_available)
class TestEdgeDockerClientLogin(unittest.TestCase):
    """Unit tests for API EdgeDockerClient.login"""

    @mock.patch('docker.DockerClient', autospec=True)
    def test_login_valid(self, mock_docker_client):
        """
        login forwards address, username and password to the docker SDK.
        """
        edge_client = EdgeDockerClient.create_instance(mock_docker_client)
        registry_address = 'test_address'
        registry_user = 'test_user'
        registry_pass = 'test_pass'

        edge_client.login(registry_address, registry_user, registry_pass)

        mock_docker_client.login.assert_called_with(username=registry_user,
                                                    password=registry_pass,
                                                    registry=registry_address)

    @mock.patch('docker.DockerClient', autospec=True)
    def test_login_fails(self, mock_docker_client):
        """
        login raises EdgeDeploymentError when the docker SDK login errors out.
        """
        mock_docker_client.login.side_effect = docker.errors.APIError('login fails')
        edge_client = EdgeDockerClient.create_instance(mock_docker_client)

        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            edge_client.login('test_address', 'test_user', 'test_pass')
class TestEdgeDockerClientGetOSType(unittest.TestCase):
    """Unit tests for API EdgeDockerClient.get_os_type"""

    @mock.patch('docker.DockerClient', autospec=True)
    def test_get_os_valid(self, mock_docker_client):
        """
        get_os_type returns the lowercased OSType reported by docker info.
        """
        reported_os = 'TEST_OS'
        mock_docker_client.info.return_value = {'OSType': reported_os}
        edge_client = EdgeDockerClient.create_instance(mock_docker_client)

        observed_os = edge_client.get_os_type()

        mock_docker_client.info.assert_called_with()
        self.assertEqual(observed_os, reported_os.lower())

    @mock.patch('docker.DockerClient', autospec=True)
    def test_get_os_fails(self, mock_docker_client):
        """
        get_os_type raises EdgeError when docker info errors out.
        """
        mock_docker_client.info.side_effect = docker.errors.APIError('info fails')
        edge_client = EdgeDockerClient.create_instance(mock_docker_client)

        with self.assertRaises(edgectl.errors.EdgeError):
            edge_client.get_os_type()
class TestEdgeDockerClientGetLocalImageSHAId(unittest.TestCase):
    """Unit tests for API EdgeDockerClient.get_local_image_sha_id"""

    @mock.patch('docker.APIClient', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_get_local_image_sha_id_valid(self, mock_docker_client, mock_docker_api_client):
        """
        A successful inspect_image response yields the image's SHA id.
        """
        expected_id = '1234'
        mock_docker_api_client.inspect_image.return_value = {'Id': expected_id}
        type(mock_docker_client).api = PropertyMock(return_value=mock_docker_api_client)
        edge_client = EdgeDockerClient.create_instance(mock_docker_client)

        observed_id = edge_client.get_local_image_sha_id('test_image')

        mock_docker_api_client.inspect_image.assert_called_with('test_image')
        self.assertEqual(observed_id, expected_id)

    @mock.patch('docker.APIClient', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_get_local_image_sha_id_fails(self, mock_docker_client, mock_docker_api_client):
        """
        An inspect_image failure is swallowed and reported as a None id.
        """
        mock_docker_api_client.inspect_image.side_effect = docker.errors.APIError('inspect fails')
        type(mock_docker_client).api = PropertyMock(return_value=mock_docker_api_client)
        edge_client = EdgeDockerClient.create_instance(mock_docker_client)

        observed_id = edge_client.get_local_image_sha_id('test_image')

        mock_docker_api_client.inspect_image.assert_called_with('test_image')
        self.assertEqual(observed_id, None)
class TestEdgeDockerClientPull(unittest.TestCase):
    """Unit tests for API EdgeDockerClient.pull

    pull() is expected to return True when a newer image was fetched (local
    id differs from the registry id, or no local image existed) and False
    when the local image is already current.
    """

    @mock.patch('edgectl.host.dockerclient.EdgeDockerClient.get_local_image_sha_id')
    @mock.patch('docker.APIClient', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_pull_image_exists_locally_with_no_newer_image_valid(self,
                                                                 mock_docker_client,
                                                                 mock_docker_api_client,
                                                                 mock_get_local_id):
        """
        Tests call stack when docker client pull is called with a locally available image
        and no newer image available in the registry
        """
        # arrange
        # Local id equals the post-pull registry id, so pull() should report False.
        test_id = '1234'
        mock_get_local_id.return_value = test_id
        mock_docker_api_client.inspect_image.return_value = {'Id': test_id}
        type(mock_docker_client).api = PropertyMock(return_value=mock_docker_api_client)
        client = EdgeDockerClient.create_instance(mock_docker_client)
        image = 'test_image'
        username = "test_user"
        password = "test_password"
        auth_dict = {'username': username, 'password': password}
        # act
        result = client.pull(image, username, password)
        # assert
        mock_get_local_id.assert_called_with(image)
        mock_docker_api_client.inspect_image.assert_called_with(image)
        mock_docker_client.images.pull.assert_called_with(image, auth_config=auth_dict)
        self.assertFalse(result)

    @mock.patch('edgectl.host.dockerclient.EdgeDockerClient.get_local_image_sha_id')
    @mock.patch('docker.APIClient', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_pull_image_exists_locally_with_newer_image_valid(self,
                                                              mock_docker_client,
                                                              mock_docker_api_client,
                                                              mock_get_local_id):
        """
        Tests call stack when docker client pull is called with a locally available image
        and a newer image available in the registry
        """
        # arrange
        # Local id ('1000') differs from the registry id ('1234') -> pull() True.
        test_id = '1234'
        mock_get_local_id.return_value = '1000'
        mock_docker_api_client.inspect_image.return_value = {'Id': test_id}
        type(mock_docker_client).api = PropertyMock(return_value=mock_docker_api_client)
        client = EdgeDockerClient.create_instance(mock_docker_client)
        image = 'test_image'
        username = "test_user"
        password = "test_password"
        auth_dict = {'username': username, 'password': password}
        # act
        result = client.pull(image, username, password)
        # assert
        mock_get_local_id.assert_called_with(image)
        mock_docker_api_client.inspect_image.assert_called_with(image)
        mock_docker_client.images.pull.assert_called_with(image, auth_config=auth_dict)
        self.assertTrue(result)

    @mock.patch('edgectl.host.dockerclient.EdgeDockerClient.get_local_image_sha_id')
    @mock.patch('docker.APIClient', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_pull_image_exists_locally_with_newer_image_no_credentials_valid(self,
                                                                             mock_docker_client,
                                                                             mock_docker_api_client,
                                                                             mock_get_local_id):
        """
        Tests call stack when docker client pull is called with a locally available image
        and no newer image available in the registry to be accessed without any credentials
        """
        # arrange
        # No username/password -> pull() must pass auth_config=None to the SDK.
        test_id = '1234'
        mock_get_local_id.return_value = '1000'
        mock_docker_api_client.inspect_image.return_value = {'Id': test_id}
        type(mock_docker_client).api = PropertyMock(return_value=mock_docker_api_client)
        client = EdgeDockerClient.create_instance(mock_docker_client)
        image = 'test_image'
        auth_dict = None
        # act
        result = client.pull(image, None, None)
        # assert
        mock_get_local_id.assert_called_with(image)
        mock_docker_api_client.inspect_image.assert_called_with(image)
        mock_docker_client.images.pull.assert_called_with(image, auth_config=auth_dict)
        self.assertTrue(result)

    @mock.patch('edgectl.host.dockerclient.EdgeDockerClient.get_local_image_sha_id')
    @mock.patch('docker.APIClient', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_pull_image_no_image_exists_locally(self,
                                                mock_docker_client,
                                                mock_docker_api_client,
                                                mock_get_local_id):
        """
        Tests call stack when docker client pull is called with no locally available image
        """
        # arrange
        # No local image -> inspect_image is never consulted, pull() returns True.
        test_id = '1234'
        mock_get_local_id.return_value = None
        mock_docker_api_client.inspect_image.return_value = {'Id': test_id}
        type(mock_docker_client).api = PropertyMock(return_value=mock_docker_api_client)
        client = EdgeDockerClient.create_instance(mock_docker_client)
        image = 'test_image'
        username = "test_user"
        password = "test_password"
        auth_dict = {'username': username, 'password': password}
        # act
        result = client.pull(image, username, password)
        # assert
        mock_get_local_id.assert_called_with(image)
        mock_docker_api_client.inspect_image.assert_not_called()
        mock_docker_client.images.pull.assert_called_with(image, auth_config=auth_dict)
        self.assertTrue(result)

    @mock.patch('edgectl.host.dockerclient.EdgeDockerClient.get_local_image_sha_id')
    @mock.patch('docker.APIClient', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_pull_raises_exception(self,
                                   mock_docker_client,
                                   mock_docker_api_client,
                                   mock_get_local_id):
        """
        Tests call stack when docker client pull raises exception
        """
        # arrange
        # SDK pull failure must surface as EdgeDeploymentError.
        test_id = '1234'
        mock_get_local_id.return_value = None
        mock_docker_api_client.inspect_image.return_value = {'Id': test_id}
        type(mock_docker_client).api = PropertyMock(return_value=mock_docker_api_client)
        mock_docker_client.images.pull.side_effect = docker.errors.APIError('docker unavailable')
        client = EdgeDockerClient.create_instance(mock_docker_client)
        image = 'test_image'
        username = "test_user"
        password = "test_password"
        # act, assert
        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            client.pull(image, username, password)
class TestContainerSpec(docker.models.containers.Container):
    """
    Class used in mock autospec for containers

    Subclassing the real docker SDK Container lets
    ``mock.patch(..., autospec=TestContainerSpec)`` produce mocks whose
    ``name`` and ``status`` attributes are plain, settable attributes
    (with ``autospec=True`` they could not be set — see the notes in
    the status tests below).
    """
    # Placeholder attributes so autospec'd mocks expose them as assignable.
    name = 'name'
    status = 'status'

    def stop(self, **kwargs):
        """ Mock stop method """
        pass

    def start(self, **kwargs):
        """ Mock start method """
        pass

    def remove(self, **kwargs):
        """ Mock remove method """
        pass
class TestEdgeDockerContainerOps(unittest.TestCase):
    """
    Unit tests for APIs
        EdgeDockerClient.start
        EdgeDockerClient.restart
        EdgeDockerClient.stop
        EdgeDockerClient.remove
        EdgeDockerClient.status
        EdgeDockerClient.stop_by_label
        EdgeDockerClient.remove_by_label
        EdgeDockerClient.create
    """
    # Shared fixture constants used across all container-op tests.
    TEST_CONTAINER_NAME = 'test_name'
    TEST_LABEL = 'test_label'

    @mock.patch('docker.models.containers.Container', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_start_valid(self, mock_docker_client, mock_container):
        """
        Tests execution of a valid start command
        """
        # arrange
        mock_docker_client.containers.get.return_value = mock_container
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act
        client.start(self.TEST_CONTAINER_NAME)
        # assert
        mock_docker_client.containers.get.assert_called_with(self.TEST_CONTAINER_NAME)
        mock_container.start.assert_called_with()

    @mock.patch('docker.models.containers.Container', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_start_fails_raises_exception(self, mock_docker_client, mock_container):
        """
        Tests whether EdgeDeploymentError is raised when docker container start fails
        """
        # arrange
        mock_container.start.side_effect = docker.errors.APIError('start failure')
        mock_docker_client.containers.get.return_value = mock_container
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act, assert
        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            client.start(self.TEST_CONTAINER_NAME)

    @mock.patch('docker.DockerClient', autospec=True)
    def test_start_invalid_container_raises_exception(self, mock_docker_client):
        """
        Tests whether EdgeDeploymentError is raised when docker container start fails
        """
        # arrange
        # Unknown container name: containers.get itself raises NotFound.
        mock_docker_client.containers.get.side_effect = docker.errors.NotFound('invalid image')
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act, assert
        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            client.start(self.TEST_CONTAINER_NAME)

    @mock.patch('docker.models.containers.Container', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_restart_valid(self, mock_docker_client, mock_container):
        """
        Tests execution of a valid restart command
        """
        # arrange
        mock_docker_client.containers.get.return_value = mock_container
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act
        client.restart(self.TEST_CONTAINER_NAME)
        # assert
        # Default restart timeout is expected to be 5 seconds.
        mock_docker_client.containers.get.assert_called_with(self.TEST_CONTAINER_NAME)
        mock_container.restart.assert_called_with(timeout=5)

    @mock.patch('docker.models.containers.Container', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_restart_with_args_valid(self, mock_docker_client, mock_container):
        """
        Tests execution of a valid restart command with args
        """
        # arrange
        mock_docker_client.containers.get.return_value = mock_container
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act
        # Caller-supplied timeout_int must be forwarded as the SDK timeout.
        client.restart(self.TEST_CONTAINER_NAME, timeout_int=50)
        # assert
        mock_docker_client.containers.get.assert_called_with(self.TEST_CONTAINER_NAME)
        mock_container.restart.assert_called_with(timeout=50)

    @mock.patch('docker.models.containers.Container', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_restart_fails_raises_exception(self, mock_docker_client, mock_container):
        """
        Tests whether EdgeDeploymentError is raised when docker container restart fails
        """
        # arrange
        mock_container.restart.side_effect = docker.errors.APIError('restart failure')
        mock_docker_client.containers.get.return_value = mock_container
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act, assert
        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            client.restart(self.TEST_CONTAINER_NAME)

    @mock.patch('docker.DockerClient', autospec=True)
    def test_restart_invalid_container_raises_exception(self, mock_docker_client):
        """
        Tests whether EdgeDeploymentError is raised when docker container restart fails
        """
        # arrange
        mock_docker_client.containers.get.side_effect = docker.errors.NotFound('invalid image')
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act, assert
        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            client.restart(self.TEST_CONTAINER_NAME)

    @mock.patch('docker.models.containers.Container', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_stop_valid(self, mock_docker_client, mock_container):
        """
        Tests execution of a valid stop command
        """
        # arrange
        mock_docker_client.containers.get.return_value = mock_container
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act
        client.stop(self.TEST_CONTAINER_NAME)
        # assert
        mock_docker_client.containers.get.assert_called_with(self.TEST_CONTAINER_NAME)
        mock_container.stop.assert_called_with()

    @mock.patch('docker.models.containers.Container', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_stop_fails_raises_exception(self, mock_docker_client, mock_container):
        """
        Tests whether EdgeDeploymentError is raised when docker container stop fails
        """
        # arrange
        mock_container.stop.side_effect = docker.errors.APIError('stop failure')
        mock_docker_client.containers.get.return_value = mock_container
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act, assert
        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            client.stop(self.TEST_CONTAINER_NAME)

    @mock.patch('docker.DockerClient', autospec=True)
    def test_stop_invalid_container_raises_exception(self, mock_docker_client):
        """
        Tests whether EdgeDeploymentError is raised when docker container stop fails
        """
        # arrange
        mock_docker_client.containers.get.side_effect = docker.errors.NotFound('invalid image')
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act, assert
        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            client.stop(self.TEST_CONTAINER_NAME)

    @mock.patch('docker.models.containers.Container', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_remove_valid(self, mock_docker_client, mock_container):
        """
        Tests execution of a valid remove command
        """
        # arrange
        mock_docker_client.containers.get.return_value = mock_container
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act
        client.remove(self.TEST_CONTAINER_NAME)
        # assert
        mock_docker_client.containers.get.assert_called_with(self.TEST_CONTAINER_NAME)
        mock_container.remove.assert_called_with()

    @mock.patch('docker.models.containers.Container', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_remove_fails_raises_exception(self, mock_docker_client, mock_container):
        """
        Tests whether EdgeDeploymentError is raised when docker container remove fails
        """
        # arrange
        mock_container.remove.side_effect = docker.errors.APIError('remove failure')
        mock_docker_client.containers.get.return_value = mock_container
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act, assert
        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            client.remove(self.TEST_CONTAINER_NAME)

    @mock.patch('docker.DockerClient', autospec=True)
    def test_remove_invalid_container_raises_exception(self, mock_docker_client):
        """
        Tests whether EdgeDeploymentError is raised when docker container remove fails
        """
        # arrange
        mock_docker_client.containers.get.side_effect = docker.errors.NotFound('invalid image')
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act, assert
        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            client.remove(self.TEST_CONTAINER_NAME)

    @mock.patch('docker.models.containers.Container', autospec=TestContainerSpec)
    @mock.patch('docker.models.containers.Container', autospec=TestContainerSpec)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_status_valid(self, mock_docker_client, mock_container, mock_non_match_container):
        """
        Tests execution of a valid status command
        """
        # @note when setting the container object using autospec=True, it could not
        # set the properties name and status of the mock object.
        # Thus we resort to using TestContainerSpec as the autospec where these are
        # settable. It should be noted that for the status test it was sufficient to use
        # @mock.patch('docker.models.containers.Container') directly but we are using
        # TestContainerSpec for consistency
        # arrange
        # Two containers are listed; only the one whose name matches should
        # contribute its status to the result.
        test_status = 'running'
        type(mock_container).status = PropertyMock(return_value=test_status)
        type(mock_container).name = PropertyMock(return_value=self.TEST_CONTAINER_NAME)
        type(mock_non_match_container).status = PropertyMock(return_value='running')
        type(mock_non_match_container).name = PropertyMock(return_value='blah')
        mock_docker_client.containers.list.return_value = [mock_non_match_container, mock_container]
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act
        result = client.status(self.TEST_CONTAINER_NAME)
        # assert
        mock_docker_client.containers.list.assert_called_with(all=True)
        self.assertEqual(test_status, result)

    @mock.patch('docker.DockerClient', autospec=True)
    def test_status_raises_exception(self, mock_docker_client):
        """
        Tests whether EdgeDeploymentError is raised when docker containers list fails
        """
        # arrange
        mock_docker_client.containers.list.side_effect = docker.errors.APIError('list failure')
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act, assert
        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            client.status(self.TEST_CONTAINER_NAME)

    @mock.patch('docker.models.containers.Container', autospec=TestContainerSpec)
    @mock.patch('docker.models.containers.Container', autospec=TestContainerSpec)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_stop_by_label_valid(self, mock_docker_client,
                                 mock_container1, mock_container2):
        """
        Tests execution of a valid stop by label command
        """
        # @note when setting multiple container mocks autospec=True failed which is
        # why we resort to using TestContainerSpec as the autospec class
        # arrange
        mock_docker_client.containers.list.return_value = [mock_container1, mock_container2]
        client = EdgeDockerClient.create_instance(mock_docker_client)
        filter_dict = {'label': self.TEST_LABEL}
        # act
        client.stop_by_label(self.TEST_LABEL)
        # assert
        # Every container matching the label filter must be stopped.
        mock_docker_client.containers.list.assert_called_with(all=True, filters=filter_dict)
        mock_container1.stop.assert_called_with()
        mock_container2.stop.assert_called_with()

    @mock.patch('docker.DockerClient', autospec=True)
    def test_stop_by_label_raises_exception(self, mock_docker_client):
        """
        Tests whether EdgeDeploymentError is raised when docker containers list fails
        """
        # arrange
        mock_docker_client.containers.list.side_effect = docker.errors.APIError('list failure')
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act, assert
        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            client.stop_by_label(self.TEST_LABEL)

    @mock.patch('docker.models.containers.Container', autospec=TestContainerSpec)
    @mock.patch('docker.models.containers.Container', autospec=TestContainerSpec)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_remove_by_label_valid(self, mock_docker_client,
                                   mock_container1, mock_container2):
        """
        Tests execution of a valid remove by label command
        """
        # @note when setting multiple container mocks autospec=True failed which is
        # why we resort to using TestContainerSpec as the autospec class
        # arrange
        mock_docker_client.containers.list.return_value = [mock_container1, mock_container2]
        client = EdgeDockerClient.create_instance(mock_docker_client)
        filter_dict = {'label': self.TEST_LABEL}
        # act
        client.remove_by_label(self.TEST_LABEL)
        # assert
        # Every container matching the label filter must be removed.
        mock_docker_client.containers.list.assert_called_with(all=True, filters=filter_dict)
        mock_container1.remove.assert_called_with()
        mock_container2.remove.assert_called_with()

    @mock.patch('docker.DockerClient', autospec=True)
    def test_remove_by_label_raises_exception(self, mock_docker_client):
        """
        Tests whether EdgeDeploymentError is raised when docker containers list fails
        """
        # arrange
        mock_docker_client.containers.list.side_effect = docker.errors.APIError('list failure')
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act, assert
        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            client.remove_by_label(self.TEST_LABEL)

    @mock.patch('docker.DockerClient', autospec=True)
    def test_create_valid(self, mock_docker_client):
        """
        Tests execution of a valid docker create container command
        """
        # arrange
        image = 'test_image'
        container_name = 'test_name'
        detach_bool = True
        env_dict = {'test_key_env': 'test_val_env'}
        nw_name = 'test_network_name'
        ports_dict = {'test_key_ports': 'test_val_ports'}
        volume_dict = {'test_key_volume': {'bind': 'test_val_bind', 'mode': 'test_val_mode'}}
        log_config_dict = {'type': 'test_val_log', 'config': {'opt1': 'val1'}}
        mounts_list = ['mount1', 'mount2']
        restart_policy_dict = {'test_key_restart': 'test_val_restart'}
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act
        client.create(image,
                      name=container_name,
                      detach=detach_bool,
                      environment=env_dict,
                      network=nw_name,
                      ports=ports_dict,
                      volumes=volume_dict,
                      log_config=log_config_dict,
                      mounts=mounts_list,
                      restart_policy=restart_policy_dict)
        # assert
        # All keyword arguments must be forwarded unchanged to the SDK create.
        mock_docker_client.containers.create.assert_called_with(image,
                                                                detach=detach_bool,
                                                                environment=env_dict,
                                                                name=container_name,
                                                                network=nw_name,
                                                                ports=ports_dict,
                                                                volumes=volume_dict,
                                                                log_config=log_config_dict,
                                                                mounts=mounts_list,
                                                                restart_policy=restart_policy_dict)

    def _create_common_invocation(self, client):
        # Shared helper: performs a representative create() call so the
        # exception tests below only differ in the configured side effect.
        image = 'test_image'
        container_name = 'test_name'
        detach_bool = True
        env_dict = {'test_key_env': 'test_val_env'}
        nw_name = 'test_network_name'
        ports_dict = {'test_key_ports': 'test_val_ports'}
        volume_dict = {'test_key_volume': {'bind': 'test_val_bind', 'mode': 'test_val_mode'}}
        log_config_dict = {'type': 'test_val_log', 'config': {'opt1': 'val1'}}
        mounts_list = ['mount1', 'mount2']
        restart_policy_dict = {'test_key_restart': 'test_val_restart'}
        # act
        client.create(image,
                      name=container_name,
                      detach=detach_bool,
                      environment=env_dict,
                      network=nw_name,
                      ports=ports_dict,
                      volumes=volume_dict,
                      log_config=log_config_dict,
                      mounts=mounts_list,
                      restart_policy=restart_policy_dict)

    @mock.patch('docker.DockerClient', autospec=True)
    def test_create_raises_except_when_containerError_is_raised(self,
                                                                mock_docker_client):
        """
        Tests execution of create container raises exception edgectl.errors.EdgeDeploymentError
        when docker client API create raises ContainerError
        """
        # arrange
        except_obj = docker.errors.ContainerError('container', 1, 'cmd', 'image', 'stderr')
        mock_docker_client.containers.create.side_effect = except_obj
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act, assert
        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            self._create_common_invocation(client)

    @mock.patch('docker.DockerClient', autospec=True)
    def test_create_raises_except_when_ImageNotFound_is_raised(self,
                                                               mock_docker_client):
        """
        Tests execution of create container raises exception edgectl.errors.EdgeDeploymentError
        when docker client API create raises ImageNotFound
        """
        # arrange
        except_obj = docker.errors.ImageNotFound('image error')
        mock_docker_client.containers.create.side_effect = except_obj
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act, assert
        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            self._create_common_invocation(client)

    @mock.patch('docker.DockerClient', autospec=True)
    def test_create_raises_except_when_APIError_is_raised(self,
                                                          mock_docker_client):
        """
        Tests execution of create container raises exception edgectl.errors.EdgeDeploymentError
        when docker client API create raises APIError
        """
        # arrange
        except_obj = docker.errors.APIError('image error')
        mock_docker_client.containers.create.side_effect = except_obj
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act, assert
        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            self._create_common_invocation(client)
class TestEdgeDockerNetworkCreate(unittest.TestCase):
    """Unit tests for API EdgeDockerClient.create_network"""
    # Network name shared by every test below; the docker client is mocked,
    # so this value never reaches a real daemon.
    TEST_NETWORK = 'test_network'

    @mock.patch('docker.DockerClient', autospec=True)
    def test_nw_create_no_networks_exist_linux(self, mock_docker_client):
        """
        Tests call stack when docker network create is called when there are no networks
        available for Linux type OS.
        """
        # arrange: Linux daemon, no pre-existing networks
        mock_docker_client.info.return_value = {'OSType': 'Linux'}
        mock_docker_client.networks.list.return_value = None
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act
        client.create_network(self.TEST_NETWORK)
        # assert: Linux hosts are expected to use the 'bridge' driver
        mock_docker_client.networks.create.assert_called_with(self.TEST_NETWORK, driver='bridge')

    @mock.patch('docker.DockerClient', autospec=True)
    def test_nw_create_no_networks_exist_windows(self, mock_docker_client):
        """
        Tests call stack when docker network create is called when there are no networks
        available for Windows type OS.
        """
        # arrange: Windows daemon, no pre-existing networks
        mock_docker_client.info.return_value = {'OSType': 'Windows'}
        mock_docker_client.networks.list.return_value = None
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act
        client.create_network(self.TEST_NETWORK)
        # assert: Windows hosts are expected to use the 'nat' driver
        mock_docker_client.networks.create.assert_called_with(self.TEST_NETWORK, driver='nat')

    @mock.patch('docker.DockerClient', autospec=True)
    def test_nw_create_other_non_matching_networks_exist(self, mock_docker_client):
        """
        Tests call stack when docker network create is called when there are
        other networks available that do not match the provided network name.
        """
        # arrange: network list returns an empty list (no match for TEST_NETWORK)
        mock_docker_client.info.return_value = {'OSType': 'Linux'}
        mock_docker_client.networks.list.return_value = []
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act
        client.create_network(self.TEST_NETWORK)
        # assert
        mock_docker_client.networks.create.assert_called_with(self.TEST_NETWORK, driver='bridge')

    @mock.patch('docker.models.networks.Network', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_nw_create_network_exists(self, mock_docker_client, mock_network):
        """
        Tests that docker network create is not called when a network matching
        the provided network name already exists.
        """
        # arrange: network list returns one (mocked) matching network
        mock_docker_client.info.return_value = {'OSType': 'Linux'}
        mock_docker_client.networks.list.return_value = [mock_network]
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act
        client.create_network(self.TEST_NETWORK)
        # assert: nothing to create, the network is already there
        mock_docker_client.networks.create.assert_not_called()

    @mock.patch('docker.DockerClient', autospec=True)
    def test_create_network_raises_exception_when_info_fails(self, mock_docker_client):
        """
        Tests whether EdgeDeploymentError is raised when docker info list fails
        """
        # arrange
        mock_docker_client.info.side_effect = docker.errors.APIError('info failure')
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act, assert
        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            client.create_network(self.TEST_NETWORK)

    @mock.patch('docker.DockerClient', autospec=True)
    def test_create_network_raises_exception_when_list_fails(self, mock_docker_client):
        """
        Tests whether EdgeDeploymentError is raised when docker network list fails
        """
        # arrange
        mock_docker_client.info.return_value = {'OSType': 'Linux'}
        mock_docker_client.networks.list.side_effect = docker.errors.APIError('list failure')
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act, assert
        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            client.create_network(self.TEST_NETWORK)

    @mock.patch('docker.DockerClient', autospec=True)
    def test_create_network_raises_exception_when_create_fails(self, mock_docker_client):
        """
        Tests whether EdgeDeploymentError is raised when docker network create fails
        """
        # arrange
        mock_docker_client.info.return_value = {'OSType': 'Linux'}
        mock_docker_client.networks.list.return_value = None
        mock_docker_client.networks.create.side_effect = docker.errors.APIError('nw create failed')
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act, assert
        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            client.create_network(self.TEST_NETWORK)
class TestEdgeDockerVolumes(unittest.TestCase):
    """Unit tests for API EdgeDockerClient.create_volume and EdgeDockerClient.remove_volume"""
    # Fixed fixture names used by every test below; the docker client is
    # mocked, so these values never reach a real daemon.
    TEST_CONTAINER_NAME = 'test_container'
    TEST_VOLUME_NAME = 'test_volume'

    @mock.patch('docker.DockerClient', autospec=True)
    def test_create_volume_when_it_does_not_exist(self, mock_docker_client):
        """
        Tests call stack when docker volume create is called when the volume does not exist.
        """
        # arrange: volumes.get reports the volume as missing
        mock_docker_client.volumes.get.side_effect = docker.errors.NotFound('no volume exists')
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act
        client.create_volume(self.TEST_VOLUME_NAME)
        # assert
        mock_docker_client.volumes.create.assert_called_with(self.TEST_VOLUME_NAME)

    @mock.patch('docker.models.volumes.Volume', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_create_volume_when_volume_exits(self, mock_docker_client, mock_volume):
        """
        Tests call stack when docker volume create is not called when the volume exists.
        """
        # arrange: volumes.get finds an existing (mocked) volume
        mock_docker_client.volumes.get.return_value = mock_volume
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act
        client.create_volume(self.TEST_VOLUME_NAME)
        # assert: nothing to create, the volume is already there
        mock_docker_client.volumes.create.assert_not_called()

    @mock.patch('docker.DockerClient', autospec=True)
    def test_create_volume_raises_exception_when_volume_get_fails(self, mock_docker_client):
        """
        Tests whether EdgeDeploymentError is raised when docker volume get fails
        """
        # arrange
        mock_docker_client.volumes.get.side_effect = docker.errors.APIError('volume get fails')
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act, assert
        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            client.create_volume(self.TEST_VOLUME_NAME)
        mock_docker_client.volumes.create.assert_not_called()

    @mock.patch('docker.DockerClient', autospec=True)
    def test_create_volume_raises_exception_when_volume_create_fails(self, mock_docker_client):
        """
        Tests whether EdgeDeploymentError is raised when docker volume create fails
        """
        # arrange
        mock_docker_client.volumes.get.side_effect = docker.errors.NotFound('no volume exists')
        mock_docker_client.volumes.create.side_effect = docker.errors.APIError('vol create fails')
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act, assert
        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            client.create_volume(self.TEST_VOLUME_NAME)
        # NOTE(review): volumes.create was given a side_effect above, i.e. the
        # implementation is expected to reach it (and see it fail), so this
        # assert_not_called looks contradictory -- confirm the intended check.
        mock_docker_client.volumes.create.assert_not_called()

    @mock.patch('docker.models.volumes.Volume', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_remove_volume_when_volume_exits(self, mock_docker_client, mock_volume):
        """
        Tests call stack when docker volume remove is called when the volume exists.
        """
        # arrange
        mock_docker_client.volumes.get.return_value = mock_volume
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act: no force flag supplied
        client.remove_volume(self.TEST_VOLUME_NAME)
        # assert: remove is invoked with force=False (positional)
        mock_volume.remove.assert_called_with(False)

    @mock.patch('docker.models.volumes.Volume', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_remove_volume_with_args_when_volume_exits(self, mock_docker_client, mock_volume):
        """
        Tests call stack when docker volume remove is called when the volume exists.
        """
        # arrange
        mock_docker_client.volumes.get.return_value = mock_volume
        client = EdgeDockerClient.create_instance(mock_docker_client)
        force_flag = True
        # act: explicit force flag supplied
        client.remove_volume(self.TEST_VOLUME_NAME, force_flag)
        # assert: the flag is forwarded unchanged
        mock_volume.remove.assert_called_with(force_flag)

    @mock.patch('docker.models.volumes.Volume', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_remove_volume_raises_exception_when_volume_get_fails(self, mock_docker_client,
                                                                  mock_volume):
        """
        Tests whether EdgeDeploymentError is raised when docker volume get fails
        """
        # arrange: the side_effect below overrides the return_value
        mock_docker_client.volumes.get.return_value = mock_volume
        mock_docker_client.volumes.get.side_effect = docker.errors.APIError('volume get fails')
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act, assert
        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            client.remove_volume(self.TEST_VOLUME_NAME)
        mock_volume.remove.assert_not_called()

    @mock.patch('docker.models.volumes.Volume', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_remove_volume_raises_exception_when_volume_remove_fails(self,
                                                                     mock_docker_client,
                                                                     mock_volume):
        """
        Tests whether EdgeDeploymentError is raised when docker volume remove fails
        """
        # arrange
        mock_volume.remove.side_effect = docker.errors.APIError('vol remove fails')
        mock_docker_client.volumes.get.return_value = mock_volume
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act, assert
        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            client.remove_volume(self.TEST_VOLUME_NAME)
        # NOTE(review): remove_volume was invoked without the force flag, yet
        # this expects remove(True) -- verify against the implementation
        # (the sibling tests above expect remove(False) for the same call).
        mock_volume.remove.assert_called_with(True)

    @mock.patch('docker.DockerClient', autospec=True)
    def test_copy_file_to_volume_raises_exception_when_info_fails(self, mock_docker_client):
        """
        Tests whether EdgeDeploymentError is raised when docker info fails
        """
        # arrange
        mock_docker_client.info.side_effect = docker.errors.APIError('info fails')
        client = EdgeDockerClient.create_instance(mock_docker_client)
        src_file = 'src.txt'
        dest_file = 'dest.txt'
        dest_dir = 'dest'
        # act, assert
        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            client.copy_file_to_volume(self.TEST_CONTAINER_NAME, dest_file, dest_dir, src_file)

    @mock.patch('edgectl.utils.EdgeUtils.copy_files')
    @mock.patch('docker.APIClient', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_copy_file_to_volume_windows_valid(self, mock_docker_client,
                                               mock_docker_api_client, mock_copy_utils):
        """
        Tests a valid invocation of copy_file_to_volume
        """
        # arrange: Windows daemon; inspect_volume supplies the host mountpoint
        mock_docker_client.info.return_value = {'OSType': 'Windows'}
        mock_docker_api_client.inspect_volume.return_value = {'Mountpoint': '\\\\some_path\\\\mount\\'}
        type(mock_docker_client).api = PropertyMock(return_value=mock_docker_api_client)
        client = EdgeDockerClient.create_instance(mock_docker_client)
        src_file = 'src.txt'
        dest_file = 'dest.txt'
        dest_dir = 'dest'
        # act
        client.copy_file_to_volume(self.TEST_CONTAINER_NAME, dest_file, dest_dir, src_file)
        # assert (was mislabelled '# arrange', and the next line previously
        # *called* the mock instead of asserting on it, verifying nothing)
        mock_docker_api_client.inspect_volume.assert_called_with(dest_dir)
        mock_copy_utils.assert_called_with(src_file, os.path.join('\\some_path\\mount\\', dest_file))

    @mock.patch('docker.APIClient', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_copy_file_to_volume_raises_exception_when_vol_inspect_fails(self,
                                                                         mock_docker_client,
                                                                         mock_docker_api_client):
        """
        Tests whether EdgeDeploymentError is raised when docker volume inspect fails
        """
        # arrange
        mock_docker_client.info.return_value = {'OSType': 'Windows'}
        mock_docker_api_client.inspect_volume.side_effect = docker.errors.APIError('inspect fails')
        type(mock_docker_client).api = PropertyMock(return_value=mock_docker_api_client)
        client = EdgeDockerClient.create_instance(mock_docker_client)
        src_file = 'src.txt'
        dest_file = 'dest.txt'
        dest_dir = 'dest'
        # act, assert
        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            client.copy_file_to_volume(self.TEST_CONTAINER_NAME, dest_file, dest_dir, src_file)

    @mock.patch('edgectl.utils.EdgeUtils.copy_files')
    @mock.patch('docker.APIClient', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_insert_file_win_raises_exception_when_copy_files_raises_os_except(self,
                                                                               mock_docker_client,
                                                                               mock_docker_api_client,
                                                                               mock_copy_utils):
        """
        Tests whether EdgeDeploymentError is raised when copying host files into the volume fails
        """
        # arrange
        mock_docker_client.info.return_value = {'OSType': 'Windows'}
        mock_docker_api_client.inspect_volume.return_value = {'Mountpoint': '\\\\some_path\\\\mount\\'}
        type(mock_docker_client).api = PropertyMock(return_value=mock_docker_api_client)
        mock_copy_utils.side_effect = OSError('os access error')
        client = EdgeDockerClient.create_instance(mock_docker_client)
        src_file = 'src.txt'
        dest_file = 'dest.txt'
        dest_dir = 'dest'
        # act, assert
        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            client.copy_file_to_volume(self.TEST_CONTAINER_NAME, dest_file, dest_dir, src_file)

    @mock.patch('edgectl.utils.EdgeUtils.copy_files')
    @mock.patch('docker.APIClient', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_insert_file_win_raises_exception_when_copy_files_raises_io_except(self,
                                                                               mock_docker_client,
                                                                               mock_docker_api_client,
                                                                               mock_copy_utils):
        """
        Tests whether EdgeDeploymentError is raised when copying host files into the volume fails
        """
        # arrange
        mock_docker_client.info.return_value = {'OSType': 'Windows'}
        mock_docker_api_client.inspect_volume.return_value = {'Mountpoint': '\\\\some_path\\\\mount\\'}
        type(mock_docker_client).api = PropertyMock(return_value=mock_docker_api_client)
        mock_copy_utils.side_effect = IOError('io access error')
        client = EdgeDockerClient.create_instance(mock_docker_client)
        src_file = 'src.txt'
        dest_file = 'dest.txt'
        dest_dir = 'dest'
        # act, assert
        with self.assertRaises(edgectl.errors.EdgeDeploymentError):
            client.copy_file_to_volume(self.TEST_CONTAINER_NAME, dest_file, dest_dir, src_file)

    @mock.patch('edgectl.host.EdgeDockerClient.create_tar_objects')
    @mock.patch('tarfile.TarFile', autospec=True)
    @mock.patch('tarfile.TarInfo', autospec=True)
    @mock.patch('io.BytesIO', autospec=True)
    @mock.patch('docker.models.containers.Container', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_ins_file_in_ctr_linux_raises_except_when_io_except_raised(self,
                                                                       mock_docker_client,
                                                                       mock_container,
                                                                       mock_tar_stream,
                                                                       mock_tarinfo,
                                                                       mock_tarfile,
                                                                       mock_tar_factory):
        """
        Tests whether EdgeDeploymentError is raised when opening host file fails
        """
        # arrange: Linux daemon; the tar factory returns our mocked tar objects
        mock_docker_client.info.return_value = {'OSType': 'Linux'}
        mock_docker_client.containers.get.return_value = mock_container
        mock_tar_factory.return_value = (mock_tar_stream, mock_tarinfo, mock_tarfile)
        client = EdgeDockerClient.create_instance(mock_docker_client)
        src_file = 'src.txt'
        dest_file = 'dest.txt'
        dest_dir = 'dest'
        mocked_data = b'MOCKED_DATA'
        test_time_value = 1518825244.88
        # act, assert: opening the host file raises IOError
        with patch('time.time', MagicMock(return_value=test_time_value)):
            with patch(OPEN_BUILTIN, mock_open(read_data=mocked_data)) as mocked_open:
                with self.assertRaises(edgectl.errors.EdgeDeploymentError):
                    mocked_open.side_effect = IOError('open io except')
                    client.copy_file_to_volume(self.TEST_CONTAINER_NAME, dest_file, dest_dir, src_file)

    @mock.patch('edgectl.host.EdgeDockerClient.create_tar_objects')
    @mock.patch('tarfile.TarFile', autospec=True)
    @mock.patch('tarfile.TarInfo', autospec=True)
    @mock.patch('io.BytesIO', autospec=True)
    @mock.patch('docker.models.containers.Container', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_ins_file_in_ctr_linux_raises_except_when_os_except_raised(self,
                                                                       mock_docker_client,
                                                                       mock_container,
                                                                       mock_tar_stream,
                                                                       mock_tarinfo,
                                                                       mock_tarfile,
                                                                       mock_tar_factory):
        """
        Tests whether EdgeDeploymentError is raised when opening host file fails
        """
        # arrange
        mock_docker_client.info.return_value = {'OSType': 'Linux'}
        mock_docker_client.containers.get.return_value = mock_container
        mock_tar_factory.return_value = (mock_tar_stream, mock_tarinfo, mock_tarfile)
        client = EdgeDockerClient.create_instance(mock_docker_client)
        src_file = 'src.txt'
        dest_file = 'dest.txt'
        dest_dir = 'dest'
        mocked_data = b'MOCKED_DATA'
        test_time_value = 1518825244.88
        # act, assert: opening the host file raises OSError
        with patch('time.time', MagicMock(return_value=test_time_value)):
            with patch(OPEN_BUILTIN, mock_open(read_data=mocked_data)) as mocked_open:
                with self.assertRaises(edgectl.errors.EdgeDeploymentError):
                    mocked_open.side_effect = OSError('open os except')
                    client.copy_file_to_volume(self.TEST_CONTAINER_NAME, dest_file, dest_dir, src_file)

    @mock.patch('edgectl.host.EdgeDockerClient.create_tar_objects')
    @mock.patch('tarfile.TarFile', autospec=True)
    @mock.patch('tarfile.TarInfo', autospec=True)
    @mock.patch('io.BytesIO', autospec=True)
    @mock.patch('docker.models.containers.Container', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_ins_file_in_ctr_linux_raises_except_when_put_archive_fails(self,
                                                                        mock_docker_client,
                                                                        mock_container,
                                                                        mock_tar_stream,
                                                                        mock_tarinfo,
                                                                        mock_tarfile,
                                                                        mock_tar_factory):
        """
        Tests whether EdgeDeploymentError is raised when container put archive fails
        """
        # arrange
        mock_docker_client.info.return_value = {'OSType': 'Linux'}
        mock_docker_client.containers.get.return_value = mock_container
        mock_tar_factory.return_value = (mock_tar_stream, mock_tarinfo, mock_tarfile)
        mock_container.put_archive.side_effect = docker.errors.APIError('put archive error')
        client = EdgeDockerClient.create_instance(mock_docker_client)
        src_file = 'src.txt'
        dest_file = 'dest.txt'
        dest_dir = 'dest'
        mocked_data = b'MOCKED_DATA'
        test_time_value = 1518825244.88
        # act, assert
        with patch('time.time', MagicMock(return_value=test_time_value)):
            with patch(OPEN_BUILTIN, mock_open(read_data=mocked_data)):
                with self.assertRaises(edgectl.errors.EdgeDeploymentError):
                    client.copy_file_to_volume(self.TEST_CONTAINER_NAME, dest_file, dest_dir, src_file)

    @mock.patch('edgectl.host.EdgeDockerClient.create_tar_objects')
    @mock.patch('tarfile.TarFile', autospec=True)
    @mock.patch('tarfile.TarInfo', autospec=True)
    @mock.patch('io.BytesIO', autospec=True)
    @mock.patch('docker.models.containers.Container', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_ins_file_in_ctr_linux_raises_except_when_container_get_fails(self,
                                                                          mock_docker_client,
                                                                          mock_container,
                                                                          mock_tar_stream,
                                                                          mock_tarinfo,
                                                                          mock_tarfile,
                                                                          mock_tar_factory):
        """
        Tests whether EdgeDeploymentError is raised when container get fails
        """
        # arrange: the side_effect below overrides the return_value
        mock_docker_client.info.return_value = {'OSType': 'Linux'}
        mock_docker_client.containers.get.return_value = mock_container
        mock_tar_factory.return_value = (mock_tar_stream, mock_tarinfo, mock_tarfile)
        mock_docker_client.containers.get.side_effect = docker.errors.APIError('get error')
        client = EdgeDockerClient.create_instance(mock_docker_client)
        src_file = 'src.txt'
        dest_file = 'dest.txt'
        dest_dir = 'dest'
        mocked_data = b'MOCKED_DATA'
        test_time_value = 1518825244.88
        # act, assert
        with patch('time.time', MagicMock(return_value=test_time_value)):
            with patch(OPEN_BUILTIN, mock_open(read_data=mocked_data)):
                with self.assertRaises(edgectl.errors.EdgeDeploymentError):
                    client.copy_file_to_volume(self.TEST_CONTAINER_NAME, dest_file, dest_dir, src_file)

    @mock.patch('edgectl.host.EdgeDockerClient.create_tar_objects')
    @mock.patch('tarfile.TarFile', autospec=True)
    @mock.patch('tarfile.TarInfo', autospec=True)
    @mock.patch('io.BytesIO', autospec=True)
    @mock.patch('docker.models.containers.Container', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_copy_file_to_volume_linux_valid(self,
                                             mock_docker_client,
                                             mock_container,
                                             mock_tar_stream,
                                             mock_tarinfo,
                                             mock_tarfile,
                                             mock_tar_factory):
        """
        Tests a valid invocation of copy_file_to_volume for docker OS type linux
        """
        # arrange
        mock_docker_client.info.return_value = {'OSType': 'Linux'}
        mock_docker_client.containers.get.return_value = mock_container
        mock_tar_factory.return_value = (mock_tar_stream, mock_tarinfo, mock_tarfile)
        client = EdgeDockerClient.create_instance(mock_docker_client)
        src_file = 'src.txt'
        dest_file = 'dest.txt'
        dest_dir = 'dest'
        mocked_data = b'MOCKED_DATA'
        test_time_value = 1518825244.88
        # act
        with patch('time.time', MagicMock(return_value=test_time_value)):
            with patch(OPEN_BUILTIN, mock_open(read_data=mocked_data)) as mocked_open:
                client.copy_file_to_volume(self.TEST_CONTAINER_NAME, dest_file, dest_dir, src_file)
        # assert
        #mock_tarfile.assert_called_with(fileobj=mock_tar_stream, mode='w')
        mocked_open.assert_called_with(src_file, 'rb')
        self.assertEqual(mock_tarinfo.size, len(mocked_data))
        self.assertEqual(mock_tarinfo.mtime, test_time_value)
        self.assertEqual(mock_tarinfo.mode, 0o444)
        mock_tarfile.addfile.assert_called()
        mock_tarfile.close.assert_called_with()
        mock_tar_stream.seek.assert_called_with(0)
        mock_docker_client.containers.get.assert_called_with(self.TEST_CONTAINER_NAME)
        # Assert on the mock instead of invoking it (the original line *called*
        # put_archive, which records a call but verifies nothing).
        mock_container.put_archive.assert_called_with(dest_dir, mock_tar_stream)
class TestEdgeDockerClose(unittest.TestCase):
    """Unit tests for API EdgeDockerClient.close"""

    @mock.patch('docker.APIClient', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_closed_invoked_using_with_statement(self, mock_docker_client, mock_docker_api_client):
        """ Test fails if close is not called implicitly using the with statement"""
        # arrange
        # (the original repeated this PropertyMock assignment twice and also
        # created a throwaway client that the 'with' target shadowed)
        type(mock_docker_client).api = PropertyMock(return_value=mock_docker_api_client)
        os_type = 'TEST_OS'
        mock_docker_client.info.return_value = {'OSType': os_type}
        # act: leaving the 'with' block must close the underlying API client
        with EdgeDockerClient.create_instance(mock_docker_client) as client:
            client.get_os_type()
        # assert
        mock_docker_api_client.close.assert_called_with()

    @mock.patch('docker.APIClient', autospec=True)
    @mock.patch('docker.DockerClient', autospec=True)
    def test_closed_invoked(self, mock_docker_client, mock_docker_api_client):
        """ Test fails if close is not called """
        # arrange
        type(mock_docker_client).api = PropertyMock(return_value=mock_docker_api_client)
        client = EdgeDockerClient.create_instance(mock_docker_client)
        # act
        client.close()
        # assert
        mock_docker_api_client.close.assert_called_with()
if __name__ == '__main__':
    # Collect every test class in this module into one suite and run it
    # verbosely. TestSuite accepts any iterable of tests, so the loader
    # results are fed in directly without an intermediate list.
    TEST_CLASSES = (
        TestEdgeDockerClientCheckAvailability,
        TestEdgeDockerClientLogin,
        TestEdgeDockerClientGetOSType,
        TestEdgeDockerClientGetLocalImageSHAId,
        TestEdgeDockerClientPull,
        TestEdgeDockerContainerOps,
        TestEdgeDockerNetworkCreate,
        TestEdgeDockerVolumes,
        TestEdgeDockerClose,
    )
    LOADER = unittest.TestLoader()
    SUITE = unittest.TestSuite(
        LOADER.loadTestsFromTestCase(cls) for cls in TEST_CLASSES)
    unittest.TextTestRunner(verbosity=2).run(SUITE)
| 45.15446
| 103
| 0.647491
| 6,720
| 62,268
| 5.689137
| 0.047768
| 0.073762
| 0.09835
| 0.056917
| 0.901023
| 0.877979
| 0.865423
| 0.85245
| 0.839973
| 0.819597
| 0
| 0.002778
| 0.271552
| 62,268
| 1,378
| 104
| 45.187228
| 0.840076
| 0.127915
| 0
| 0.753459
| 0
| 0
| 0.09802
| 0.030451
| 0
| 0
| 0
| 0
| 0.127044
| 1
| 0.084277
| false
| 0.023899
| 0.010063
| 0
| 0.115723
| 0.001258
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
709050bf8de6bf9b5d8a52b2539398bec03527a3
| 905,363
|
py
|
Python
|
zas_rep_tools/tests/test_stats.py
|
savin-berlin/zas-rep-tools
|
fcdaa2f70ee1b6a4124292ae42e3c9d508eb0b28
|
[
"MIT"
] | null | null | null |
zas_rep_tools/tests/test_stats.py
|
savin-berlin/zas-rep-tools
|
fcdaa2f70ee1b6a4124292ae42e3c9d508eb0b28
|
[
"MIT"
] | null | null | null |
zas_rep_tools/tests/test_stats.py
|
savin-berlin/zas-rep-tools
|
fcdaa2f70ee1b6a4124292ae42e3c9d508eb0b28
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# : Tests for the Stats Module
# Author:
#        c(Developer) -> {'Egor Savin'}
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### Program Info ###
#
#
#
#
#
import unittest
import os
import logging
import sure
import copy
from nose.plugins.attrib import attr
from testfixtures import tempdir, TempDirectory
from distutils.dir_util import copy_tree
import json
import csv
from collections import Counter, defaultdict
#from zas_rep_tools.src.classes.configer import Configer
from zas_rep_tools.src.classes.stats import Stats
from zas_rep_tools.src.classes.corpus import Corpus
from zas_rep_tools.src.utils.debugger import p, wipd, wipdn, wipdl, wipdo
from zas_rep_tools.src.utils.basetester import BaseTester
import zas_rep_tools.src.utils.db_helper as db_helper
import platform
if platform.uname()[0].lower() !="windows":
import colored_traceback
colored_traceback.add_hook()
else:
import colorama
class TestZASstatsStats(BaseTester,unittest.TestCase):
#_multiprocess_can_split_ = True
_multiprocess_shared_ = True
#@classmethod
    def setUp(self):
        # Run BaseTester.setUp() first so the shared fixtures it provides
        # (self.configer, self.mode, temp folders, ...) exist before the row
        # fixtures below are built.
        # NOTE(review): super(type(self), self) recurses infinitely if this
        # class is ever subclassed -- confirm no subclass exists.
        super(type(self), self).setUp()
        ########### EN ##############
        # Pre-tokenized/tagged document rows, as they would come out of the
        # corpus DB: 'text' holds a JSON string of [[tokens], [sentiment]]
        # sentence tuples plus user metadata columns.
        self.test_dict_row_en_1 = {u'star_constellation': u'lion', u'text': u'[[[["I", "PRP"], ["loved", "VBD"], ["it", "PRP"], [".", "symbol"]], ["positive", 0.7]], [[["But", "CC"], ["it", "PRP"], ["was", "VBD"], ["also", "RB"], ["verrrryyyyy", "JJ"], ["vvveRRRRRRrry", "NNP"], ["very", "RB"], ["piiiiiiiiity", "JJ"], ["pity", "NN"], ["pity", "NN"], ["piiitttyyy", "NN"], ["for", "IN"], ["me", "PRP"], ["......", "symbol"], [":-(((((", "EMOASC"], ["@real_trump", "mention"], ["#sheetlife", "hashtag"], ["#readytogo", "hashtag"], ["http://www.absurd.com", "URL"]], ["negative", -0.1875]]]', u'age': 37, u'working_area': u'IT', u'rowid': 1, u'gender': u'w', u'id': 1111}
        self.test_dict_row_en_2 = {u'star_constellation': u'lion', u'text': u'[[[["Tiny", "JJ"], ["model", "NN"], [",", "symbol"], ["but", "CC"], ["a", "DT"], ["big", "JJ"], ["big", "JJ"], ["big", "JJ"], ["explaaaaanation", "NN"], [".", "symbol"]], ["neutral", 0.0]], [[["Riiiiiight", "UH"], ["?", "symbol"]], ["neutral", 0.0]], [[["What", "WP"], ["do", "VBP"], ["youuuuuu", "PRP"], ["think", "VB"], ["about", "IN"], ["it", "PRP"], ["????", "symbol"]], ["neutral", 0.0]]]', u'age': 35, u'working_area': u'Air Industry', u'rowid': 5, u'gender': u'w', u'id': 5555}
        ########## DE ###############
        self.test_dict_row_de_1 = {u'star_constellation': u'fish', u'text': u'[[[["Klitze", "NN"], ["kliiiitze", "VMFIN"], ["kleEEEEine", "NE"], ["kleinnne", "ADJA"], ["\\u00dcberaschung", "NN"], [".", "symbol"]], ["neutral", 0.0]], [[["Trotzdem", "PAV"], ["hat", "VAFIN"], ["sie", "PPER"], ["mich", "PPER"], ["gl\\u00fccklich", "ADJD"], ["gemacht", "VVPP"], ["!", "symbol"], [":-))))", "EMOASC"], ["-)))", "EMOASC"]], ["positive", 0.5]]]', u'age': 23, u'working_area': u'Care', u'rowid': 8, u'gender': u'm', u'id': 8888}
        self.test_dict_row_de_2 = {u'star_constellation': u'aquarius', u'text': u'[[[["einen", "ART"], ["wundersch\\u00f6nen", "ADJA"], ["Taaaaaagggggg", "NN"], ["w\\u00fcnsche", "VVFIN"], ["ich", "PPER"], ["euch", "PRF"], [".", "symbol"]], ["neutral", 0.0]], [[["Genieeeeeeeeeeesst", "NN"], ["geniiiiiiiiiiiiist", "VVFIN"], ["das", "ART"], ["Leben", "NN"], [".", "symbol"]], ["neutral", 0.0]], [[["Bleeeeeeeeibt", "NN"], ["bleeeeibt", "VVFIN"], ["Huuuuuuuuuuuungrig", "NN"], [".", "symbol"], ["\\ud83d\\ude00\\ud83d\\ude00\\ud83d\\ude00\\ud83d\\ude00\\ud83d\\ude00", "EMOIMG"], ["\\ud83c\\udf08\\ud83c\\udf08\\ud83c\\udf08\\ud83c\\udf08\\ud83c\\udf08\\ud83c\\udf08\\ud83c\\udf08", "EMOIMG"]], ["neutral", 0.0]]]', u'age': 22, u'working_area': u'Finance', u'rowid': 9, u'gender': u'w', u'id': 9999}
        # Index of all fixture rows keyed by their document id.
        self.docs_ids = {
                        self.test_dict_row_en_1["id"]:self.test_dict_row_en_1,
                        self.test_dict_row_en_2["id"]:self.test_dict_row_en_2,
                        self.test_dict_row_de_1["id"]:self.test_dict_row_de_1,
                        self.test_dict_row_de_2["id"]:self.test_dict_row_de_2,
                        }
        # Expected normalization output per category ("repl" = replacements,
        # "redu" = reductions) used by downstream assertions.
        self.gold_standard_data = {
                    "lower":{
                            "repl":["rep_lower",""],
                            "redu": [],
                            },
                    }
        # Relative path to the on-disk stats test fixtures.
        self.path_to_stats_test_data = "data/tests_data/stats/"
#@classmethod
def tearDown(self):
super(type(self), self).tearDown()
####################################################################################################
####################################################################################################
###################### START STABLE TESTS #########################################################
####################################################################################################
####################################################################################################
###################INITIALISATION:000############################################
###### xxx: 000 ######
##### xx :0== ######
@attr(status='stable')
#@wipd
def test_initialization_of_the_stats_instance_000(self):
stats = Stats(mode=self.mode)
stats.should.be.a(Stats)
##### throws_exceptions:050 ######
#################################Beginn##############################################
############################INTERN METHODS###########################################
#####################################################################################
################### :100############################################
###### ***** ######
###### ***** ######
###### ***** ######
#################################END#################################################
############################INTERN METHODS###########################################
#####################################################################################
#################################Beginn##############################################
############################EXTERN METHODS###########################################
#####################################################################################
################### Corpus Initialization :500############################################
@attr(status='stable')
#@wipd
def test_new_plaintext_stats_initialization_500(self):
self.prj_folder()
name = self.configer.init_info_data["blogger"]["name"]
language = self.configer.init_info_data["blogger"]["language"]
visibility = self.configer.init_info_data["blogger"]["visibility"]
platform_name = self.configer.init_info_data["blogger"]["platform_name"]
license = self.configer.init_info_data["blogger"]["license"]
template_name = self.configer.init_info_data["blogger"]["template_name"]
version = self.configer.init_info_data["blogger"]["version"]
source = self.configer.init_info_data["blogger"]["source"]
encryption_key = self.configer.init_info_data["blogger"]["encryption_key"]["stats"]
corpus_id = self.configer.init_info_data["blogger"]["id"]["corpus"]
stats_id = self.configer.init_info_data["blogger"]["id"]["stats"]
typ= "stats"
stats = Stats(mode=self.mode)
#stats = Corpus(logger_level=logging.DEBUG)
stats.init(self.tempdir_project_folder, name, language, visibility, corpus_id=corpus_id, version= version, baseline_delimiter="++")
#p(stats.statsdb.get_all_attr())
assert stats.exist()
@attr(status='stable')
#@wipd
def test_new_encrypted_stats_initialization_501(self):
self.prj_folder()
name = self.configer.init_info_data["blogger"]["name"]
language = self.configer.init_info_data["blogger"]["language"]
visibility = self.configer.init_info_data["blogger"]["visibility"]
platform_name = self.configer.init_info_data["blogger"]["platform_name"]
license = self.configer.init_info_data["blogger"]["license"]
template_name = self.configer.init_info_data["blogger"]["template_name"]
version = self.configer.init_info_data["blogger"]["version"]
source = self.configer.init_info_data["blogger"]["source"]
encryption_key = self.configer.init_info_data["blogger"]["encryption_key"]["stats"]
corpus_id = self.configer.init_info_data["blogger"]["id"]["corpus"]
stats_id = self.configer.init_info_data["blogger"]["id"]["stats"]
typ= "stats"
stats = Stats(mode=self.mode)
#stats = Corpus(logger_level=logging.DEBUG)
stats.init(self.tempdir_project_folder, name, language, visibility, corpus_id=corpus_id, version= version, encryption_key=encryption_key, baseline_delimiter="++")
assert stats.exist()
@attr(status='stable')
#@wipd
def test_open_plaintext_blogger_stats_502(self):
self.prj_folder()
self.test_dbs()
name = self.configer.init_info_data["blogger"]["name"]
language = self.configer.init_info_data["blogger"]["language"]
visibility = self.configer.init_info_data["blogger"]["visibility"]
platform_name = self.configer.init_info_data["blogger"]["platform_name"]
license = self.configer.init_info_data["blogger"]["license"]
template_name = self.configer.init_info_data["blogger"]["template_name"]
version = self.configer.init_info_data["blogger"]["version"]
source = self.configer.init_info_data["blogger"]["source"]
encryption_key = self.configer.init_info_data["blogger"]["encryption_key"]["stats"]
corpus_id = self.configer.init_info_data["blogger"]["id"]["corpus"]
stats_id = self.configer.init_info_data["blogger"]["id"]["stats"]
typ= "stats"
stats = Stats(mode=self.mode)
stats.open(os.path.join(self.tempdir_testdbs,self.db_blogger_plaintext_stats_en))
#p(stats.statsdb.get_all_attr("main"))
stats.statsdb.get_all_attr("main")['name'].should.be.equal(name)
#stats.statsdb.get_all_attr("main")['language'].should.be.equal(language)
stats.statsdb.get_all_attr("main")['visibility'].should.be.equal(visibility)
#stats.statsdb.get_all_attr("main")['platform_name'].should.be.equal(platform_name)
stats.statsdb.get_all_attr("main")['typ'].should.be.equal(typ)
stats.statsdb.get_all_attr("main")['id'].should.be.equal(stats_id)
stats.statsdb.get_all_attr("main")['version'].should.be.equal(version)
assert stats.exist()
@attr(status='stable')
#@wipdl
def test_open_encrypted_twitter_stats_503(self):
    """Open an existing encrypted twitter StatsDB and verify its main attributes.

    Same as the plaintext variant (502), but the German twitter stats DB is
    encrypted and must be opened with the stats encryption key from the test
    configuration.
    """
    self.prj_folder()
    self.test_dbs()
    # Expected metadata from the test configuration for the twitter corpus.
    twitter_info = self.configer.init_info_data["twitter"]
    name = twitter_info["name"]
    visibility = twitter_info["visibility"]
    version = twitter_info["version"]
    encryption_key = twitter_info["encryption_key"]["stats"]
    stats_id = twitter_info["id"]["stats"]
    typ = "stats"
    stats = Stats(mode=self.mode)
    stats.open(os.path.join(self.tempdir_testdbs, self.db_twitter_encrypted_stats_de), encryption_key=encryption_key)
    # Fetch the attribute dict once instead of re-querying the DB per check.
    main_attrs = stats.statsdb.get_all_attr("main")
    main_attrs['name'].should.be.equal(name)
    #main_attrs['language'].should.be.equal(twitter_info["language"])
    main_attrs['visibility'].should.be.equal(visibility)
    #main_attrs['platform_name'].should.be.equal(twitter_info["platform_name"])
    main_attrs['typ'].should.be.equal(typ)
    main_attrs['id'].should.be.equal(stats_id)
    main_attrs['version'].should.be.equal(version)
    assert stats.exist()
@attr(status='stable')
#@wipd
def test_attach_corpdb_504(self):
    """Attach a corpus DB to an opened stats DB, plaintext and encrypted."""
    self.prj_folder()
    self.test_dbs()
    ### plaintext: blogger EN stats + corpus ###
    plain_stats_path = os.path.join(self.tempdir_testdbs, self.db_blogger_plaintext_stats_en)
    plain_corp_path = os.path.join(self.tempdir_testdbs, self.db_blogger_plaintext_corp_en)
    stats = Stats(mode=self.mode)
    stats.open(plain_stats_path)
    stats.attach_corpdb(plain_corp_path)
    assert stats.exist()
    assert stats.attached_corpdb_number() == 1
    ### encrypted: twitter DE stats + corpus, each with its own key ###
    twitter_keys = self.configer.init_info_data["twitter"]["encryption_key"]
    enc_stats_path = os.path.join(self.tempdir_testdbs, self.db_twitter_encrypted_stats_de)
    enc_corp_path = os.path.join(self.tempdir_testdbs, self.db_twitter_encrypted_corp_de)
    stats = Stats(mode=self.mode)
    stats.open(enc_stats_path, encryption_key=twitter_keys["stats"])
    stats.attach_corpdb(enc_corp_path, encryption_key=twitter_keys["corpus"])
    assert stats.exist()
    #assert stats.attached_corpdb_number() == 1
################### :600############################################
@attr(status='stable')
#@wipd
def test_extract_repl_lower_case_600(self):
    """extract_replications() on DE/EN test rows with default (lower-cased) mode.

    For each test row this checks the three parallel containers returned by
    Stats.extract_replications():
      * extracted_repl_in_text_container -- per token, (char, run_length, position) tuples
      * repl_free_text_container         -- tokens with character replications collapsed
      * rle_for_repl_in_text_container   -- run-length-encoded token forms (e.g. 'kli^4tze')
    """
    self.prj_folder()
    self.test_dbs()
    blogger_info = self.configer.init_info_data["blogger"]
    stats = Stats(mode=self.mode)
    # Default init: case_sensitiv is off, so all tokens come back lower-cased.
    stats.init(self.tempdir_project_folder, blogger_info["name"], blogger_info["language"], blogger_info["visibility"], corpus_id=blogger_info["id"]["corpus"], version=blogger_info["version"], encryption_key=blogger_info["encryption_key"]["stats"], baseline_delimiter="++")
    stats._init_compution_variables()
    stats._init_preprocessors()
    # The stats object needs an opened corpus attached for extraction to run.
    corp = Corpus(mode=self.mode)
    corp.open(os.path.join(self.tempdir_testdbs, self.db_blogger_plaintext_corp_en))
    stats.corp = corp
    stats._corp_info = corp.info()
    ### DE ###
    ### ROW 1 ###
    extracted_repl_in_text_container, repl_free_text_container, rle_for_repl_in_text_container = stats.extract_replications(json.loads(self.test_dict_row_de_1["text"]))
    extracted_repl_in_text_container.should.be.equal([['', [(u'i', 4, 2)], [(u'e', 5, 2)], [(u'n', 3, 4)], '', ''], ['', '', '', '', '', '', '', [(u')', 4, 2)], [(u')', 3, 1)]]])
    repl_free_text_container.should.be.equal([[u'klitze', u'klitze', u'kleine', u'kleine', u'\xfcberaschung', u'.'], [u'trotzdem', u'hat', u'sie', u'mich', u'gl\xfccklich', u'gemacht', u'!', u':-)', u'-)']])
    rle_for_repl_in_text_container.should.be.equal([['', u'kli^4tze', u'kle^5ine', u'klein^3e', '', ''], ['', '', '', '', '', '', '', u':-)^4', u'-)^3']])
    ### ROW 2 ###
    extracted_repl_in_text_container, repl_free_text_container, rle_for_repl_in_text_container = stats.extract_replications(json.loads(self.test_dict_row_de_2["text"]))
    extracted_repl_in_text_container.should.be.equal([['', '', [(u'a', 6, 1), (u'g', 6, 2)], '', '', '', ''], [[(u'e', 11, 4)], [(u'i', 13, 3)], '', '', ''], [[(u'e', 8, 2)], [(u'e', 4, 2)], [(u'u', 12, 1)], '', [(u'\U0001f600', 5, 0)], [(u'\U0001f308', 7, 0)]]])
    repl_free_text_container.should.be.equal([[u'einen', u'wundersch\xf6nen', u'tag', u'w\xfcnsche', u'ich', u'euch', u'.'], [u'geniest', u'genist', u'das', u'leben', u'.'], [u'bleibt', u'bleibt', u'hungrig', u'.', u'\U0001f600', u'\U0001f308']])
    rle_for_repl_in_text_container.should.be.equal([['', '', u'ta^6g^6', '', '', '', ''], [u'genie^11s^2t', u'geni^13st', '', '', ''], [u'ble^8ibt', u'ble^4ibt', u'hu^12ngrig', '', u'\U0001f600^5', u'\U0001f308^7']])
    # ########### EN ##############
    ### ROW 1 ###
    extracted_repl_in_text_container, repl_free_text_container, rle_for_repl_in_text_container = stats.extract_replications(json.loads(self.test_dict_row_en_1["text"]))
    extracted_repl_in_text_container.should.be.equal([['', '', '', ''], ['', '', '', '', [(u'r', 4, 2), (u'y', 5, 3)], [(u'v', 3, 0), (u'r', 8, 2)], '', [(u'i', 9, 1)], '', '', [(u'i', 3, 1), (u't', 3, 2), (u'y', 3, 3)], '', '', [(u'.', 6, 0)], [(u'(', 5, 2)], '', '', '', '']])
    repl_free_text_container.should.be.equal([[u'i', u'loved', u'it', u'.'], [u'but', u'it', u'was', u'also', u'very', u'very', u'very', u'pity', u'pity', u'pity', u'pity', u'for', u'me', u'.', u':-(', u'@real_trump', u'#shetlife', u'#readytogo', u'http://www.absurd.com']])
    rle_for_repl_in_text_container.should.be.equal([['', '', '', ''], ['', '', '', '', u'ver^4y^5', u'v^3er^8y', '', u'pi^9ty', '', '', u'pi^3t^3y^3', '', '', u'.^6', u':-(^5', '', '', '', '']])
    ### ROW 2 ###
    extracted_repl_in_text_container, repl_free_text_container, rle_for_repl_in_text_container = stats.extract_replications(json.loads(self.test_dict_row_en_2["text"]))
    extracted_repl_in_text_container.should.be.equal([['', '', '', '', '', '', '', '', [(u'a', 5, 4)], ''], [[(u'i', 6, 1)], ''], ['', '', [(u'u', 6, 2)], '', '', '', [(u'?', 4, 0)]]])
    repl_free_text_container.should.be.equal([[u'tiny', u'model', u',', u'but', u'a', u'big', u'big', u'big', u'explanation', u'.'], [u'right', u'?'], [u'what', u'do', u'you', u'think', u'about', u'it', u'?']])
    rle_for_repl_in_text_container.should.be.equal([['', '', '', '', '', '', '', '', u'expla^5nation', ''], [u'ri^6ght', ''], ['', '', u'you^6', '', '', '', u'?^4']])
@attr(status='stable')
#@wipd
def test_extract_repl_case_sensitiv_601(self):
    """extract_replications() on DE/EN test rows with case_sensitiv=True.

    Counterpart to test 600: the Stats instance is initialized with
    case_sensitiv=True, so the original token casing (and case-sensitive run
    lengths, e.g. 'kleE^4ine' vs. 'kle^5ine') must be preserved in all three
    returned containers.
    """
    self.prj_folder()
    self.test_dbs()
    blogger_info = self.configer.init_info_data["blogger"]
    stats = Stats(mode=self.mode)
    # case_sensitiv=True is the one setting that distinguishes this test from 600.
    stats.init(self.tempdir_project_folder, blogger_info["name"], blogger_info["language"], blogger_info["visibility"], corpus_id=blogger_info["id"]["corpus"], version=blogger_info["version"], encryption_key=blogger_info["encryption_key"]["stats"], case_sensitiv=True, baseline_delimiter="++")
    stats._init_compution_variables()
    stats._init_preprocessors()
    # The stats object needs an opened corpus attached for extraction to run.
    corp = Corpus(mode=self.mode)
    corp.open(os.path.join(self.tempdir_testdbs, self.db_blogger_plaintext_corp_en))
    stats.corp = corp
    stats._corp_info = corp.info()
    ### DE ###
    ### ROW 1 ###
    extracted_repl_in_text_container, repl_free_text_container, rle_for_repl_in_text_container = stats.extract_replications(json.loads(self.test_dict_row_de_1["text"]))
    extracted_repl_in_text_container.should.be.equal([['', [(u'i', 4, 2)], [(u'E', 4, 3)], [(u'n', 3, 4)], '', ''], ['', '', '', '', '', '', '', [(u')', 4, 2)], [(u')', 3, 1)]]])
    repl_free_text_container.should.be.equal([[u'Klitze', u'klitze', u'kleEine', u'kleine', u'\xdcberaschung', u'.'], [u'Trotzdem', u'hat', u'sie', u'mich', u'gl\xfccklich', u'gemacht', u'!', u':-)', u'-)']])
    rle_for_repl_in_text_container.should.be.equal([['', u'kli^4tze', u'kleE^4ine', u'klein^3e', '', ''], ['', '', '', '', '', '', '', u':-)^4', u'-)^3']])
    ### ROW 2 ###
    extracted_repl_in_text_container, repl_free_text_container, rle_for_repl_in_text_container = stats.extract_replications(json.loads(self.test_dict_row_de_2["text"]))
    extracted_repl_in_text_container.should.be.equal([['', '', [(u'a', 6, 1), (u'g', 6, 2)], '', '', '', ''], [[(u'e', 11, 4)], [(u'i', 13, 3)], '', '', ''], [[(u'e', 8, 2)], [(u'e', 4, 2)], [(u'u', 12, 1)], '', [(u'\U0001f600', 5, 0)], [(u'\U0001f308', 7, 0)]]])
    repl_free_text_container.should.be.equal([[u'einen', u'wundersch\xf6nen', u'Tag', u'w\xfcnsche', u'ich', u'euch', u'.'], [u'Geniest', u'genist', u'das', u'Leben', u'.'], [u'Bleibt', u'bleibt', u'Hungrig', u'.', u'\U0001f600', u'\U0001f308']])
    rle_for_repl_in_text_container.should.be.equal([['', '', u'Ta^6g^6', '', '', '', ''], [u'Genie^11s^2t', u'geni^13st', '', '', ''], [u'Ble^8ibt', u'ble^4ibt', u'Hu^12ngrig', '', u'\U0001f600^5', u'\U0001f308^7']])
    # ########### EN ##############
    ### ROW 1 ###
    extracted_repl_in_text_container, repl_free_text_container, rle_for_repl_in_text_container = stats.extract_replications(json.loads(self.test_dict_row_en_1["text"]))
    extracted_repl_in_text_container.should.be.equal([['', '', '', ''], ['', '', '', '', [(u'r', 4, 2), (u'y', 5, 3)], [(u'v', 3, 0), (u'R', 6, 2)], '', [(u'i', 9, 1)], '', '', [(u'i', 3, 1), (u't', 3, 2), (u'y', 3, 3)], '', '', [(u'.', 6, 0)], [(u'(', 5, 2)], '', '', '', '']])
    repl_free_text_container.should.be.equal([[u'I', u'loved', u'it', u'.'], [u'But', u'it', u'was', u'also', u'very', u'veRry', u'very', u'pity', u'pity', u'pity', u'pity', u'for', u'me', u'.', u':-(', u'@real_trump', u'#shetlife', u'#readytogo', u'http://www.absurd.com']])
    rle_for_repl_in_text_container.should.be.equal([['', '', '', ''], ['', '', '', '', u'ver^4y^5', u'v^3eR^6r^2y', '', u'pi^9ty', '', '', u'pi^3t^3y^3', '', '', u'.^6', u':-(^5', '', '', '', '']])
    ### ROW 2 ###
    extracted_repl_in_text_container, repl_free_text_container, rle_for_repl_in_text_container = stats.extract_replications(json.loads(self.test_dict_row_en_2["text"]))
    extracted_repl_in_text_container.should.be.equal([['', '', '', '', '', '', '', '', [(u'a', 5, 4)], ''], [[(u'i', 6, 1)], ''], ['', '', [(u'u', 6, 2)], '', '', '', [(u'?', 4, 0)]]])
    repl_free_text_container.should.be.equal([[u'Tiny', u'model', u',', u'but', u'a', u'big', u'big', u'big', u'explanation', u'.'], [u'Right', u'?'], [u'What', u'do', u'you', u'think', u'about', u'it', u'?']])
    rle_for_repl_in_text_container.should.be.equal([['', '', '', '', '', '', '', '', u'expla^5nation', ''], [u'Ri^6ght', ''], ['', '', u'you^6', '', '', '', u'?^4']])
@attr(status='stable')
#@wipd
def test_insert_repl_into_db_lower_case_602(self):
self.prj_folder()
#self.blogger_corpus()
self.test_dbs()
#stats = Stats(mode=self.mode)
name = self.configer.init_info_data["blogger"]["name"]
language = self.configer.init_info_data["blogger"]["language"]
visibility = self.configer.init_info_data["blogger"]["visibility"]
platform_name = self.configer.init_info_data["blogger"]["platform_name"]
license = self.configer.init_info_data["blogger"]["license"]
template_name = self.configer.init_info_data["blogger"]["template_name"]
version = self.configer.init_info_data["blogger"]["version"]
source = self.configer.init_info_data["blogger"]["source"]
encryption_key = self.configer.init_info_data["blogger"]["encryption_key"]["stats"]
corpus_id = self.configer.init_info_data["blogger"]["id"]["corpus"]
stats_id = self.configer.init_info_data["blogger"]["id"]["stats"]
typ= "stats"
#stats = Corpus(logger_level=logging.DEBUG)
#p(stats.statsdb, "stats.statsdb")
corp = Corpus(mode=self.mode)
corp.open(os.path.join(self.tempdir_testdbs,self.db_blogger_plaintext_corp_en))
## DE####
import Stemmer
stemmer = Stemmer.Stemmer("de")
### ROW 1 ###
stats = Stats(mode=self.mode, )#, )
stats.init(self.tempdir_project_folder, name, language, visibility, corpus_id=corpus_id, version= version, encryption_key=encryption_key, baseline_delimiter="++")
stats._init_compution_variables()
stats._init_preprocessors()
stats.corp = corp
stats._corp_info = corp.info()
text_list = [self.test_dict_row_de_1["id"],self.test_dict_row_de_1["text"]]
rle_for_repl_in_text_container = [['', u'kli^4tze', u'kle^5ine', u'klein^3e', '', ''], ['', '', '', '', '', '', '', u':-)^4', u'-)^3']]
extracted_repl_in_text_container = [['', [(u'i', 4, 2)], [(u'e', 5, 2)], [(u'n', 3, 4)], '', ''], ['', '', '', '', '', '', '', [(u')', 4, 2)], [(u')', 3, 1)]]]
repl_free_text_container = [[u'klitze', u'klitze', u'kleine', u'kleine', u'\xfcberaschung', u'.'], [u'trotzdem', u'hat', u'sie', u'mich', u'gl\xfccklich', u'gemacht', u'!', u':-)', u'-)']]
redu_free_text_container = [[(u'klitze', {u'klitze': 1, u'kli^4tze': 1}), (u'kleine', {u'kle^5ine': 1, u'klein^3e': 1}), u'\xfcberaschung', u'.'], [u'trotzdem', u'hat', u'sie', u'mich', u'gl\xfccklich', u'gemacht', u'!', u':-)', u'-)']]
stemmed_text_container = [ [stemmer.stemWord(token) if isinstance(token, (str, unicode) ) else stemmer.stemWord(token[0]) for token in sent] for sent in redu_free_text_container ]
#p(stemmed_text_container ,"stemmed_text_container ")
mapping_redu = [[0, 2, 4, 5], [0, 1, 2, 3, 4, 5, 6, 7, 8]]
stats.insert_repl_into_db( text_list,json.loads(self.test_dict_row_de_1["text"]),extracted_repl_in_text_container, repl_free_text_container, rle_for_repl_in_text_container,redu_free_text_container,mapping_redu,stemmed_text_container)
stats._write_repl_into_db(thread_name="Thread0")
stats._write_redu_into_db(thread_name="Thread0")
#p(list(stats.statsdb.getall("replications")))
right_output = [
(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'klitz', u'i', 4, 2, u'[0, 0]', u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]'),
(2, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5ine', u'klein', u'e', 5, 2, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(3, 8888, u'[4, 9]', u'[0, 3]', u'[0, 1]', u'kleine', u'klein^3e', u'klein', u'n', 3, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(4, 8888, u'[4, 9]', u'[1, 7]', u'[1, 7]', u':-)', u':-)^4', u':-)', u')', 4, 2, None, u'EMOASC', u'["positive", 0.5]', u'sie', u'["PPER", null, "sie"]', u'mich', u'["PPER", null, "mich"]', u'gl\xfccklich', u'["ADJD", null, "glucklich"]', u'gemacht', u'["VVPP", null, "gemacht"]', u'!', u'["symbol", null, "!"]', u'-)', u'["EMOASC", null, "-)"]', None, None, None, None, None, None, None, None),
(5, 8888, u'[4, 9]', u'[1, 8]', u'[1, 8]', u'-)', u'-)^3', u'-)', u')', 3, 1, None, u'EMOASC', u'["positive", 0.5]', u'mich', u'["PPER", null, "mich"]', u'gl\xfccklich', u'["ADJD", null, "glucklich"]', u'gemacht', u'["VVPP", null, "gemacht"]', u'!', u'["symbol", null, "!"]', u':-)', u'["EMOASC", null, ":-)"]', None, None, None, None, None, None, None, None, None, None)]
list(stats.statsdb.getall("replications")).should.be.equal(right_output)
# # ### ROW 2 ###
stats = Stats(mode=self.mode, )#, )
stats.init(self.tempdir_project_folder, name, language, visibility, corpus_id=corpus_id, version= version, encryption_key=encryption_key, baseline_delimiter="++")
stats._init_compution_variables()
stats._init_preprocessors()
stats.corp = corp
stats._corp_info = corp.info()
text_list = [self.test_dict_row_de_2["id"],self.test_dict_row_de_2["text"]]
rle_for_repl_in_text_container = [['', '', u'ta^6g^6', '', '', '', ''], [u'genie^11s^2t', u'geni^13st', '', '', ''], [u'ble^8ibt', u'ble^4ibt', u'hu^12ngrig', '', u'\U0001f600^5', u'\U0001f308^7']]
extracted_repl_in_text_container = [['', '', [(u'a', 6, 1), (u'g', 6, 2)], '', '', '', ''], [[(u'e', 11, 4)], [(u'i', 13, 3)], '', '', ''], [[(u'e', 8, 2)], [(u'e', 4, 2)], [(u'u', 12, 1)], '', [(u'\U0001f600', 5, 0)], [(u'\U0001f308', 7, 0)]]]
repl_free_text_container = [[u'einen', u'wundersch\xf6nen', u'tag', u'w\xfcnsche', u'ich', u'euch', u'.'], [u'geniest', u'genist', u'das', u'leben', u'.'], [u'bleibt', u'bleibt', u'hungrig', u'.', u'\U0001f600', u'\U0001f308']]
redu_free_text_container = [[u'einen', u'wundersch\xf6nen', u'tag', u'w\xfcnsche', u'ich', u'euch', u'.'], [u'geniest', u'genist', u'das', u'leben', u'.'], [(u'bleibt', {u'ble^4ibt': 1, u'ble^8ibt': 1}), u'hungrig', u'.', u'\U0001f600', u'\U0001f308']]
mapping_redu = [[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4], [0, 2, 3, 4, 5]]
stemmed_text_container = [ [stemmer.stemWord(token) if isinstance(token, (str, unicode) ) else stemmer.stemWord(token[0]) for token in sent] for sent in redu_free_text_container ]
stats.insert_repl_into_db(text_list,json.loads(self.test_dict_row_de_2["text"]),extracted_repl_in_text_container, repl_free_text_container, rle_for_repl_in_text_container,redu_free_text_container,mapping_redu,stemmed_text_container)
stats._write_repl_into_db(thread_name="Thread0")
stats._write_redu_into_db(thread_name="Thread0")
#p(list(stats.statsdb.getall("replications")))
right_output = [(1, 9999, u'[7, 5, 5]', u'[0, 2]', u'[0, 2]', u'tag', u'ta^6g^6', u'tag', u'a', 6, 1, None, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, u'einen', u'["ART", null, "ein"]', u'wundersch\xf6nen', u'["ADJA", null, "wunderschon"]', u'w\xfcnsche', u'["VVFIN", null, "wunsch"]', u'ich', u'["PPER", null, "ich"]', u'euch', u'["PRF", null, "euch"]', u'.', u'["symbol", null, "."]', u'geniest', u'["NN", null, "geni"]'),
(2, 9999, u'[7, 5, 5]', u'[0, 2]', u'[0, 2]', u'tag', u'ta^6g^6', u'tag', u'g', 6, 2, None, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, u'einen', u'["ART", null, "ein"]', u'wundersch\xf6nen', u'["ADJA", null, "wunderschon"]', u'w\xfcnsche', u'["VVFIN", null, "wunsch"]', u'ich', u'["PPER", null, "ich"]', u'euch', u'["PRF", null, "euch"]', u'.', u'["symbol", null, "."]', u'geniest', u'["NN", null, "geni"]'),
(3, 9999, u'[7, 5, 5]', u'[1, 0]', u'[1, 0]', u'geniest', u'genie^11s^2t', u'geni', u'e', 11, 4, None, u'NN', u'["neutral", 0.0]', u'tag', u'["NN", null, "tag"]', u'w\xfcnsche', u'["VVFIN", null, "wunsch"]', u'ich', u'["PPER", null, "ich"]', u'euch', u'["PRF", null, "euch"]', u'.', u'["symbol", null, "."]', u'genist', u'["VVFIN", null, "genist"]', u'das', u'["ART", null, "das"]', u'leben', u'["NN", null, "leb"]', u'.', u'["symbol", null, "."]', u'bleibt', u'["NN", {"ble^4ibt": 1, "ble^8ibt": 1}, "bleibt"]'),
(4, 9999, u'[7, 5, 5]', u'[1, 1]', u'[1, 1]', u'genist', u'geni^13st', u'genist', u'i', 13, 3, None, u'VVFIN', u'["neutral", 0.0]', u'w\xfcnsche', u'["VVFIN", null, "wunsch"]', u'ich', u'["PPER", null, "ich"]', u'euch', u'["PRF", null, "euch"]', u'.', u'["symbol", null, "."]', u'geniest', u'["NN", null, "geni"]', u'das', u'["ART", null, "das"]', u'leben', u'["NN", null, "leb"]', u'.', u'["symbol", null, "."]', u'bleibt', u'["NN", {"ble^4ibt": 1, "ble^8ibt": 1}, "bleibt"]', u'hungrig', u'["NN", null, "hungrig"]'),
(5, 9999, u'[7, 5, 5]', u'[2, 0]', u'[2, 0]', u'bleibt', u'ble^8ibt', u'bleibt', u'e', 8, 2, u'[2, 0]', u'NN', u'["neutral", 0.0]', u'geniest', u'["NN", null, "geni"]', u'genist', u'["VVFIN", null, "genist"]', u'das', u'["ART", null, "das"]', u'leben', u'["NN", null, "leb"]', u'.', u'["symbol", null, "."]', u'hungrig', u'["NN", null, "hungrig"]', u'.', u'["symbol", null, "."]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]', None, None),
(6, 9999, u'[7, 5, 5]', u'[2, 1]', u'[2, 0]', u'bleibt', u'ble^4ibt', u'bleibt', u'e', 4, 2, u'[2, 0]', u'NN', u'["neutral", 0.0]', u'geniest', u'["NN", null, "geni"]', u'genist', u'["VVFIN", null, "genist"]', u'das', u'["ART", null, "das"]', u'leben', u'["NN", null, "leb"]', u'.', u'["symbol", null, "."]', u'hungrig', u'["NN", null, "hungrig"]', u'.', u'["symbol", null, "."]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]', None, None),
(7, 9999, u'[7, 5, 5]', u'[2, 2]', u'[2, 1]', u'hungrig', u'hu^12ngrig', u'hungrig', u'u', 12, 1, None, u'NN', u'["neutral", 0.0]', u'genist', u'["VVFIN", null, "genist"]', u'das', u'["ART", null, "das"]', u'leben', u'["NN", null, "leb"]', u'.', u'["symbol", null, "."]', u'bleibt', u'["NN", {"ble^4ibt": 1, "ble^8ibt": 1}, "bleibt"]', u'.', u'["symbol", null, "."]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]', None, None, None, None),
(8, 9999, u'[7, 5, 5]', u'[2, 4]', u'[2, 3]', u'\U0001f600', u'\U0001f600^5', u'\U0001f600', u'\U0001f600', 5, 0, None, u'EMOIMG', u'["neutral", 0.0]', u'leben', u'["NN", null, "leb"]', u'.', u'["symbol", null, "."]', u'bleibt', u'["NN", {"ble^4ibt": 1, "ble^8ibt": 1}, "bleibt"]', u'hungrig', u'["NN", null, "hungrig"]', u'.', u'["symbol", null, "."]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]', None, None, None, None, None, None, None, None),
(9, 9999, u'[7, 5, 5]', u'[2, 5]', u'[2, 4]', u'\U0001f308', u'\U0001f308^7', u'\U0001f308', u'\U0001f308', 7, 0, None, u'EMOIMG', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'bleibt', u'["NN", {"ble^4ibt": 1, "ble^8ibt": 1}, "bleibt"]', u'hungrig', u'["NN", null, "hungrig"]', u'.', u'["symbol", null, "."]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', None, None, None, None, None, None, None, None, None, None)]
list(stats.statsdb.getall("replications")).should.be.equal(right_output)
# # ########### EN ##############
stemmer = Stemmer.Stemmer("en")
# # ### ROW 1 ###
stats = Stats(mode=self.mode, )#, )
stats.init(self.tempdir_project_folder, name, language, visibility, corpus_id=corpus_id, version= version, encryption_key=encryption_key, baseline_delimiter="++")
stats._init_compution_variables()
stats._init_preprocessors()
stats.corp = corp
stats._corp_info = corp.info()
text_list = [self.test_dict_row_en_1["id"],self.test_dict_row_en_1["text"]]
rle_for_repl_in_text_container = [['', '', '', ''], ['', '', '', '', u'ver^4y^5', u'v^3er^8y', '', u'pi^9ty', '', '', u'pi^3t^3y^3', '', '', u'.^6', u':-(^5', '', '', '', '']]
extracted_repl_in_text_container = [['', '', '', ''], ['', '', '', '', [(u'r', 4, 2), (u'y', 5, 3)], [(u'v', 3, 0), (u'r', 8, 2)], '', [(u'i', 9, 1)], '', '', [(u'i', 3, 1), (u't', 3, 2), (u'y', 3, 3)], '', '', [(u'.', 6, 0)], [(u'(', 5, 2)], '', '', '', '']]
repl_free_text_container = [[u'i', u'loved', u'it', u'.'], [u'but', u'it', u'was', u'also', u'very', u'very', u'very', u'pity', u'pity', u'pity', u'pity', u'for', u'me', u'.', u':-(', u'@real_trump', u'#shetlife', u'#readytogo', u'http://www.absurd.com']]
redu_free_text_container =[[u'i', u'loved', u'it', u'.'], [u'but', u'it', u'was', u'also', (u'very', {u'very': 1, u'ver^4y^5': 1, u'v^3er^8y': 1}), (u'pity', {u'pity': 2, u'pi^3t^3y^3': 1, u'pi^9ty': 1}), u'for', u'me', u'.', u':-(', u'@real_trump', u'#shetlife', u'#readytogo', u'http://www.absurd.com']]
mapping_redu = [[0, 1, 2, 3], [0, 1, 2, 3, 4, 7, 11, 12, 13, 14, 15, 16, 17, 18]]
stemmed_text_container = [ [stemmer.stemWord(token) if isinstance(token, (str, unicode) ) else stemmer.stemWord(token[0]) for token in sent] for sent in redu_free_text_container ]
stats.insert_repl_into_db(text_list,json.loads(self.test_dict_row_en_1["text"]),extracted_repl_in_text_container, repl_free_text_container, rle_for_repl_in_text_container,redu_free_text_container,mapping_redu,stemmed_text_container)
stats._write_repl_into_db(thread_name="Thread0")
stats._write_redu_into_db(thread_name="Thread0")
#p(list(stats.statsdb.getall("replications")))
right_output = [(1, 1111, u'[4, 14]', u'[1, 4]', u'[1, 4]', u'very', u'ver^4y^5', u'veri', u'r', 4, 2, u'[1, 4]', u'JJ', u'["negative", -0.1875]', u'.', u'["symbol", null, "."]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'pity', u'["JJ", {"pity": 2, "pi^3t^3y^3": 1, "pi^9ty": 1}, "piti"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]'),
(2, 1111, u'[4, 14]', u'[1, 4]', u'[1, 4]', u'very', u'ver^4y^5', u'veri', u'y', 5, 3, u'[1, 4]', u'JJ', u'["negative", -0.1875]', u'.', u'["symbol", null, "."]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'pity', u'["JJ", {"pity": 2, "pi^3t^3y^3": 1, "pi^9ty": 1}, "piti"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]'),
(3, 1111, u'[4, 14]', u'[1, 5]', u'[1, 4]', u'very', u'v^3er^8y', u'veri', u'v', 3, 0, u'[1, 4]', u'JJ', u'["negative", -0.1875]', u'.', u'["symbol", null, "."]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'pity', u'["JJ", {"pity": 2, "pi^3t^3y^3": 1, "pi^9ty": 1}, "piti"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]'),
(4, 1111, u'[4, 14]', u'[1, 5]', u'[1, 4]', u'very', u'v^3er^8y', u'veri', u'r', 8, 2, u'[1, 4]', u'JJ', u'["negative", -0.1875]', u'.', u'["symbol", null, "."]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'pity', u'["JJ", {"pity": 2, "pi^3t^3y^3": 1, "pi^9ty": 1}, "piti"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]'),
(5, 1111, u'[4, 14]', u'[1, 7]', u'[1, 5]', u'pity', u'pi^9ty', u'piti', u'i', 9, 1, u'[1, 5]', u'JJ', u'["negative", -0.1875]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'very', u'["JJ", {"very": 1, "ver^4y^5": 1, "v^3er^8y": 1}, "veri"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]', u'@real_trump', u'["mention", null, "@real_trump"]'),
(6, 1111, u'[4, 14]', u'[1, 10]', u'[1, 5]', u'pity', u'pi^3t^3y^3', u'piti', u'i', 3, 1, u'[1, 5]', u'JJ', u'["negative", -0.1875]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'very', u'["JJ", {"very": 1, "ver^4y^5": 1, "v^3er^8y": 1}, "veri"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]', u'@real_trump', u'["mention", null, "@real_trump"]'),
(7, 1111, u'[4, 14]', u'[1, 10]', u'[1, 5]', u'pity', u'pi^3t^3y^3', u'piti', u't', 3, 2, u'[1, 5]', u'JJ', u'["negative", -0.1875]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'very', u'["JJ", {"very": 1, "ver^4y^5": 1, "v^3er^8y": 1}, "veri"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]', u'@real_trump', u'["mention", null, "@real_trump"]'),
(8, 1111, u'[4, 14]', u'[1, 10]', u'[1, 5]', u'pity', u'pi^3t^3y^3', u'piti', u'y', 3, 3, u'[1, 5]', u'JJ', u'["negative", -0.1875]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'very', u'["JJ", {"very": 1, "ver^4y^5": 1, "v^3er^8y": 1}, "veri"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]', u'@real_trump', u'["mention", null, "@real_trump"]'),
(9, 1111, u'[4, 14]', u'[1, 13]', u'[1, 8]', u'.', u'.^6', u'.', u'.', 6, 0, None, u'symbol', u'["negative", -0.1875]', u'also', u'["RB", null, "also"]', u'very', u'["JJ", {"very": 1, "ver^4y^5": 1, "v^3er^8y": 1}, "veri"]', u'pity', u'["JJ", {"pity": 2, "pi^3t^3y^3": 1, "pi^9ty": 1}, "piti"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u':-(', u'["EMOASC", null, ":-("]', u'@real_trump', u'["mention", null, "@real_trump"]', u'#shetlife', u'["hashtag", null, "#shetlif"]', u'#readytogo', u'["hashtag", null, "#readytogo"]', u'http://www.absurd.com', u'["URL", null, "http://www.absurd.com"]'),
(10, 1111, u'[4, 14]', u'[1, 14]', u'[1, 9]', u':-(', u':-(^5', u':-(', u'(', 5, 2, None, u'EMOASC', u'["negative", -0.1875]', u'very', u'["JJ", {"very": 1, "ver^4y^5": 1, "v^3er^8y": 1}, "veri"]', u'pity', u'["JJ", {"pity": 2, "pi^3t^3y^3": 1, "pi^9ty": 1}, "piti"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u'@real_trump', u'["mention", null, "@real_trump"]', u'#shetlife', u'["hashtag", null, "#shetlif"]', u'#readytogo', u'["hashtag", null, "#readytogo"]', u'http://www.absurd.com', u'["URL", null, "http://www.absurd.com"]', None, None)]
list(stats.statsdb.getall("replications")).should.be.equal(right_output)
# # ### ROW 2 ###
stats = Stats(mode=self.mode, )#, )
stats.init(self.tempdir_project_folder, name, language, visibility, corpus_id=corpus_id, version= version, encryption_key=encryption_key, baseline_delimiter="++")
stats._init_compution_variables()
stats._init_preprocessors()
stats.corp = corp
stats._corp_info = corp.info()
text_list = [self.test_dict_row_en_2["id"],self.test_dict_row_en_2["text"]]
rle_for_repl_in_text_container = [['', '', '', '', '', '', '', '', u'expla^5nation', ''], [u'ri^6ght', ''], ['', '', u'you^6', '', '', '', u'?^4']]
extracted_repl_in_text_container = [['', '', '', '', '', '', '', '', [(u'a', 5, 4)], ''], [[(u'i', 6, 1)], ''], ['', '', [(u'u', 6, 2)], '', '', '', [(u'?', 4, 0)]]]
repl_free_text_container = [[u'tiny', u'model', u',', u'but', u'a', u'big', u'big', u'big', u'explanation', u'.'], [u'right', u'?'], [u'what', u'do', u'you', u'think', u'about', u'it', u'?']]
redu_free_text_container = [[u'tiny', u'model', u',', u'but', u'a', (u'big', {u'big': 3}), u'explanation', u'.'], [u'right', u'?'], [u'what', u'do', u'you', u'think', u'about', u'it', u'?']]
mapping_redu = [[0, 1, 2, 3, 4, 5, 8, 9], [0, 1], [0, 1, 2, 3, 4, 5, 6]]
stemmed_text_container = [ [stemmer.stemWord(token) if isinstance(token, (str, unicode) ) else stemmer.stemWord(token[0]) for token in sent] for sent in redu_free_text_container ]
stats.insert_repl_into_db(text_list ,json.loads(self.test_dict_row_en_2["text"]),extracted_repl_in_text_container, repl_free_text_container, rle_for_repl_in_text_container,redu_free_text_container,mapping_redu,stemmed_text_container)
stats._write_repl_into_db(thread_name="Thread0")
stats._write_redu_into_db(thread_name="Thread0")
#p(list(stats.statsdb.getall("replications")))
right_output = [(1, 5555, u'[8, 2, 7]', u'[0, 8]', u'[0, 6]', u'explanation', u'expla^5nation', u'explan', u'a', 5, 4, None, u'NN', u'["neutral", 0.0]', u'model', u'["NN", null, "model"]', u',', u'["symbol", null, ","]', u'but', u'["CC", null, "but"]', u'a', u'["DT", null, "a"]', u'big', u'["JJ", {"big": 3}, "big"]', u'.', u'["symbol", null, "."]', u'right', u'["UH", null, "right"]', u'?', u'["symbol", null, "?"]', u'what', u'["WP", null, "what"]', u'do', u'["VBP", null, "do"]'),
(2, 5555, u'[8, 2, 7]', u'[1, 0]', u'[1, 0]', u'right', u'ri^6ght', u'right', u'i', 6, 1, None, u'UH', u'["neutral", 0.0]', u'but', u'["CC", null, "but"]', u'a', u'["DT", null, "a"]', u'big', u'["JJ", {"big": 3}, "big"]', u'explanation', u'["NN", null, "explan"]', u'.', u'["symbol", null, "."]', u'?', u'["symbol", null, "?"]', u'what', u'["WP", null, "what"]', u'do', u'["VBP", null, "do"]', u'you', u'["PRP", null, "you"]', u'think', u'["VB", null, "think"]'),
(3, 5555, u'[8, 2, 7]', u'[2, 2]', u'[2, 2]', u'you', u'you^6', u'you', u'u', 6, 2, None, u'PRP', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'right', u'["UH", null, "right"]', u'?', u'["symbol", null, "?"]', u'what', u'["WP", null, "what"]', u'do', u'["VBP", null, "do"]', u'think', u'["VB", null, "think"]', u'about', u'["IN", null, "about"]', u'it', u'["PRP", null, "it"]', u'?', u'["symbol", null, "?"]', None, None),
(4, 5555, u'[8, 2, 7]', u'[2, 6]', u'[2, 6]', u'?', u'?^4', u'?', u'?', 4, 0, None, u'symbol', u'["neutral", 0.0]', u'do', u'["VBP", null, "do"]', u'you', u'["PRP", null, "you"]', u'think', u'["VB", null, "think"]', u'about', u'["IN", null, "about"]', u'it', u'["PRP", null, "it"]', None, None, None, None, None, None, None, None, None, None)]
list(stats.statsdb.getall("replications")).should.be.equal(right_output)
########################
####### many_rows ######
########################
# # ### ROW 1 ###
stats = Stats(mode=self.mode, )#, )
stats.init(self.tempdir_project_folder, name, language, visibility, corpus_id=corpus_id, version= version, encryption_key=encryption_key, baseline_delimiter="++")
stats._init_compution_variables()
stats._init_preprocessors()
stats.corp = corp
stats._corp_info = corp.info()
text_list = [self.test_dict_row_en_1["id"],self.test_dict_row_en_1["text"]]
rle_for_repl_in_text_container = [['', '', '', ''], ['', '', '', '', u'ver^4y^5', u'v^3er^8y', '', u'pi^9ty', '', '', u'pi^3t^3y^3', '', '', u'.^6', u':-(^5', '', '', '', '']]
extracted_repl_in_text_container = [['', '', '', ''], ['', '', '', '', [(u'r', 4, 2), (u'y', 5, 3)], [(u'v', 3, 0), (u'r', 8, 2)], '', [(u'i', 9, 1)], '', '', [(u'i', 3, 1), (u't', 3, 2), (u'y', 3, 3)], '', '', [(u'.', 6, 0)], [(u'(', 5, 2)], '', '', '', '']]
repl_free_text_container = [[u'i', u'loved', u'it', u'.'], [u'but', u'it', u'was', u'also', u'very', u'very', u'very', u'pity', u'pity', u'pity', u'pity', u'for', u'me', u'.', u':-(', u'@real_trump', u'#shetlife', u'#readytogo', u'http://www.absurd.com']]
redu_free_text_container = [[u'i', u'loved', u'it', u'.'], [u'but', u'it', u'was', u'also', (u'very', {u'very': 1, u'ver^4y^5': 1, u'v^3er^8y': 1}), (u'pity', {u'pity': 2, u'pi^3t^3y^3': 1, u'pi^9ty': 1}), u'for', u'me', u'.', u':-(', u'@real_trump', u'#shetlife', u'#readytogo', u'http://www.absurd.com']]
mapping_redu = [[0, 1, 2, 3], [0, 1, 2, 3, 4, 7, 11, 12, 13, 14, 15, 16, 17, 18]]
stemmed_text_container = [ [stemmer.stemWord(token) if isinstance(token, (str, unicode) ) else stemmer.stemWord(token[0]) for token in sent] for sent in redu_free_text_container ]
stats.insert_repl_into_db(text_list,json.loads(self.test_dict_row_en_1["text"]),extracted_repl_in_text_container, repl_free_text_container, rle_for_repl_in_text_container,redu_free_text_container,mapping_redu,stemmed_text_container)
stats._write_repl_into_db(thread_name="Thread0")
stats._write_redu_into_db(thread_name="Thread0")
#p(list(stats.statsdb.getall("replications")))
#list(stats.statsdb.getall("replications")).should.be.equal([(1, 1111, u'[1, 4]', u'ver^4y^5', u'very', u'JJ', u'["negative", -0.1875]', u'r', 4, 2, u'.', u'symbol', u'but', u'CC', u'it', u'PRP', u'was', u'VBD', u'also', u'RB', u'very', u'NNP', u'very', u'RB', u'pity', u'JJ', u'pity', u'NN', u'pity', u'NN'), (2, 1111, u'[1, 4]', u'ver^4y^5', u'very', u'JJ', u'["negative", -0.1875]', u'y', 5, 3, u'.', u'symbol', u'but', u'CC', u'it', u'PRP', u'was', u'VBD', u'also', u'RB', u'very', u'NNP', u'very', u'RB', u'pity', u'JJ', u'pity', u'NN', u'pity', u'NN'), (3, 1111, u'[1, 5]', u'v^3er^8y', u'very', u'NNP', u'["negative", -0.1875]', u'v', 3, 0, u'but', u'CC', u'it', u'PRP', u'was', u'VBD', u'also', u'RB', u'very', u'JJ', u'very', u'RB', u'pity', u'JJ', u'pity', u'NN', u'pity', u'NN', u'pity', u'NN'), (4, 1111, u'[1, 5]', u'v^3er^8y', u'very', u'NNP', u'["negative", -0.1875]', u'r', 8, 2, u'but', u'CC', u'it', u'PRP', u'was', u'VBD', u'also', u'RB', u'very', u'JJ', u'very', u'RB', u'pity', u'JJ', u'pity', u'NN', u'pity', u'NN', u'pity', u'NN'), (5, 1111, u'[1, 7]', u'pi^9ty', u'pity', u'JJ', u'["negative", -0.1875]', u'i', 9, 1, u'was', u'VBD', u'also', u'RB', u'very', u'JJ', u'very', u'NNP', u'very', u'RB', u'pity', u'NN', u'pity', u'NN', u'pity', u'NN', u'for', u'IN', u'me', u'PRP'), (6, 1111, u'[1, 10]', u'pi^3t^3y^3', u'pity', u'NN', u'["negative", -0.1875]', u'i', 3, 1, u'very', u'NNP', u'very', u'RB', u'pity', u'JJ', u'pity', u'NN', u'pity', u'NN', u'for', u'IN', u'me', u'PRP', u'.', u'symbol', u':-(', u'EMOASC', u'@real_trump', u'mention'), (7, 1111, u'[1, 10]', u'pi^3t^3y^3', u'pity', u'NN', u'["negative", -0.1875]', u't', 3, 2, u'very', u'NNP', u'very', u'RB', u'pity', u'JJ', u'pity', u'NN', u'pity', u'NN', u'for', u'IN', u'me', u'PRP', u'.', u'symbol', u':-(', u'EMOASC', u'@real_trump', u'mention'), (8, 1111, u'[1, 10]', u'pi^3t^3y^3', u'pity', u'NN', u'["negative", -0.1875]', u'y', 3, 3, u'very', u'NNP', u'very', u'RB', u'pity', u'JJ', u'pity', u'NN', u'pity', 
u'NN', u'for', u'IN', u'me', u'PRP', u'.', u'symbol', u':-(', u'EMOASC', u'@real_trump', u'mention'), (9, 1111, u'[1, 13]', u'.^6', u'.', u'symbol', u'["negative", -0.1875]', u'.', 6, 0, u'pity', u'NN', u'pity', u'NN', u'pity', u'NN', u'for', u'IN', u'me', u'PRP', u':-(', u'EMOASC', u'@real_trump', u'mention', u'#shetlife', u'hashtag', u'#readytogo', u'hashtag', u'http://www.absurd.com', u'URL'), (10, 1111, u'[1, 14]', u':-(^5', u':-(', u'EMOASC', u'["negative", -0.1875]', u'(', 5, 2, u'pity', u'NN', u'pity', u'NN', u'for', u'IN', u'me', u'PRP', u'.', u'symbol', u'@real_trump', u'mention', u'#shetlife', u'hashtag', u'#readytogo', u'hashtag', u'http://www.absurd.com', u'URL', u'[]', u'[]')])
# # ### ROW 2 ###
text_list = [self.test_dict_row_en_2["id"],self.test_dict_row_en_2["text"]]
rle_for_repl_in_text_container = [['', '', '', '', '', '', '', '', u'expla^5nation', ''], [u'ri^6ght', ''], ['', '', u'you^6', '', '', '', u'?^4']]
extracted_repl_in_text_container = [['', '', '', '', '', '', '', '', [(u'a', 5, 4)], ''], [[(u'i', 6, 1)], ''], ['', '', [(u'u', 6, 2)], '', '', '', [(u'?', 4, 0)]]]
repl_free_text_container = [[u'tiny', u'model', u',', u'but', u'a', u'big', u'big', u'big', u'explanation', u'.'], [u'right', u'?'], [u'what', u'do', u'you', u'think', u'about', u'it', u'?']]
redu_free_text_container = [[u'tiny', u'model', u',', u'but', u'a', (u'big', {u'big': 3}), u'explanation', u'.'], [u'right', u'?'], [u'what', u'do', u'you', u'think', u'about', u'it', u'?']]
mapping_redu = [[0, 1, 2, 3, 4, 5, 8, 9], [0, 1], [0, 1, 2, 3, 4, 5, 6]]
stemmed_text_container = [ [stemmer.stemWord(token) if isinstance(token, (str, unicode) ) else stemmer.stemWord(token[0]) for token in sent] for sent in redu_free_text_container ]
stats.insert_repl_into_db(text_list,json.loads(self.test_dict_row_en_2["text"]),extracted_repl_in_text_container, repl_free_text_container, rle_for_repl_in_text_container,redu_free_text_container,mapping_redu,stemmed_text_container)
stats._write_repl_into_db(thread_name="Thread0")
stats._write_redu_into_db(thread_name="Thread0")
#p(list(stats.statsdb.getall("replications")))
right_output = [(1, 1111, u'[4, 14]', u'[1, 4]', u'[1, 4]', u'very', u'ver^4y^5', u'veri', u'r', 4, 2, u'[1, 4]', u'JJ', u'["negative", -0.1875]', u'.', u'["symbol", null, "."]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'pity', u'["JJ", {"pity": 2, "pi^3t^3y^3": 1, "pi^9ty": 1}, "piti"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]'), (
2, 1111, u'[4, 14]', u'[1, 4]', u'[1, 4]', u'very', u'ver^4y^5', u'veri', u'y', 5, 3, u'[1, 4]', u'JJ', u'["negative", -0.1875]', u'.', u'["symbol", null, "."]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'pity', u'["JJ", {"pity": 2, "pi^3t^3y^3": 1, "pi^9ty": 1}, "piti"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]'), (
3, 1111, u'[4, 14]', u'[1, 5]', u'[1, 4]', u'very', u'v^3er^8y', u'veri', u'v', 3, 0, u'[1, 4]', u'JJ', u'["negative", -0.1875]', u'.', u'["symbol", null, "."]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'pity', u'["JJ", {"pity": 2, "pi^3t^3y^3": 1, "pi^9ty": 1}, "piti"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]'), (
4, 1111, u'[4, 14]', u'[1, 5]', u'[1, 4]', u'very', u'v^3er^8y', u'veri', u'r', 8, 2, u'[1, 4]', u'JJ', u'["negative", -0.1875]', u'.', u'["symbol", null, "."]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'pity', u'["JJ", {"pity": 2, "pi^3t^3y^3": 1, "pi^9ty": 1}, "piti"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]'), (
5, 1111, u'[4, 14]', u'[1, 7]', u'[1, 5]', u'pity', u'pi^9ty', u'piti', u'i', 9, 1, u'[1, 5]', u'JJ', u'["negative", -0.1875]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'very', u'["JJ", {"very": 1, "ver^4y^5": 1, "v^3er^8y": 1}, "veri"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]', u'@real_trump', u'["mention", null, "@real_trump"]'), (
6, 1111, u'[4, 14]', u'[1, 10]', u'[1, 5]', u'pity', u'pi^3t^3y^3', u'piti', u'i', 3, 1, u'[1, 5]', u'JJ', u'["negative", -0.1875]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'very', u'["JJ", {"very": 1, "ver^4y^5": 1, "v^3er^8y": 1}, "veri"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]', u'@real_trump', u'["mention", null, "@real_trump"]'), (
7, 1111, u'[4, 14]', u'[1, 10]', u'[1, 5]', u'pity', u'pi^3t^3y^3', u'piti', u't', 3, 2, u'[1, 5]', u'JJ', u'["negative", -0.1875]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'very', u'["JJ", {"very": 1, "ver^4y^5": 1, "v^3er^8y": 1}, "veri"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]', u'@real_trump', u'["mention", null, "@real_trump"]'), (
8, 1111, u'[4, 14]', u'[1, 10]', u'[1, 5]', u'pity', u'pi^3t^3y^3', u'piti', u'y', 3, 3, u'[1, 5]', u'JJ', u'["negative", -0.1875]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'very', u'["JJ", {"very": 1, "ver^4y^5": 1, "v^3er^8y": 1}, "veri"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]', u'@real_trump', u'["mention", null, "@real_trump"]'), (
9, 1111, u'[4, 14]', u'[1, 13]', u'[1, 8]', u'.', u'.^6', u'.', u'.', 6, 0, None, u'symbol', u'["negative", -0.1875]', u'also', u'["RB", null, "also"]', u'very', u'["JJ", {"very": 1, "ver^4y^5": 1, "v^3er^8y": 1}, "veri"]', u'pity', u'["JJ", {"pity": 2, "pi^3t^3y^3": 1, "pi^9ty": 1}, "piti"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u':-(', u'["EMOASC", null, ":-("]', u'@real_trump', u'["mention", null, "@real_trump"]', u'#shetlife', u'["hashtag", null, "#shetlif"]', u'#readytogo', u'["hashtag", null, "#readytogo"]', u'http://www.absurd.com', u'["URL", null, "http://www.absurd.com"]'), (
10, 1111, u'[4, 14]', u'[1, 14]', u'[1, 9]', u':-(', u':-(^5', u':-(', u'(', 5, 2, None, u'EMOASC', u'["negative", -0.1875]', u'very', u'["JJ", {"very": 1, "ver^4y^5": 1, "v^3er^8y": 1}, "veri"]', u'pity', u'["JJ", {"pity": 2, "pi^3t^3y^3": 1, "pi^9ty": 1}, "piti"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u'@real_trump', u'["mention", null, "@real_trump"]', u'#shetlife', u'["hashtag", null, "#shetlif"]', u'#readytogo', u'["hashtag", null, "#readytogo"]', u'http://www.absurd.com', u'["URL", null, "http://www.absurd.com"]', None, None), (
11, 5555, u'[8, 2, 7]', u'[0, 8]', u'[0, 6]', u'explanation', u'expla^5nation', u'explan', u'a', 5, 4, None, u'NN', u'["neutral", 0.0]', u'model', u'["NN", null, "model"]', u',', u'["symbol", null, ","]', u'but', u'["CC", null, "but"]', u'a', u'["DT", null, "a"]', u'big', u'["JJ", {"big": 3}, "big"]', u'.', u'["symbol", null, "."]', u'right', u'["UH", null, "right"]', u'?', u'["symbol", null, "?"]', u'what', u'["WP", null, "what"]', u'do', u'["VBP", null, "do"]'), (
12, 5555, u'[8, 2, 7]', u'[1, 0]', u'[1, 0]', u'right', u'ri^6ght', u'right', u'i', 6, 1, None, u'UH', u'["neutral", 0.0]', u'but', u'["CC", null, "but"]', u'a', u'["DT", null, "a"]', u'big', u'["JJ", {"big": 3}, "big"]', u'explanation', u'["NN", null, "explan"]', u'.', u'["symbol", null, "."]', u'?', u'["symbol", null, "?"]', u'what', u'["WP", null, "what"]', u'do', u'["VBP", null, "do"]', u'you', u'["PRP", null, "you"]', u'think', u'["VB", null, "think"]'), (
13, 5555, u'[8, 2, 7]', u'[2, 2]', u'[2, 2]', u'you', u'you^6', u'you', u'u', 6, 2, None, u'PRP', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'right', u'["UH", null, "right"]', u'?', u'["symbol", null, "?"]', u'what', u'["WP", null, "what"]', u'do', u'["VBP", null, "do"]', u'think', u'["VB", null, "think"]', u'about', u'["IN", null, "about"]', u'it', u'["PRP", null, "it"]', u'?', u'["symbol", null, "?"]', None, None), (
14, 5555, u'[8, 2, 7]', u'[2, 6]', u'[2, 6]', u'?', u'?^4', u'?', u'?', 4, 0, None, u'symbol', u'["neutral", 0.0]', u'do', u'["VBP", null, "do"]', u'you', u'["PRP", null, "you"]', u'think', u'["VB", null, "think"]', u'about', u'["IN", null, "about"]', u'it', u'["PRP", null, "it"]', None, None, None, None, None, None, None, None, None, None)]
stats.statsdb.getall("replications").should.be.equal(right_output)
@attr(status='stable')
#@wipd
def test_extract_redu_lower_case_603(self):
    """Check reduplication extraction on lower-cased input (DE + EN rows).

    Builds a ``Stats`` instance bound to the blogger plain-text corpus and
    verifies, for two German and two English pre-tokenized rows, that
    ``Stats.extract_reduplications`` returns the expected triple of
    (extracted reduplications, reduplication-free text, mapping indices).

    NOTE(review): the RLE markers (e.g. ``u'kli^4tze'``) presumably encode
    replication counts per character — confirm against the Stats docs.
    """
    self.prj_folder()
    self.test_dbs()
    stats = Stats(mode=self.mode)
    # Only the parameters that Stats.init() actually consumes are read from
    # the blogger test configuration (unused keys are deliberately skipped).
    name = self.configer.init_info_data["blogger"]["name"]
    language = self.configer.init_info_data["blogger"]["language"]
    visibility = self.configer.init_info_data["blogger"]["visibility"]
    version = self.configer.init_info_data["blogger"]["version"]
    encryption_key = self.configer.init_info_data["blogger"]["encryption_key"]["stats"]
    corpus_id = self.configer.init_info_data["blogger"]["id"]["corpus"]
    stats.init(self.tempdir_project_folder, name, language, visibility,
               corpus_id=corpus_id, version=version,
               encryption_key=encryption_key, baseline_delimiter="++")
    stats._init_compution_variables()
    stats._init_preprocessors()
    # Attach the English plain-text blogger corpus the stats object works on.
    corp = Corpus(mode=self.mode)
    corp.open(os.path.join(self.tempdir_testdbs, self.db_blogger_plaintext_corp_en))
    stats.corp = corp
    stats._corp_info = corp.info()

    ### DE ###
    ### ROW 1: 'klitze klitze' and 'kleine kleine' are reduplicated ###
    repl_free_de_row_lowercased_1 = [[u'klitze', u'klitze', u'kleine', u'kleine', u'\xfcberaschung', u'.'], [u'trotzdem', u'hat', u'sie', u'mich', u'gl\xfccklich', u'gemacht', u'!', u':-)', u'-)']]
    rle_for_repl_in_text_container = [['', u'kli^4tze', u'kle^5ine', u'klein^3e', '', ''], ['', '', '', '', '', '', '', u':-)^4', u'-)^3']]
    extracted_redu_in_text_container, redu_free_text_container, mapping_redu = stats.extract_reduplications(repl_free_de_row_lowercased_1, rle_for_repl_in_text_container)
    extracted_redu_in_text_container.should.be.equal([[{'start_index_in_orig': 0, 'length': 2, 'word': u'klitze', 'index_in_redu_free': 0}, {'start_index_in_orig': 2, 'length': 2, 'word': u'kleine', 'index_in_redu_free': 1}], []])
    redu_free_text_container.should.be.equal([[(u'klitze', {u'klitze': 1, u'kli^4tze': 1}), (u'kleine', {u'kle^5ine': 1, u'klein^3e': 1}), u'\xfcberaschung', u'.'], [u'trotzdem', u'hat', u'sie', u'mich', u'gl\xfccklich', u'gemacht', u'!', u':-)', u'-)']])
    mapping_redu.should.be.equal([[0, 2, 4, 5], [0, 1, 2, 3, 4, 5, 6, 7, 8]])

    ### ROW 2: only 'bleibt bleibt' forms a reduplication ###
    repl_free_de_row_lowercased_2 = [[u'einen', u'wundersch\xf6nen', u'tag', u'w\xfcnsche', u'ich', u'euch', u'.'], [u'geniest', u'genist', u'das', u'leben', u'.'], [u'bleibt', u'bleibt', u'hungrig', u'.', u'\U0001f600', u'\U0001f308']]
    rle_for_repl_in_text_container = [['', '', u'ta^6g^6', '', '', '', ''], [u'genie^11s^2t', u'geni^13st', '', '', ''], [u'ble^8ibt', u'ble^4ibt', u'hu^12ngrig', '', u'\U0001f600^5', u'\U0001f308^7']]
    extracted_redu_in_text_container, redu_free_text_container, mapping_redu = stats.extract_reduplications(repl_free_de_row_lowercased_2, rle_for_repl_in_text_container)
    extracted_redu_in_text_container.should.be.equal([[], [], [{'start_index_in_orig': 0, 'length': 2, 'word': u'bleibt', 'index_in_redu_free': 0}]])
    redu_free_text_container.should.be.equal([[u'einen', u'wundersch\xf6nen', u'tag', u'w\xfcnsche', u'ich', u'euch', u'.'], [u'geniest', u'genist', u'das', u'leben', u'.'], [(u'bleibt', {u'ble^4ibt': 1, u'ble^8ibt': 1}), u'hungrig', u'.', u'\U0001f600', u'\U0001f308']])
    mapping_redu.should.be.equal([[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4], [0, 2, 3, 4, 5]])

    ### EN ###
    ### ROW 1: runs of 'very' (x3) and 'pity' (x4) are reduplicated ###
    repl_free_en_row_lowercased_1 = [[u'i', u'loved', u'it', u'.'], [u'but', u'it', u'was', u'also', u'very', u'very', u'very', u'pity', u'pity', u'pity', u'pity', u'for', u'me', u'.', u':-(', u'@real_trump', u'#shetlife', u'#readytogo', u'http://www.absurd.com']]
    rle_for_repl_in_text_container = [['', '', '', ''], ['', '', '', '', u'ver^4y^5', u'v^3er^8y', '', u'pi^9ty', '', '', u'pi^3t^3y^3', '', '', u'.^6', u':-(^5', '', '', '', '']]
    extracted_redu_in_text_container, redu_free_text_container, mapping_redu = stats.extract_reduplications(repl_free_en_row_lowercased_1, rle_for_repl_in_text_container)
    extracted_redu_in_text_container.should.be.equal([[], [{'start_index_in_orig': 4, 'length': 3, 'word': u'very', 'index_in_redu_free': 4}, {'start_index_in_orig': 7, 'length': 4, 'word': u'pity', 'index_in_redu_free': 5}]])
    redu_free_text_container.should.be.equal([[u'i', u'loved', u'it', u'.'], [u'but', u'it', u'was', u'also', (u'very', {u'very': 1, u'ver^4y^5': 1, u'v^3er^8y': 1}), (u'pity', {u'pity': 2, u'pi^3t^3y^3': 1, u'pi^9ty': 1}), u'for', u'me', u'.', u':-(', u'@real_trump', u'#shetlife', u'#readytogo', u'http://www.absurd.com']])
    mapping_redu.should.be.equal([[0, 1, 2, 3], [0, 1, 2, 3, 4, 7, 11, 12, 13, 14, 15, 16, 17, 18]])

    ### ROW 2: the 'big big big' run collapses to one reduplication ###
    repl_free_en_row_lowercased_2 = [[u'tiny', u'model', u',', u'but', u'a', u'big', u'big', u'big', u'explanation', u'.'], [u'right', u'?'], [u'what', u'do', u'you', u'think', u'about', u'it', u'?']]
    rle_for_repl_in_text_container = [['', '', '', '', '', '', '', '', u'expla^5nation', ''], [u'ri^6ght', ''], ['', '', u'you^6', '', '', '', u'?^4']]
    extracted_redu_in_text_container, redu_free_text_container, mapping_redu = stats.extract_reduplications(repl_free_en_row_lowercased_2, rle_for_repl_in_text_container)
    extracted_redu_in_text_container.should.be.equal([[{'start_index_in_orig': 5, 'length': 3, 'word': u'big', 'index_in_redu_free': 5}], [], []])
    redu_free_text_container.should.be.equal([[u'tiny', u'model', u',', u'but', u'a', (u'big', {u'big': 3}), u'explanation', u'.'], [u'right', u'?'], [u'what', u'do', u'you', u'think', u'about', u'it', u'?']])
    mapping_redu.should.be.equal([[0, 1, 2, 3, 4, 5, 8, 9], [0, 1], [0, 1, 2, 3, 4, 5, 6]])
@attr(status='stable')
#@wipd
def test_extract_redu_case_sensitive_604(self):
self.prj_folder()
#self.blogger_corpus()
self.test_dbs()
#stats = Stats(mode=self.mode)
stats = Stats(mode=self.mode)#, )
name = self.configer.init_info_data["blogger"]["name"]
language = self.configer.init_info_data["blogger"]["language"]
visibility = self.configer.init_info_data["blogger"]["visibility"]
platform_name = self.configer.init_info_data["blogger"]["platform_name"]
license = self.configer.init_info_data["blogger"]["license"]
template_name = self.configer.init_info_data["blogger"]["template_name"]
version = self.configer.init_info_data["blogger"]["version"]
source = self.configer.init_info_data["blogger"]["source"]
encryption_key = self.configer.init_info_data["blogger"]["encryption_key"]["stats"]
corpus_id = self.configer.init_info_data["blogger"]["id"]["corpus"]
stats_id = self.configer.init_info_data["blogger"]["id"]["stats"]
typ= "stats"
#stats = Corpus(logger_level=logging.DEBUG)
stats.init(self.tempdir_project_folder, name, language, visibility, corpus_id=corpus_id, version= version, encryption_key=encryption_key, case_sensitiv=True, baseline_delimiter="++")
stats._init_compution_variables()
stats._init_preprocessors()
#p(stats.statsdb, "stats.statsdb")
corp = Corpus(mode=self.mode)
corp.open(os.path.join(self.tempdir_testdbs,self.db_blogger_plaintext_corp_en))
stats.corp = corp
stats._corp_info = corp.info()
### DE####
### ROW 1 ###
repl_free_de_row_case_sensitive_1 = [[u'Klitze', u'klitze', u'kleEine', u'kleine', u'\xdcberaschung', u'.'], [u'Trotzdem', u'hat', u'sie', u'mich', u'gl\xfccklich', u'gemacht', u'!', u':-)', u'-)']]
rle_for_repl_in_text_container = [['', u'kli^4tze', u'kleE^4ine', u'klein^3e', '', ''], ['', '', '', '', '', '', '', u':-)^4', u'-)^3']]
extracted_redu_in_text_container, redu_free_text_container,mapping_redu = stats.extract_reduplications( repl_free_de_row_case_sensitive_1,rle_for_repl_in_text_container)
#p([t[0] for i in json.loads(self.test_dict_row_de_1["text"]) for t in i[0]])
#p((len(extracted_redu_in_text_container),extracted_redu_in_text_container), "extracted_redu_in_text_container")
#p((len(redu_free_text_container),redu_free_text_container), "redu_free_text_container")
#p(mapping_redu, "mapping_redu")
extracted_redu_in_text_container.should.be.equal([[], []])
redu_free_text_container.should.be.equal([[u'Klitze', u'klitze', u'kleEine', u'kleine', u'\xdcberaschung', u'.'], [u'Trotzdem', u'hat', u'sie', u'mich', u'gl\xfccklich', u'gemacht', u'!', u':-)', u'-)']])
mapping_redu.should.be.equal([[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5, 6, 7, 8]])
### ROW 2 ###
repl_free_de_row_case_sensitive_2 = [[u'einen', u'wundersch\xf6nen', u'Tag', u'w\xfcnsche', u'ich', u'euch', u'.'], [u'Geniest', u'genist', u'das', u'Leben', u'.'], [u'Bleibt', u'bleibt', u'Hungrig', u'.', u'\U0001f600', u'\U0001f308']]
rle_for_repl_in_text_container = [['', '', u'Ta^6g^6', '', '', '', ''], [u'Genie^11s^2t', u'geni^13st', '', '', ''], [u'Ble^8ibt', u'ble^4ibt', u'Hu^12ngrig', '', u'\U0001f600^5', u'\U0001f308^7']]
extracted_redu_in_text_container, redu_free_text_container,mapping_redu = stats.extract_reduplications( repl_free_de_row_case_sensitive_2,rle_for_repl_in_text_container)
#p([t[0] for i in json.loads(self.test_dict_row_de_2["text"]) for t in i[0]])
#p((len(extracted_redu_in_text_container),extracted_redu_in_text_container), "extracted_redu_in_text_container")
#p((len(redu_free_text_container),redu_free_text_container), "redu_free_text_container")
#p(mapping_redu, "mapping_redu")
extracted_redu_in_text_container.should.be.equal([[], [], []])
redu_free_text_container.should.be.equal([[u'einen', u'wundersch\xf6nen', u'Tag', u'w\xfcnsche', u'ich', u'euch', u'.'], [u'Geniest', u'genist', u'das', u'Leben', u'.'], [u'Bleibt', u'bleibt', u'Hungrig', u'.', u'\U0001f600', u'\U0001f308']])
mapping_redu.should.be.equal([[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4, 5]])
# # # ########### EN ##############
### ROW 1 ###
repl_free_en_row_case_sensitive_1 = [[u'I', u'loved', u'it', u'.'], [u'But', u'it', u'was', u'also', u'very', u'veRry', u'very', u'pity', u'pity', u'pity', u'pity', u'for', u'me', u'.', u':-(', u'@real_trump', u'#shetlife', u'#readytogo', u'http://www.absurd.com']]
rle_for_repl_in_text_container = [['', '', '', ''], ['', '', '', '', u'ver^4y^5', u'v^3eR^6r^2y', '', u'pi^9ty', '', '', u'pi^3t^3y^3', '', '', u'.^6', u':-(^5', '', '', '', '']]
extracted_redu_in_text_container, redu_free_text_container,mapping_redu = stats.extract_reduplications( repl_free_en_row_case_sensitive_1,rle_for_repl_in_text_container)
#p([t[0] for i in json.loads(self.test_dict_row_en_1["text"]) for t in i[0]])
# p((len(extracted_redu_in_text_container),extracted_redu_in_text_container), "extracted_redu_in_text_container")
#p((len(redu_free_text_container),redu_free_text_container), "redu_free_text_container")
#p(mapping_redu, "mapping_redu")
extracted_redu_in_text_container.should.be.equal([[], [{'start_index_in_orig': 7, 'length': 4, 'word': u'pity', 'index_in_redu_free': 7}]])
redu_free_text_container.should.be.equal([[u'I', u'loved', u'it', u'.'], [u'But', u'it', u'was', u'also', u'very', u'veRry', u'very', (u'pity', {u'pity': 2, u'pi^3t^3y^3': 1, u'pi^9ty': 1}), u'for', u'me', u'.', u':-(', u'@real_trump', u'#shetlife', u'#readytogo', u'http://www.absurd.com']])
mapping_redu.should.be.equal([[0, 1, 2, 3], [0, 1, 2, 3, 4, 5, 6, 7, 11, 12, 13, 14, 15, 16, 17, 18]])
### ROW 2 ###
repl_free_en_row_case_sensitive_2 = [[u'Tiny', u'model', u',', u'but', u'a', u'big', u'big', u'big', u'explanation', u'.'], [u'Right', u'?'], [u'What', u'do', u'you', u'think', u'about', u'it', u'?']]
rle_for_repl_in_text_container = [['', '', '', '', '', '', '', '', u'expla^5nation', ''], [u'Ri^6ght', ''], ['', '', u'you^6', '', '', '', u'?^4']]
extracted_redu_in_text_container, redu_free_text_container,mapping_redu = stats.extract_reduplications( repl_free_en_row_case_sensitive_2,rle_for_repl_in_text_container)
#p([t[0] for i in json.loads(self.test_dict_row_en_2["text"]) for t in i[0]])
# p((len(extracted_redu_in_text_container),extracted_redu_in_text_container), "extracted_redu_in_text_container")
#p((len(redu_free_text_container),redu_free_text_container), "redu_free_text_container")
#p(mapping_redu, "mapping_redu")
extracted_redu_in_text_container.should.be.equal([[{'start_index_in_orig': 5, 'length': 3, 'word': u'big', 'index_in_redu_free': 5}], [], []])
redu_free_text_container.should.be.equal([[u'Tiny', u'model', u',', u'but', u'a', (u'big', {u'big': 3}), u'explanation', u'.'], [u'Right', u'?'], [u'What', u'do', u'you', u'think', u'about', u'it', u'?']])
mapping_redu.should.be.equal([[0, 1, 2, 3, 4, 5, 8, 9], [0, 1], [0, 1, 2, 3, 4, 5, 6]])
# def compute_baseline_1(self, redu_free_text_container, context_right=5, context_left=5):
# inp_token_list = [token for sent in redu_free_text_container for token in sent]
# computed_baseline = []
# ngramm_lenght = context_right+1+context_left
# for n in xrange(1,ngramm_lenght+1):
# computed_baseline += [tuple(inp_token_list[i:i+n]) for i in xrange(len(inp_token_list)-n+1)]
# return computed_baseline
@attr(status='stable')
#@wipd
def test_insert_redu_into_db_lower_case_605(self):
    """Insert pre-extracted reduplications into the stats DB (lower-cased input).

    For two German and two English fixture rows, hand-built intermediate
    containers (extracted reduplications, redu-free / repl-free token lists,
    RLE strings, index mappings, stemmed tokens) are fed into
    ``Stats.insert_redu_into_db``, flushed via the ``_write_*_into_db``
    helpers, and the rows read back from the ``reduplications`` table are
    compared against hard-coded expected tuples.
    """
    self.prj_folder()
    #self.blogger_corpus()
    self.test_dbs()
    #stats = Stats(mode=self.mode)
    stats = Stats(mode=self.mode, )#, )
    # Pull the blogger fixture configuration. Several of these locals
    # (platform_name, license, template_name, source, stats_id, typ) are
    # never used below; note also that `license` shadows the builtin.
    name = self.configer.init_info_data["blogger"]["name"]
    language = self.configer.init_info_data["blogger"]["language"]
    visibility = self.configer.init_info_data["blogger"]["visibility"]
    platform_name = self.configer.init_info_data["blogger"]["platform_name"]
    license = self.configer.init_info_data["blogger"]["license"]
    template_name = self.configer.init_info_data["blogger"]["template_name"]
    version = self.configer.init_info_data["blogger"]["version"]
    source = self.configer.init_info_data["blogger"]["source"]
    encryption_key = self.configer.init_info_data["blogger"]["encryption_key"]["stats"]
    corpus_id = self.configer.init_info_data["blogger"]["id"]["corpus"]
    stats_id = self.configer.init_info_data["blogger"]["id"]["stats"]
    typ= "stats"
    # One corpus DB is opened once and shared by every Stats instance below.
    corp = Corpus(mode=self.mode)
    corp.open(os.path.join(self.tempdir_testdbs,self.db_blogger_plaintext_corp_en))
    ### DE####
    import Stemmer
    stemmer = Stemmer.Stemmer("de")
    ### ROW 1 ###
    # A fresh Stats instance per row so each assertion sees only the rows
    # inserted for that fixture.
    stats = Stats(mode=self.mode, )#, )
    stats.init(self.tempdir_project_folder, name, language, visibility, corpus_id=corpus_id, version= version, encryption_key=encryption_key, baseline_delimiter="++")
    stats._init_compution_variables()
    stats._init_preprocessors()
    stats.corp = corp
    stats._corp_info = corp.info()
    # Unused reference copy of the lower-cased repl-free tokens for this row.
    repl_free_de_row_lowercased_1 = [[u'klitze', u'klitze', u'kleine', u'kleine', u'\xfcberaschung', u'.'], [u'trotzdem', u'hat', u'sie', u'mich', u'gl\xfccklich', u'gemacht', u'!', u':-)', u'-)']]
    #p([t[0] for i in json.loads(self.test_dict_row_de_1["text"]) for t in i[0]])
    #p(self.test_dict_row_de_1["text"])
    text_list = [self.test_dict_row_de_1["id"],self.test_dict_row_de_1["text"]]
    extracted_redu_in_text_container = [[{'start_index_in_orig': 0, 'length': 2, 'word': u'klitze', 'index_in_redu_free': 0}, {'start_index_in_orig': 2, 'length': 2, 'word': u'kleine', 'index_in_redu_free': 1}], []]
    redu_free_text_container = [[(u'klitze', {u'klitze': 1, u'kli^4tze': 1}), (u'kleine', {u'kle^5ine': 1, u'klein^3e': 1}), u'\xfcberaschung', u'.'], [u'trotzdem', u'hat', u'sie', u'mich', u'gl\xfccklich', u'gemacht', u'!', u':-)', u'-)']]
    rle_for_repl_in_text_container = [['', u'kli^4tze', u'kle^5ine', u'klein^3e', '', ''], ['', '', '', '', '', '', '', u':-)^4', u'-)^3']]
    repl_free_text_container = [[u'klitze', u'klitze', u'kleine', u'kleine', u'\xfcberaschung', u'.'], [u'trotzdem', u'hat', u'sie', u'mich', u'gl\xfccklich', u'gemacht', u'!', u':-)', u'-)']]
    mapping_redu = [[0, 2, 4, 5], [0, 1, 2, 3, 4, 5, 6, 7, 8]]
    # Stem either the plain token or, for (token, counts) tuples, the token part.
    stemmed_text_container = [ [stemmer.stemWord(token) if isinstance(token, (str, unicode) ) else stemmer.stemWord(token[0]) for token in sent] for sent in redu_free_text_container ]
    stats.insert_redu_into_db(text_list,json.loads(self.test_dict_row_de_1["text"]),extracted_redu_in_text_container, redu_free_text_container, rle_for_repl_in_text_container, repl_free_text_container,mapping_redu, stemmed_text_container)
    stats._write_repl_into_db(thread_name="Thread0")
    stats._write_redu_into_db(thread_name="Thread0")
    #p(list(stats.statsdb.getall("reduplications")))
    inserted_columns =list(stats.statsdb.getall("reduplications"))
    right_output = [(1, 8888, u'[4, 9]', u'[0, 0]', u'[0, 0]', u'klitze', u'klitz', u'{"klitze": 1, "kli^4tze": 1}', 2, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]'), (2, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'klein', u'{"kle^5ine": 1, "klein^3e": 1}', 2, u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]')]
    inserted_columns.should.be.equal(right_output)
    # ### ROW 2 ###
    stats = Stats(mode=self.mode, )#, )
    stats.init(self.tempdir_project_folder, name, language, visibility, corpus_id=corpus_id, version= version, encryption_key=encryption_key, baseline_delimiter="++")
    stats._init_compution_variables()
    stats._init_preprocessors()
    stats.corp = corp
    stats._corp_info = corp.info()
    repl_free_de_row_lowercased_2 = [[u'einen', u'wundersch\xf6nen', u'tag', u'w\xfcnsche', u'ich', u'euch', u'.'], [u'geniest', u'genist', u'das', u'leben', u'.'], [u'bleibt', u'bleibt', u'hungrig', u'.', u'\U0001f600', u'\U0001f308']]
    #p([t[0] for i in json.loads(self.test_dict_row_de_2["text"]) for t in i[0]], c="r")
    #p(json.loads(self.test_dict_row_de_2["text"]))
    # NOTE(review): this row works on test_dict_row_de_2 everywhere else, but
    # text_list is built from test_dict_row_de_1 — looks like a copy-paste
    # leftover from ROW 1. The hard-coded id 8888 in right_output below
    # matches what is actually inserted, so confirm before changing.
    text_list = [self.test_dict_row_de_1["id"],self.test_dict_row_de_1["text"]]
    extracted_redu_in_text_container = [[], [], [{'start_index_in_orig': 0, 'length': 2, 'word': u'bleibt', 'index_in_redu_free': 0}]]
    redu_free_text_container = [[u'einen', u'wundersch\xf6nen', u'tag', u'w\xfcnsche', u'ich', u'euch', u'.'], [u'geniest', u'genist', u'das', u'leben', u'.'], [(u'bleibt', {u'ble^4ibt': 1, u'ble^8ibt': 1}), u'hungrig', u'.', u'\U0001f600', u'\U0001f308']]
    rle_for_repl_in_text_container = [['', '', u'ta^6g^6', '', '', '', ''], [u'genie^11s^2t', u'geni^13st', '', '', ''], [u'ble^8ibt', u'ble^4ibt', u'hu^12ngrig', '', u'\U0001f600^5', u'\U0001f308^7']]
    repl_free_text_container = [[u'einen', u'wundersch\xf6nen', u'tag', u'w\xfcnsche', u'ich', u'euch', u'.'], [u'geniest', u'genist', u'das', u'leben', u'.'], [u'bleibt', u'bleibt', u'hungrig', u'.', u'\U0001f600', u'\U0001f308']]
    mapping_redu = [[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4], [0, 2, 3, 4, 5]]
    stemmed_text_container = [ [stemmer.stemWord(token) if isinstance(token, (str, unicode) ) else stemmer.stemWord(token[0]) for token in sent] for sent in redu_free_text_container ]
    stats.insert_redu_into_db(text_list,json.loads(self.test_dict_row_de_2["text"]),extracted_redu_in_text_container, redu_free_text_container, rle_for_repl_in_text_container, repl_free_text_container,mapping_redu,stemmed_text_container)
    stats._write_repl_into_db(thread_name="Thread0")
    stats._write_redu_into_db(thread_name="Thread0")
    #p(list(stats.statsdb.getall("reduplications")))
    inserted_columns =list(stats.statsdb.getall("reduplications"))
    right_output = [(1, 8888, u'[7, 5, 5]', u'[2, 0]', u'[2, 0]', u'bleibt', u'bleibt', u'{"ble^4ibt": 1, "ble^8ibt": 1}', 2, u'NN', u'["neutral", 0.0]', u'geniest', u'["NN", null, "geni"]', u'genist', u'["VVFIN", null, "genist"]', u'das', u'["ART", null, "das"]', u'leben', u'["NN", null, "leb"]', u'.', u'["symbol", null, "."]', u'hungrig', u'["NN", null, "hungrig"]', u'.', u'["symbol", null, "."]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]', None, None)]
    inserted_columns.should.be.equal(right_output)
    # ########### EN ##############
    # Switch to the English stemmer for the EN rows.
    stemmer = Stemmer.Stemmer("en")
    # ### ROW 1 ###
    stats = Stats(mode=self.mode, )#, )
    stats.init(self.tempdir_project_folder, name, language, visibility, corpus_id=corpus_id, version= version, encryption_key=encryption_key, baseline_delimiter="++")
    stats._init_compution_variables()
    stats._init_preprocessors()
    stats.corp = corp
    stats._corp_info = corp.info()
    repl_free_en_row_lowercased_1 = [[u'i', u'loved', u'it', u'.'], [u'but', u'it', u'was', u'also', u'very', u'very', u'very', u'pity', u'pity', u'pity', u'pity', u'for', u'me', u'.', u':-(', u'@real_trump', u'#shetlife', u'#readytogo', u'http://www.absurd.com']]
    #p([t[0] for i in json.loads(self.test_dict_row_en_1["text"]) for t in i[0]])
    #p(json.loads(self.test_dict_row_en_1["text"]))
    text_list = [self.test_dict_row_en_1["id"],self.test_dict_row_en_1["text"]]
    extracted_redu_in_text_container = [[], [{'start_index_in_orig': 4, 'length': 3, 'word': u'very', 'index_in_redu_free': 4}, {'start_index_in_orig': 7, 'length': 4, 'word': u'pity', 'index_in_redu_free': 5}]]
    redu_free_text_container = [[u'i', u'loved', u'it', u'.'], [u'but', u'it', u'was', u'also', (u'very', {u'very': 1, u'ver^4y^5': 1, u'v^3er^8y': 1}), (u'pity', {u'pity': 2, u'pi^3t^3y^3': 1, u'pi^9ty': 1}), u'for', u'me', u'.', u':-(', u'@real_trump', u'#shetlife', u'#readytogo', u'http://www.absurd.com']]
    rle_for_repl_in_text_container = [['', '', '', ''], ['', '', '', '', u'ver^4y^5', u'v^3er^8y', '', u'pi^9ty', '', '', u'pi^3t^3y^3', '', '', u'.^6', u':-(^5', '', '', '', '']]
    repl_free_text_container =[[u'i', u'loved', u'it', u'.'], [u'but', u'it', u'was', u'also', u'very', u'very', u'very', u'pity', u'pity', u'pity', u'pity', u'for', u'me', u'.', u':-(', u'@real_trump', u'#shetlife', u'#readytogo', u'http://www.absurd.com']]
    mapping_redu = [[0, 1, 2, 3], [0, 1, 2, 3, 4, 7, 11, 12, 13, 14, 15, 16, 17, 18]]
    stemmed_text_container = [ [stemmer.stemWord(token) if isinstance(token, (str, unicode) ) else stemmer.stemWord(token[0]) for token in sent] for sent in redu_free_text_container ]
    stats.insert_redu_into_db(text_list,json.loads(self.test_dict_row_en_1["text"]),extracted_redu_in_text_container, redu_free_text_container, rle_for_repl_in_text_container,repl_free_text_container,mapping_redu,stemmed_text_container)
    stats._write_repl_into_db(thread_name="Thread0")
    stats._write_redu_into_db(thread_name="Thread0")
    #p(list(stats.statsdb.getall("reduplications")))
    inserted_columns =list(stats.statsdb.getall("reduplications"))
    right_output = [(1, 1111, u'[4, 14]', u'[1, 4]', u'[1, 4]', u'very', u'veri', u'{"very": 1, "ver^4y^5": 1, "v^3er^8y": 1}', 3, u'JJ', u'["negative", -0.1875]', u'.', u'["symbol", null, "."]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'pity', u'["JJ", {"pity": 2, "pi^3t^3y^3": 1, "pi^9ty": 1}, "piti"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]'), (2, 1111, u'[4, 14]', u'[1, 7]', u'[1, 5]', u'pity', u'piti', u'{"pity": 2, "pi^3t^3y^3": 1, "pi^9ty": 1}', 4, u'JJ', u'["negative", -0.1875]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'very', u'["JJ", {"very": 1, "ver^4y^5": 1, "v^3er^8y": 1}, "veri"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]', u'@real_trump', u'["mention", null, "@real_trump"]')]
    inserted_columns.should.be.equal(right_output)
    # ### ROW 2 ###
    stats = Stats(mode=self.mode, )#, )
    stats.init(self.tempdir_project_folder, name, language, visibility, corpus_id=corpus_id, version= version, encryption_key=encryption_key, baseline_delimiter="++")
    stats._init_compution_variables()
    stats._init_preprocessors()
    stats.corp = corp
    stats._corp_info = corp.info()
    repl_free_en_row_lowercased_2 = [[u'tiny', u'model', u',', u'but', u'a', u'big', u'big', u'big', u'explanation', u'.'], [u'right', u'?'], [u'what', u'do', u'you', u'think', u'about', u'it', u'?']]
    #p([t[0] for i in json.loads(self.test_dict_row_en_2["text"]) for t in i[0]])
    #p(json.loads(self.test_dict_row_en_2["text"]))
    text_list = [self.test_dict_row_en_2["id"],self.test_dict_row_en_2["text"]]
    extracted_redu_in_text_container = [[{'start_index_in_orig': 5, 'length': 3, 'word': u'big', 'index_in_redu_free': 5}], [], []]
    redu_free_text_container = [[u'tiny', u'model', u',', u'but', u'a', (u'big', {u'big': 3}), u'explanation', u'.'], [u'right', u'?'], [u'what', u'do', u'you', u'think', u'about', u'it', u'?']]
    rle_for_repl_in_text_container = [['', '', '', '', '', '', '', '', u'expla^5nation', ''], [u'ri^6ght', ''], ['', '', u'you^6', '', '', '', u'?^4']]
    repl_free_text_container = [[u'tiny', u'model', u',', u'but', u'a', u'big', u'big', u'big', u'explanation', u'.'], [u'right', u'?'], [u'what', u'do', u'you', u'think', u'about', u'it', u'?']]
    mapping_redu = [[0, 1, 2, 3, 4, 5, 8, 9], [0, 1], [0, 1, 2, 3, 4, 5, 6]]
    stemmed_text_container = [ [stemmer.stemWord(token) if isinstance(token, (str, unicode) ) else stemmer.stemWord(token[0]) for token in sent] for sent in redu_free_text_container ]
    stats.insert_redu_into_db(text_list,json.loads(self.test_dict_row_en_2["text"]),extracted_redu_in_text_container, redu_free_text_container, rle_for_repl_in_text_container, repl_free_text_container,mapping_redu,stemmed_text_container)
    stats._write_repl_into_db(thread_name="Thread0")
    stats._write_redu_into_db(thread_name="Thread0")
    #p(list(stats.statsdb.getall("reduplications")))
    inserted_columns =list(stats.statsdb.getall("reduplications"))
    right_output = [(1, 5555, u'[8, 2, 7]', u'[0, 5]', u'[0, 5]', u'big', u'big', u'{"big": 3}', 3, u'JJ', u'["neutral", 0.0]', u'tiny', u'["JJ", null, "tini"]', u'model', u'["NN", null, "model"]', u',', u'["symbol", null, ","]', u'but', u'["CC", null, "but"]', u'a', u'["DT", null, "a"]', u'explanation', u'["NN", null, "explan"]', u'.', u'["symbol", null, "."]', u'right', u'["UH", null, "right"]', u'?', u'["symbol", null, "?"]', u'what', u'["WP", null, "what"]')]
    inserted_columns.should.be.equal(right_output)
@attr(status='stable')
#@wipd
def test_compute_baseline_lowercased_606(self):
    """Check ``Stats.compute_baseline`` n-gram construction (lower-cased input).

    Two fake one-sentence inputs pin the exact, ordered n-gram list;
    two fixture rows (DE/EN) compare the result as a set, with
    (token, counts) tuples in the input collapsing to the bare token
    in the emitted n-grams.
    """
    self.prj_folder()
    #self.blogger_corpus()
    self.test_dbs()
    #stats = Stats(mode=self.mode)
    stats = Stats(mode=self.mode, )#, )
    # Blogger fixture configuration; several locals (platform_name, license,
    # template_name, source, stats_id, typ) are unused below, and `license`
    # shadows the builtin.
    name = self.configer.init_info_data["blogger"]["name"]
    language = self.configer.init_info_data["blogger"]["language"]
    visibility = self.configer.init_info_data["blogger"]["visibility"]
    platform_name = self.configer.init_info_data["blogger"]["platform_name"]
    license = self.configer.init_info_data["blogger"]["license"]
    template_name = self.configer.init_info_data["blogger"]["template_name"]
    version = self.configer.init_info_data["blogger"]["version"]
    source = self.configer.init_info_data["blogger"]["source"]
    encryption_key = self.configer.init_info_data["blogger"]["encryption_key"]["stats"]
    corpus_id = self.configer.init_info_data["blogger"]["id"]["corpus"]
    stats_id = self.configer.init_info_data["blogger"]["id"]["stats"]
    typ= "stats"
    #stats = Corpus(logger_level=logging.DEBUG)
    stats.init(self.tempdir_project_folder, name, language, visibility, corpus_id=corpus_id, version= version, encryption_key=encryption_key, baseline_delimiter="++")
    stats._init_compution_variables()
    stats._init_preprocessors()
    #p(stats.statsdb, "stats.statsdb")
    corp = Corpus(mode=self.mode)
    corp.open(os.path.join(self.tempdir_testdbs,self.db_blogger_plaintext_corp_en))
    stats.corp = corp
    stats._corp_info = corp.info()
    # ##### FAKE SENT 1 #######
    inp = [[u'klitze', u'kleine', u'\xfcberaschung', u'.']]
    extracted_redu_in_text_container = [[{}]]
    #assert set(stats.compute_baseline(inp)) == set(self.compute_baseline_1(inp))
    #p( stats.compute_baseline(inp,extracted_redu_in_text_container))
    # 4 tokens -> all contiguous 1..4-grams, in order of increasing length.
    stats.compute_baseline(inp,extracted_redu_in_text_container).should.be.equal([(u'klitze',), (u'kleine',), (u'\xfcberaschung',), (u'.',), (u'klitze', u'kleine'), (u'kleine', u'\xfcberaschung'), (u'\xfcberaschung', u'.'), (u'klitze', u'kleine', u'\xfcberaschung'), (u'kleine', u'\xfcberaschung', u'.'), (u'klitze', u'kleine', u'\xfcberaschung', u'.')])
    # # ##### FAKE SENT 2#######
    # 7 tokens incl. emoji; note the expected list stops at 6-grams here.
    inp = [[u'1', u'2', u'3', u'4', u'5', u'😂', u'🧑🏻']]
    extracted_redu_in_text_container = [[{}]]
    #assert set(stats.compute_baseline(inp)) == set(self.compute_baseline_1(inp))
    #p( stats.compute_baseline(inp))
    stats.compute_baseline(inp,extracted_redu_in_text_container).should.be.equal([(u'1',), (u'2',), (u'3',), (u'4',), (u'5',), (u'\U0001f602',), (u'\U0001f9d1\U0001f3fb',), (u'1', u'2'), (u'2', u'3'), (u'3', u'4'), (u'4', u'5'), (u'5', u'\U0001f602'), (u'\U0001f602', u'\U0001f9d1\U0001f3fb'), (u'1', u'2', u'3'), (u'2', u'3', u'4'), (u'3', u'4', u'5'), (u'4', u'5', u'\U0001f602'), (u'5', u'\U0001f602', u'\U0001f9d1\U0001f3fb'), (u'1', u'2', u'3', u'4'), (u'2', u'3', u'4', u'5'), (u'3', u'4', u'5', u'\U0001f602'), (u'4', u'5', u'\U0001f602', u'\U0001f9d1\U0001f3fb'), (u'1', u'2', u'3', u'4', u'5'), (u'2', u'3', u'4', u'5', u'\U0001f602'), (u'3', u'4', u'5', u'\U0001f602', u'\U0001f9d1\U0001f3fb'), (u'1', u'2', u'3', u'4', u'5', u'\U0001f602'), (u'2', u'3', u'4', u'5', u'\U0001f602', u'\U0001f9d1\U0001f3fb')])
    # ### DE####
    ## ROW 1 ###
    # (token, counts) tuples mark reduplication heads; n-grams carry the bare token.
    inp = [[(u'klitze', {u'klitze': 1, u'kli^4tze': 1}), (u'kleine', {u'kle^5ine': 1, u'klein^3e': 1}), u'\xfcberaschung', u'.'], [u'trotzdem', u'hat', u'sie', u'mich', u'gl\xfccklich', u'gemacht', u'!', u':-)', u'-)']]
    extracted_redu_in_text_container = [[{'start_index_in_orig': 0, 'length': 2, 'word': u'klitze', 'index_in_redu_free': 0}, {'start_index_in_orig': 2, 'length': 2, 'word': u'kleine', 'index_in_redu_free': 1}], []]
    #p( stats.compute_baseline(inp,extracted_redu_in_text_container))
    # Compared as sets: n-grams span the sentence boundary, order irrelevant.
    set(stats.compute_baseline(inp,extracted_redu_in_text_container)).should.be.equal(set([(u'klitze',), (u'kleine',), (u'\xfcberaschung',), (u'.',), (u'trotzdem',), (u'hat',), (u'sie',), (u'mich',), (u'gl\xfccklich',), (u'gemacht',), (u'!',), (u':-)',), (u'-)',), (u'klitze', u'kleine'), (u'kleine', u'\xfcberaschung'), (u'\xfcberaschung', u'.'), (u'.', u'trotzdem'), (u'trotzdem', u'hat'), (u'hat', u'sie'), (u'sie', u'mich'), (u'mich', u'gl\xfccklich'), (u'gl\xfccklich', u'gemacht'), (u'gemacht', u'!'), (u'!', u':-)'), (u':-)', u'-)'), (u'klitze', u'kleine', u'\xfcberaschung'), (u'kleine', u'\xfcberaschung', u'.'), (u'\xfcberaschung', u'.', u'trotzdem'), (u'.', u'trotzdem', u'hat'), (u'trotzdem', u'hat', u'sie'), (u'hat', u'sie', u'mich'), (u'sie', u'mich', u'gl\xfccklich'), (u'mich', u'gl\xfccklich', u'gemacht'), (u'gl\xfccklich', u'gemacht', u'!'), (u'gemacht', u'!', u':-)'), (u'!', u':-)', u'-)'), (u'klitze', u'kleine', u'\xfcberaschung', u'.'), (u'kleine', u'\xfcberaschung', u'.', u'trotzdem'), (u'\xfcberaschung', u'.', u'trotzdem', u'hat'), (u'.', u'trotzdem', u'hat', u'sie'), (u'trotzdem', u'hat', u'sie', u'mich'), (u'hat', u'sie', u'mich', u'gl\xfccklich'), (u'sie', u'mich', u'gl\xfccklich', u'gemacht'), (u'mich', u'gl\xfccklich', u'gemacht', u'!'), (u'gl\xfccklich', u'gemacht', u'!', u':-)'), (u'gemacht', u'!', u':-)', u'-)'), (u'klitze', u'kleine', u'\xfcberaschung', u'.', u'trotzdem'), (u'kleine', u'\xfcberaschung', u'.', u'trotzdem', u'hat'), (u'\xfcberaschung', u'.', u'trotzdem', u'hat', u'sie'), (u'.', u'trotzdem', u'hat', u'sie', u'mich'), (u'trotzdem', u'hat', u'sie', u'mich', u'gl\xfccklich'), (u'hat', u'sie', u'mich', u'gl\xfccklich', u'gemacht'), (u'sie', u'mich', u'gl\xfccklich', u'gemacht', u'!'), (u'mich', u'gl\xfccklich', u'gemacht', u'!', u':-)'), (u'gl\xfccklich', u'gemacht', u'!', u':-)', u'-)'), (u'klitze', u'kleine', u'\xfcberaschung', u'.', u'trotzdem', u'hat'), (u'kleine', u'\xfcberaschung', u'.', u'trotzdem', u'hat', u'sie'), 
    (u'\xfcberaschung', u'.', u'trotzdem', u'hat', u'sie', u'mich'), (u'.', u'trotzdem', u'hat', u'sie', u'mich', u'gl\xfccklich'), (u'trotzdem', u'hat', u'sie', u'mich', u'gl\xfccklich', u'gemacht'), (u'hat', u'sie', u'mich', u'gl\xfccklich', u'gemacht', u'!'), (u'sie', u'mich', u'gl\xfccklich', u'gemacht', u'!', u':-)'), (u'mich', u'gl\xfccklich', u'gemacht', u'!', u':-)', u'-)')]))
    # # # ########### EN ##############
    # ### ROW 1 ###
    inp = [[u'i', u'loved', u'it', u'.'], [u'but', u'it', u'was', u'also', u'very', u'pity', u'for', u'me', u'.', u':-(', u'@real_trump', u'#sheetlife', u'#readytogo', u'http://www.absurd.com']]
    extracted_redu_in_text_container = [[], [{'start_index_in_orig': 4, 'length': 3, 'word': u'very', 'index_in_redu_free': 4}, {'start_index_in_orig': 7, 'length': 4, 'word': u'pity', 'index_in_redu_free': 5}]]
    #p(stats.compute_baseline(inp,extracted_redu_in_text_container))
    # Trailing duplicate (u'very',)/(u'pity',) entries collapse in the set;
    # they reflect the reduplication lengths handed in above.
    set(stats.compute_baseline(inp,extracted_redu_in_text_container)).should.be.equal(set([(u'i',), (u'loved',), (u'it',), (u'.',), (u'but',), (u'it',), (u'was',), (u'also',), (u'very',), (u'pity',), (u'for',), (u'me',), (u'.',), (u':-(',), (u'@real_trump',), (u'#sheetlife',), (u'#readytogo',), (u'http://www.absurd.com',), (u'i', u'loved'), (u'loved', u'it'), (u'it', u'.'), (u'.', u'but'), (u'but', u'it'), (u'it', u'was'), (u'was', u'also'), (u'also', u'very'), (u'very', u'pity'), (u'pity', u'for'), (u'for', u'me'), (u'me', u'.'), (u'.', u':-('), (u':-(', u'@real_trump'), (u'@real_trump', u'#sheetlife'), (u'#sheetlife', u'#readytogo'), (u'#readytogo', u'http://www.absurd.com'), (u'i', u'loved', u'it'), (u'loved', u'it', u'.'), (u'it', u'.', u'but'), (u'.', u'but', u'it'), (u'but', u'it', u'was'), (u'it', u'was', u'also'), (u'was', u'also', u'very'), (u'also', u'very', u'pity'), (u'very', u'pity', u'for'), (u'pity', u'for', u'me'), (u'for', u'me', u'.'), (u'me', u'.', u':-('), (u'.', u':-(', u'@real_trump'), (u':-(', u'@real_trump', u'#sheetlife'), (u'@real_trump', u'#sheetlife', u'#readytogo'), (u'#sheetlife', u'#readytogo', u'http://www.absurd.com'), (u'i', u'loved', u'it', u'.'), (u'loved', u'it', u'.', u'but'), (u'it', u'.', u'but', u'it'), (u'.', u'but', u'it', u'was'), (u'but', u'it', u'was', u'also'), (u'it', u'was', u'also', u'very'), (u'was', u'also', u'very', u'pity'), (u'also', u'very', u'pity', u'for'), (u'very', u'pity', u'for', u'me'), (u'pity', u'for', u'me', u'.'), (u'for', u'me', u'.', u':-('), (u'me', u'.', u':-(', u'@real_trump'), (u'.', u':-(', u'@real_trump', u'#sheetlife'), (u':-(', u'@real_trump', u'#sheetlife', u'#readytogo'), (u'@real_trump', u'#sheetlife', u'#readytogo', u'http://www.absurd.com'), (u'i', u'loved', u'it', u'.', u'but'), (u'loved', u'it', u'.', u'but', u'it'), (u'it', u'.', u'but', u'it', u'was'), (u'.', u'but', u'it', u'was', u'also'), (u'but', u'it', u'was', u'also', u'very'), (u'it', u'was', u'also', u'very', u'pity'), 
    (u'was', u'also', u'very', u'pity', u'for'), (u'also', u'very', u'pity', u'for', u'me'), (u'very', u'pity', u'for', u'me', u'.'), (u'pity', u'for', u'me', u'.', u':-('), (u'for', u'me', u'.', u':-(', u'@real_trump'), (u'me', u'.', u':-(', u'@real_trump', u'#sheetlife'), (u'.', u':-(', u'@real_trump', u'#sheetlife', u'#readytogo'), (u':-(', u'@real_trump', u'#sheetlife', u'#readytogo', u'http://www.absurd.com'), (u'i', u'loved', u'it', u'.', u'but', u'it'), (u'loved', u'it', u'.', u'but', u'it', u'was'), (u'it', u'.', u'but', u'it', u'was', u'also'), (u'.', u'but', u'it', u'was', u'also', u'very'), (u'but', u'it', u'was', u'also', u'very', u'pity'), (u'it', u'was', u'also', u'very', u'pity', u'for'), (u'was', u'also', u'very', u'pity', u'for', u'me'), (u'also', u'very', u'pity', u'for', u'me', u'.'), (u'very', u'pity', u'for', u'me', u'.', u':-('), (u'pity', u'for', u'me', u'.', u':-(', u'@real_trump'), (u'for', u'me', u'.', u':-(', u'@real_trump', u'#sheetlife'), (u'me', u'.', u':-(', u'@real_trump', u'#sheetlife', u'#readytogo'), (u'.', u':-(', u'@real_trump', u'#sheetlife', u'#readytogo', u'http://www.absurd.com'), (u'very',), (u'very',), (u'pity',), (u'pity',), (u'pity',)]))
@attr(status='stable')
#@wipd
def test_temporize_baseline_lowercased_607_1(self):
    """Check ``Stats.temporize_baseline`` accumulation (lower-cased input).

    Feeds pre-computed baseline n-gram lists into ``temporize_baseline``
    and verifies ``stats.temporized_baseline`` holds per-n-gram counts:
    a second identical call doubles every count, and
    ``_init_compution_variables`` resets the accumulator.
    """
    self.prj_folder()
    #self.blogger_corpus()
    self.test_dbs()
    #stats = Stats(mode=self.mode)
    stats = Stats(mode=self.mode, )#, )
    # Blogger fixture configuration; several locals (platform_name, license,
    # template_name, source, stats_id, typ) are unused below, and `license`
    # shadows the builtin.
    name = self.configer.init_info_data["blogger"]["name"]
    language = self.configer.init_info_data["blogger"]["language"]
    visibility = self.configer.init_info_data["blogger"]["visibility"]
    platform_name = self.configer.init_info_data["blogger"]["platform_name"]
    license = self.configer.init_info_data["blogger"]["license"]
    template_name = self.configer.init_info_data["blogger"]["template_name"]
    version = self.configer.init_info_data["blogger"]["version"]
    source = self.configer.init_info_data["blogger"]["source"]
    encryption_key = self.configer.init_info_data["blogger"]["encryption_key"]["stats"]
    corpus_id = self.configer.init_info_data["blogger"]["id"]["corpus"]
    stats_id = self.configer.init_info_data["blogger"]["id"]["stats"]
    typ= "stats"
    #stats = Corpus(logger_level=logging.DEBUG)
    corp = Corpus(mode=self.mode)
    corp.open(os.path.join(self.tempdir_testdbs,self.db_blogger_plaintext_corp_en))
    # Re-create stats so it is initialized against the project folder.
    stats = Stats(mode=self.mode, )#, )
    stats.init(self.tempdir_project_folder, name, language, visibility, corpus_id=corpus_id, version= version, encryption_key=encryption_key, baseline_delimiter="++")
    stats._init_compution_variables()
    stats._init_preprocessors()
    stats.corp = corp
    stats._corp_info = corp.info()
    # ##### FAKE SENT 1 #######
    stats._init_compution_variables()
    #inp = [[u'klitze', u'kleine', u'\xfcberaschung', u'.']]
    computed_baseline = [(u'klitze',), (u'kleine',), (u'\xfcberaschung',), (u'.',), (u'klitze', u'kleine'), (u'kleine', u'\xfcberaschung'), (u'\xfcberaschung', u'.'), (u'klitze', u'kleine', u'\xfcberaschung'), (u'kleine', u'\xfcberaschung', u'.'), (u'klitze', u'kleine', u'\xfcberaschung', u'.')]
    #assert set(stats.compute_baseline(inp)) == set(self.compute_baseline_1(inp))
    #p( stats.compute_baseline(inp))
    extracted_redu_in_text_container = ((),())
    stats.temporize_baseline(computed_baseline,extracted_redu_in_text_container)
    #p(dict(stats.temporized_baseline), "temporized_list ")
    # First call: every n-gram counted once.
    stats.temporized_baseline.should.be.equal( {(u'.',): 1,
    (u'kleine',): 1,
    (u'kleine', u'\xfcberaschung'): 1,
    (u'kleine', u'\xfcberaschung', u'.'): 1,
    (u'klitze',): 1,
    (u'klitze', u'kleine'): 1,
    (u'klitze', u'kleine', u'\xfcberaschung'): 1,
    (u'klitze', u'kleine', u'\xfcberaschung', u'.'): 1,
    (u'\xfcberaschung',): 1,
    (u'\xfcberaschung', u'.'): 1}
    )
    stats.temporize_baseline(computed_baseline,extracted_redu_in_text_container)
    #p(dict(stats.temporized_baseline), "temporized_list ")
    # Second identical call: counts accumulate, so everything is now 2.
    stats.temporized_baseline.should.be.equal( {(u'.',): 2,
    (u'kleine',): 2,
    (u'kleine', u'\xfcberaschung'): 2,
    (u'kleine', u'\xfcberaschung', u'.'): 2,
    (u'klitze',): 2,
    (u'klitze', u'kleine'): 2,
    (u'klitze', u'kleine', u'\xfcberaschung'): 2,
    (u'klitze', u'kleine', u'\xfcberaschung', u'.'): 2,
    (u'\xfcberaschung',): 2,
    (u'\xfcberaschung', u'.'): 2}
    )
    # # ##### FAKE SENT 2#######
    # Re-init clears the temporized accumulator before the next input.
    stats._init_compution_variables()
    computed_baseline = [(u'1',), (u'2',), (u'3',), (u'4',), (u'5',), (u'\U0001f602',), (u'\U0001f9d1\U0001f3fb',), (u'1', u'2'), (u'2', u'3'), (u'3', u'4'), (u'4', u'5'), (u'5', u'\U0001f602'), (u'\U0001f602', u'\U0001f9d1\U0001f3fb'), (u'1', u'2', u'3'), (u'2', u'3', u'4'), (u'3', u'4', u'5'), (u'4', u'5', u'\U0001f602'), (u'5', u'\U0001f602', u'\U0001f9d1\U0001f3fb'), (u'1', u'2', u'3', u'4'), (u'2', u'3', u'4', u'5'), (u'3', u'4', u'5', u'\U0001f602'), (u'4', u'5', u'\U0001f602', u'\U0001f9d1\U0001f3fb'), (u'1', u'2', u'3', u'4', u'5'), (u'2', u'3', u'4', u'5', u'\U0001f602'), (u'3', u'4', u'5', u'\U0001f602', u'\U0001f9d1\U0001f3fb'), (u'1', u'2', u'3', u'4', u'5', u'\U0001f602'), (u'2', u'3', u'4', u'5', u'\U0001f602', u'\U0001f9d1\U0001f3fb'), (u'1', u'2', u'3', u'4', u'5', u'\U0001f602', u'\U0001f9d1\U0001f3fb')]
    stats.temporize_baseline(computed_baseline,extracted_redu_in_text_container)
    #p(dict(stats.temporized_baseline), "temporized_list ")
    stats.temporized_baseline.should.be.equal( {(u'1',): 1,
    (u'1', u'2'): 1,
    (u'1', u'2', u'3'): 1,
    (u'1', u'2', u'3', u'4'): 1,
    (u'1', u'2', u'3', u'4', u'5'): 1,
    (u'1', u'2', u'3', u'4', u'5', u'\U0001f602'): 1,
    (u'1', u'2', u'3', u'4', u'5', u'\U0001f602', u'\U0001f9d1\U0001f3fb'): 1,
    (u'2',): 1,
    (u'2', u'3'): 1,
    (u'2', u'3', u'4'): 1,
    (u'2', u'3', u'4', u'5'): 1,
    (u'2', u'3', u'4', u'5', u'\U0001f602'): 1,
    (u'2', u'3', u'4', u'5', u'\U0001f602', u'\U0001f9d1\U0001f3fb'): 1,
    (u'3',): 1,
    (u'3', u'4'): 1,
    (u'3', u'4', u'5'): 1,
    (u'3', u'4', u'5', u'\U0001f602'): 1,
    (u'3', u'4', u'5', u'\U0001f602', u'\U0001f9d1\U0001f3fb'): 1,
    (u'4',): 1,
    (u'4', u'5'): 1,
    (u'4', u'5', u'\U0001f602'): 1,
    (u'4', u'5', u'\U0001f602', u'\U0001f9d1\U0001f3fb'): 1,
    (u'5',): 1,
    (u'5', u'\U0001f602'): 1,
    (u'5', u'\U0001f602', u'\U0001f9d1\U0001f3fb'): 1,
    (u'\U0001f602',): 1,
    (u'\U0001f602', u'\U0001f9d1\U0001f3fb'): 1,
    (u'\U0001f9d1\U0001f3fb',): 1}
    )
@attr(status='stable')
#@wipd
def test_baseline_lazyinsertion_into_db_lowercased_607_2(self):
"""Exercise Stats.baseline_lazyinsertion_into_db() and baseline_insert_left_over_data().

A precomputed baseline (all n-grams of one fake sentence) is pushed through
the lazy-insertion buffer.  The test checks that:
  * before the explicit flush nothing is written into the "baseline" table;
  * after baseline_insert_left_over_data() the table holds the expected
    (ngram, stemmed-ngram, length, count, 6x None) tuples;
  * two further insertions with baseline_insertion_border=1 raise every
    occurrence counter from 1 to 3 (resp. 2 to 6 for u'klitze').
"""
# --- environment: temp project folder + prepared test databases ---
self.prj_folder()
#self.blogger_corpus()
self.test_dbs()
#stats = Stats(mode=self.mode)
stats = Stats(mode=self.mode, )#, )
# --- read init parameters for the "blogger" test corpus from the configer ---
name = self.configer.init_info_data["blogger"]["name"]
language = self.configer.init_info_data["blogger"]["language"]
visibility = self.configer.init_info_data["blogger"]["visibility"]
platform_name = self.configer.init_info_data["blogger"]["platform_name"]
license = self.configer.init_info_data["blogger"]["license"]
template_name = self.configer.init_info_data["blogger"]["template_name"]
version = self.configer.init_info_data["blogger"]["version"]
source = self.configer.init_info_data["blogger"]["source"]
encryption_key = self.configer.init_info_data["blogger"]["encryption_key"]["stats"]
corpus_id = self.configer.init_info_data["blogger"]["id"]["corpus"]
stats_id = self.configer.init_info_data["blogger"]["id"]["stats"]
typ= "stats"
#stats = Corpus(logger_level=logging.DEBUG)
# --- open the plaintext English test corpus and wire it into a fresh Stats object ---
corp = Corpus(mode=self.mode)
corp.open(os.path.join(self.tempdir_testdbs,self.db_blogger_plaintext_corp_en))
stats = Stats(mode=self.mode, )#, )
stats.init(self.tempdir_project_folder, name, language, visibility, corpus_id=corpus_id, version= version, encryption_key=encryption_key, baseline_delimiter="++")
stats._init_compution_variables()
stats._init_preprocessors()
stats.corp = corp
stats._corp_info = corp.info()
# ##### FAKE SENT 1 #######
stats._init_stemmer(stats._corp_info["language"])
stats._init_compution_variables()
# computed_baseline = every 1- to 4-gram of the fake sentence
# [u'klitze', u'kleine', u'\xfcberaschung', u'.']
#inp = [[u'klitze', u'kleine', u'\xfcberaschung', u'.']]
computed_baseline = [(u'klitze',), (u'kleine',), (u'\xfcberaschung',), (u'.',), (u'klitze', u'kleine'), (u'kleine', u'\xfcberaschung'), (u'\xfcberaschung', u'.'), (u'klitze', u'kleine', u'\xfcberaschung'), (u'kleine', u'\xfcberaschung', u'.'), (u'klitze', u'kleine', u'\xfcberaschung', u'.')]
# one extracted reduplication for u'klitze' with length 2; in the expected
# rows below u'klitze' ends up with count 2 while every other ngram has
# count 1 -- presumably the redu length feeds the counter (NOTE(review):
# confirm against baseline_lazyinsertion_into_db implementation)
extracted_redu_in_text_container = [[
{"word":u'klitze',
"length":2},
],]
stats.baseline_lazyinsertion_into_db(computed_baseline,extracted_redu_in_text_container)
## nothing flushed yet: the "baseline" table must still be empty
stats.statsdb.getall("baseline").should.be.equal([])
## flush the temporized buffer into the DB
stats.baseline_insert_left_over_data()
inserted_baseline = stats.statsdb.getall("baseline")
#p(inserted_baseline, "inserted_baseline1")
# expected rows: (ngram, stemmed ngram, length, count, 6x None placeholders)
baseline_should_be_in_the_db = [(u'\xfcberaschung', u'\xfcberaschung', 1, 1, None, None, None, None, None, None),
(u'klitze++kleine++\xfcberaschung++.', u'klitz++klein++\xfcberaschung++.', 4, 1, None, None, None, None, None, None),
(u'kleine++\xfcberaschung++.', u'klein++\xfcberaschung++.', 3, 1, None, None, None, None, None, None),
(u'kleine++\xfcberaschung', u'klein++\xfcberaschung', 2, 1, None, None, None, None, None, None),
(u'\xfcberaschung++.', u'\xfcberaschung++.', 2, 1, None, None, None, None, None, None),
(u'klitze++kleine++\xfcberaschung', u'klitz++klein++\xfcberaschung', 3, 1, None, None, None, None, None, None),
(u'klitze++kleine', u'klitz++klein', 2, 1, None, None, None, None, None, None),
(u'.', u'.', 1, 1, None, None, None, None, None, None),
(u'klitze', u'klitz', 1, 2, None, None, None, None, None, None),
(u'kleine', u'klein', 1, 1, None, None, None, None, None, None)]
# order-independent comparison of the DB content
set(inserted_baseline).should.be.equal(set(baseline_should_be_in_the_db))
### One More Time: insert the same baseline twice more; with
### baseline_insertion_border=1 the buffer is written out immediately
#stats.temporized_baseline = temporized_baseline
stats.baseline_lazyinsertion_into_db(computed_baseline,extracted_redu_in_text_container,baseline_insertion_border=1)
stats.baseline_lazyinsertion_into_db(computed_baseline, extracted_redu_in_text_container,baseline_insertion_border=1)
stats.baseline_insert_left_over_data()
inserted_baseline = stats.statsdb.getall("baseline")
#p(inserted_baseline, "inserted_baseline2")
# after three insertions in total every counter is tripled (1 -> 3, 2 -> 6)
baseline_should_be_in_the_db = [(u'kleine++\xfcberaschung', u'klein++\xfcberaschung', 2, 3, None, None, None, None, None, None),
(u'\xfcberaschung', u'\xfcberaschung', 1, 3, None, None, None, None, None, None),
(u'klitze++kleine++\xfcberaschung++.', u'klitz++klein++\xfcberaschung++.', 4, 3, None, None, None, None, None, None),
(u'klitze++kleine', u'klitz++klein', 2, 3, None, None, None, None, None, None),
(u'kleine++\xfcberaschung++.', u'klein++\xfcberaschung++.', 3, 3, None, None, None, None, None, None),
(u'klitze', u'klitz', 1, 6, None, None, None, None, None, None),
(u'\xfcberaschung++.', u'\xfcberaschung++.', 2, 3, None, None, None, None, None, None),
(u'klitze++kleine++\xfcberaschung', u'klitz++klein++\xfcberaschung', 3, 3, None, None, None, None, None, None),
(u'.', u'.', 1, 3, None, None, None, None, None, None),
(u'kleine', u'klein', 1, 3, None, None, None, None, None, None)]
set(inserted_baseline).should.be.equal(set(baseline_should_be_in_the_db))
@attr(status='stable')
#@wipd
def test_intern_compute_function_lower_case_608(self):
"""End-to-end test of the internal Stats._compute() pipeline.

Runs _compute() twice on the same German test row (self.test_dict_row_de_1)
and checks the full content of the "reduplications", "replications" and
"baseline" tables:
  * first pass: 2 redu rows, 5 repl rows, and baseline counters of 1
    (2 resp. for the reduplicated words u'klitze'/u'kleine');
  * second pass: the redu/repl tables grow to 4 resp. 10 rows and every
    baseline counter is doubled.
"""
# --- environment: temp project folder + prepared test databases ---
self.prj_folder()
#self.blogger_corpus()
self.test_dbs()
#stats = Stats(mode=self.mode)
stats = Stats(mode=self.mode,status_bar=False)#, )
# --- read init parameters for the "blogger" test corpus from the configer ---
name = self.configer.init_info_data["blogger"]["name"]
language = self.configer.init_info_data["blogger"]["language"]
visibility = self.configer.init_info_data["blogger"]["visibility"]
platform_name = self.configer.init_info_data["blogger"]["platform_name"]
license = self.configer.init_info_data["blogger"]["license"]
template_name = self.configer.init_info_data["blogger"]["template_name"]
version = self.configer.init_info_data["blogger"]["version"]
source = self.configer.init_info_data["blogger"]["source"]
encryption_key = self.configer.init_info_data["blogger"]["encryption_key"]["stats"]
corpus_id = self.configer.init_info_data["blogger"]["id"]["corpus"]
stats_id = self.configer.init_info_data["blogger"]["id"]["stats"]
typ= "stats"
#stats = Corpus(logger_level=logging.DEBUG)
stats.init(self.tempdir_project_folder, name, language, visibility, corpus_id=corpus_id, version= version, encryption_key=encryption_key, baseline_delimiter="++")
stats._init_compution_variables()
stats._init_preprocessors()
#p(stats.statsdb, "stats.statsdb")
# --- attach the plaintext English test corpus and init the stemmer ---
corp = Corpus(mode=self.mode)
corp.open(os.path.join(self.tempdir_testdbs,self.db_blogger_plaintext_corp_en))
stats.corp = corp
stats._corp_info = corp.info()
stats._init_stemmer(stats._corp_info["language"])
## DE####
import time
### ROW 1 ###
##### FIRST INSERTION #####
#stats._compute([copy.deepcopy(self.test_dict_row_de_1)])
# _compute() takes [id, text] pairs; deep-copied so the fixture row
# cannot be mutated by the pipeline
text_list = [self.test_dict_row_de_1["id"], self.test_dict_row_de_1["text"] ]
stats._compute([copy.deepcopy(text_list)])
#stats._compute([copy.deepcopy([self.test_dict_row_de_1[0], self.test_dict_row_de_1])])
redu = stats.statsdb.getall("reduplications")
repl = stats.statsdb.getall("replications")
baseline = stats.statsdb.getall("baseline")
#p(redu,"redu")
#p(repl,"repl")
#p(baseline,"baseline")
#time.sleep(7)
# exact-value fixture: 2 reduplication rows (u'klitze', u'kleine')
redu.should.be.equal(
[(1, 8888, u'[4, 9]', u'[0, 0]', u'[0, 0]', u'klitze', u'klitz', u'{"klitze": 1, "kli^4tze": 1}', 2, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "\\u00fcberaschung"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzdem"]', u'hat', u'["VAFIN", null, "hat"]'), (2, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'klein', u'{"kle^5ine": 1, "klein^3e": 1}', 2, u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "\\u00fcberaschung"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzdem"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]')]
)
# exact-value fixture: 5 replication rows (letter repetitions + emoticons)
repl.should.be.equal(
[(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'klitz', u'i', 4, 2, u'[0, 0]', u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "\\u00fcberaschung"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzdem"]', u'hat', u'["VAFIN", null, "hat"]'), (2, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5ine', u'klein', u'e', 5, 2, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "\\u00fcberaschung"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzdem"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'), (3, 8888, u'[4, 9]', u'[0, 3]', u'[0, 1]', u'kleine', u'klein^3e', u'klein', u'n', 3, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "\\u00fcberaschung"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzdem"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'), (4, 8888, u'[4, 9]', u'[1, 7]', u'[1, 7]', u':-)', u':-)^4', u':-)', u')', 4, 2, None, u'EMOASC', u'["positive", 0.5]', u'sie', u'["PPER", null, "sie"]', u'mich', u'["PPER", null, "mich"]', u'gl\xfccklich', u'["ADJD", null, "gl\\u00fccklich"]', u'gemacht', u'["VVPP", null, "gemacht"]', u'!', u'["symbol", null, "!"]', u'-)', u'["EMOASC", null, "-)"]', None, None, None, None, None, None, None, None), (5, 8888, u'[4, 9]', u'[1, 8]', u'[1, 8]', u'-)', u'-)^3', u'-)', u')', 3, 1, None, u'EMOASC', u'["positive", 0.5]', u'mich', u'["PPER", null, "mich"]', u'gl\xfccklich', u'["ADJD", null, "gl\\u00fccklich"]', u'gemacht', u'["VVPP", null, "gemacht"]', u'!', u'["symbol", null, "!"]', u':-)', 
u'["EMOASC", null, ":-)"]', None, None, None, None, None, None, None, None, None, None)]
)
# exact-value fixture: all baseline n-grams with count 1 (2 for the
# reduplicated u'klitze'/u'kleine')
baseline.should.be.equal([(u'.++trotzdem', u'.++trotzdem', 2, 1, None, None, None, None, None, None), (u'!', u'!', 1, 1, None, None, None, None, None, None), (u'hat++sie++mich', u'hat++sie++mich', 3, 1, None, None, None, None, None, None), (u'sie++mich++gl\xfccklich++gemacht++!', u'sie++mich++gl\xfccklich++gemacht++!', 5, 1, None, None, None, None, None, None), (u'gemacht++!', u'gemacht++!', 2, 1, None, None, None, None, None, None), (u'gl\xfccklich++gemacht', u'gl\xfccklich++gemacht', 2, 1, None, None, None, None, None, None), (u'sie++mich++gl\xfccklich', u'sie++mich++gl\xfccklich', 3, 1, None, None, None, None, None, None), (u':-)', u':-)', 1, 1, None, None, None, None, None, None), (u'mich++gl\xfccklich++gemacht++!++:-)++-)', u'mich++gl\xfccklich++gemacht++!++:-)++-)', 6, 1, None, None, None, None, None, None), (u'gl\xfccklich', u'gl\xfccklich', 1, 1, None, None, None, None, None, None), (u'gl\xfccklich++gemacht++!++:-)++-)', u'gl\xfccklich++gemacht++!++:-)++-)', 5, 1, None, None, None, None, None, None), (u'.++trotzdem++hat++sie++mich++gl\xfccklich', u'.++trotzdem++hat++sie++mich++gl\xfccklich', 6, 1, None, None, None, None, None, None), (u'gemacht++!++:-)++-)', u'gemacht++!++:-)++-)', 4, 1, None, None, None, None, None, None), (u'klitze++kleine++\xfcberaschung++.++trotzdem++hat', u'klitz++klein++\xfcberaschung++.++trotzdem++hat', 6, 1, None, None, None, None, None, None), (u'mich++gl\xfccklich++gemacht', u'mich++gl\xfccklich++gemacht', 3, 1, None, None, None, None, None, None), (u'gemacht', u'gemacht', 1, 1, None, None, None, None, None, None), (u'!++:-)', u'!++:-)', 2, 1, None, None, None, None, None, None), (u'.++trotzdem++hat++sie++mich', u'.++trotzdem++hat++sie++mich', 5, 1, None, None, None, None, None, None), (u'trotzdem++hat++sie++mich++gl\xfccklich++gemacht', u'trotzdem++hat++sie++mich++gl\xfccklich++gemacht', 6, 1, None, None, None, None, None, None), (u'\xfcberaschung++.++trotzdem', u'\xfcberaschung++.++trotzdem', 3, 1, None, None, None, None, None, 
None), (u'klitze++kleine++\xfcberaschung++.++trotzdem', u'klitz++klein++\xfcberaschung++.++trotzdem', 5, 1, None, None, None, None, None, None), (u':-)++-)', u':-)++-)', 2, 1, None, None, None, None, None, None), (u'gl\xfccklich++gemacht++!', u'gl\xfccklich++gemacht++!', 3, 1, None, None, None, None, None, None), (u'\xfcberaschung++.', u'\xfcberaschung++.', 2, 1, None, None, None, None, None, None), (u'hat++sie++mich++gl\xfccklich++gemacht', u'hat++sie++mich++gl\xfccklich++gemacht', 5, 1, None, None, None, None, None, None), (u'trotzdem++hat++sie++mich++gl\xfccklich', u'trotzdem++hat++sie++mich++gl\xfccklich', 5, 1, None, None, None, None, None, None), (u'klitze++kleine', u'klitz++klein', 2, 1, None, None, None, None, None, None), (u'sie++mich++gl\xfccklich++gemacht++!++:-)', u'sie++mich++gl\xfccklich++gemacht++!++:-)', 6, 1, None, None, None, None, None, None), (u'\xfcberaschung', u'\xfcberaschung', 1, 1, None, None, None, None, None, None), (u'trotzdem++hat++sie', u'trotzdem++hat++sie', 3, 1, None, None, None, None, None, None), (u'kleine', u'klein', 1, 2, None, None, None, None, None, None), (u'-)', u'-)', 1, 1, None, None, None, None, None, None), (u'trotzdem++hat++sie++mich', u'trotzdem++hat++sie++mich', 4, 1, None, None, None, None, None, None), (u'trotzdem++hat', u'trotzdem++hat', 2, 1, None, None, None, None, None, None), (u'.', u'.', 1, 1, None, None, None, None, None, None), (u'trotzdem', u'trotzdem', 1, 1, None, None, None, None, None, None), (u'hat', u'hat', 1, 1, None, None, None, None, None, None), (u'mich', u'mich', 1, 1, None, None, None, None, None, None), (u'.++trotzdem++hat', u'.++trotzdem++hat', 3, 1, None, None, None, None, None, None), (u'klitze', u'klitz', 1, 2, None, None, None, None, None, None), (u'hat++sie', u'hat++sie', 2, 1, None, None, None, None, None, None), (u'hat++sie++mich++gl\xfccklich++gemacht++!', u'hat++sie++mich++gl\xfccklich++gemacht++!', 6, 1, None, None, None, None, None, None), (u'gemacht++!++:-)', u'gemacht++!++:-)', 3, 
1, None, None, None, None, None, None), (u'hat++sie++mich++gl\xfccklich', u'hat++sie++mich++gl\xfccklich', 4, 1, None, None, None, None, None, None), (u'kleine++\xfcberaschung++.++trotzdem', u'klein++\xfcberaschung++.++trotzdem', 4, 1, None, None, None, None, None, None), (u'kleine++\xfcberaschung++.++trotzdem++hat', u'klein++\xfcberaschung++.++trotzdem++hat', 5, 1, None, None, None, None, None, None), (u'.++trotzdem++hat++sie', u'.++trotzdem++hat++sie', 4, 1, None, None, None, None, None, None), (u'kleine++\xfcberaschung++.++trotzdem++hat++sie', u'klein++\xfcberaschung++.++trotzdem++hat++sie', 6, 1, None, None, None, None, None, None), (u'\xfcberaschung++.++trotzdem++hat++sie++mich', u'\xfcberaschung++.++trotzdem++hat++sie++mich', 6, 1, None, None, None, None, None, None), (u'mich++gl\xfccklich++gemacht++!++:-)', u'mich++gl\xfccklich++gemacht++!++:-)', 5, 1, None, None, None, None, None, None), (u'mich++gl\xfccklich', u'mich++gl\xfccklich', 2, 1, None, None, None, None, None, None), (u'\xfcberaschung++.++trotzdem++hat++sie', u'\xfcberaschung++.++trotzdem++hat++sie', 5, 1, None, None, None, None, None, None), (u'sie++mich', u'sie++mich', 2, 1, None, None, None, None, None, None), (u'sie', u'sie', 1, 1, None, None, None, None, None, None), (u'gl\xfccklich++gemacht++!++:-)', u'gl\xfccklich++gemacht++!++:-)', 4, 1, None, None, None, None, None, None), (u'klitze++kleine++\xfcberaschung', u'klitz++klein++\xfcberaschung', 3, 1, None, None, None, None, None, None), (u'\xfcberaschung++.++trotzdem++hat', u'\xfcberaschung++.++trotzdem++hat', 4, 1, None, None, None, None, None, None), (u'klitze++kleine++\xfcberaschung++.', u'klitz++klein++\xfcberaschung++.', 4, 1, None, None, None, None, None, None), (u'!++:-)++-)', u'!++:-)++-)', 3, 1, None, None, None, None, None, None), (u'mich++gl\xfccklich++gemacht++!', u'mich++gl\xfccklich++gemacht++!', 4, 1, None, None, None, None, None, None), (u'kleine++\xfcberaschung++.', u'klein++\xfcberaschung++.', 3, 1, None, None, None, None, 
None, None), (u'sie++mich++gl\xfccklich++gemacht', u'sie++mich++gl\xfccklich++gemacht', 4, 1, None, None, None, None, None, None), (u'kleine++\xfcberaschung', u'klein++\xfcberaschung', 2, 1, None, None, None, None, None, None)])
##### SECOND INSERTION #####
# recompute the identical row: redu/repl tables get new rows appended,
# the baseline counters are incremented instead
#stats._compute([copy.deepcopy(self.test_dict_row_de_1)])
text_list = [self.test_dict_row_de_1["id"], self.test_dict_row_de_1["text"] ]
stats._compute([copy.deepcopy(text_list)])
redu = stats.statsdb.getall("reduplications")
repl = stats.statsdb.getall("replications")
baseline = stats.statsdb.getall("baseline")
# p(redu,"redu")
# p(repl,"repl")
#p(baseline,"baseline")
#time.sleep(7)
# now 4 redu rows: the original two plus duplicates with new ids 3 and 4
redu.should.be.equal(
[(1, 8888, u'[4, 9]', u'[0, 0]', u'[0, 0]', u'klitze', u'klitz', u'{"klitze": 1, "kli^4tze": 1}', 2, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "\\u00fcberaschung"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzdem"]', u'hat', u'["VAFIN", null, "hat"]'), (2, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'klein', u'{"kle^5ine": 1, "klein^3e": 1}', 2, u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "\\u00fcberaschung"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzdem"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'), (3, 8888, u'[4, 9]', u'[0, 0]', u'[0, 0]', u'klitze', u'klitz', u'{"klitze": 1, "kli^4tze": 1}', 2, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "\\u00fcberaschung"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzdem"]', u'hat', u'["VAFIN", null, "hat"]'), (4, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'klein', u'{"kle^5ine": 1, "klein^3e": 1}', 2, u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "\\u00fcberaschung"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzdem"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]')]
)
# now 10 repl rows: original five plus duplicates with ids 6-10
repl.should.be.equal(
[(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'klitz', u'i', 4, 2, u'[0, 0]', u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "\\u00fcberaschung"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzdem"]', u'hat', u'["VAFIN", null, "hat"]'), (2, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5ine', u'klein', u'e', 5, 2, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "\\u00fcberaschung"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzdem"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'), (3, 8888, u'[4, 9]', u'[0, 3]', u'[0, 1]', u'kleine', u'klein^3e', u'klein', u'n', 3, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "\\u00fcberaschung"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzdem"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'), (4, 8888, u'[4, 9]', u'[1, 7]', u'[1, 7]', u':-)', u':-)^4', u':-)', u')', 4, 2, None, u'EMOASC', u'["positive", 0.5]', u'sie', u'["PPER", null, "sie"]', u'mich', u'["PPER", null, "mich"]', u'gl\xfccklich', u'["ADJD", null, "gl\\u00fccklich"]', u'gemacht', u'["VVPP", null, "gemacht"]', u'!', u'["symbol", null, "!"]', u'-)', u'["EMOASC", null, "-)"]', None, None, None, None, None, None, None, None), (5, 8888, u'[4, 9]', u'[1, 8]', u'[1, 8]', u'-)', u'-)^3', u'-)', u')', 3, 1, None, u'EMOASC', u'["positive", 0.5]', u'mich', u'["PPER", null, "mich"]', u'gl\xfccklich', u'["ADJD", null, "gl\\u00fccklich"]', u'gemacht', u'["VVPP", null, "gemacht"]', u'!', u'["symbol", null, "!"]', u':-)', 
u'["EMOASC", null, ":-)"]', None, None, None, None, None, None, None, None, None, None), (6, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'klitz', u'i', 4, 2, u'[0, 0]', u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "\\u00fcberaschung"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzdem"]', u'hat', u'["VAFIN", null, "hat"]'), (7, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5ine', u'klein', u'e', 5, 2, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "\\u00fcberaschung"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzdem"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'), (8, 8888, u'[4, 9]', u'[0, 3]', u'[0, 1]', u'kleine', u'klein^3e', u'klein', u'n', 3, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "\\u00fcberaschung"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzdem"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'), (9, 8888, u'[4, 9]', u'[1, 7]', u'[1, 7]', u':-)', u':-)^4', u':-)', u')', 4, 2, None, u'EMOASC', u'["positive", 0.5]', u'sie', u'["PPER", null, "sie"]', u'mich', u'["PPER", null, "mich"]', u'gl\xfccklich', u'["ADJD", null, "gl\\u00fccklich"]', u'gemacht', u'["VVPP", null, "gemacht"]', u'!', u'["symbol", null, "!"]', u'-)', u'["EMOASC", null, "-)"]', None, None, None, None, None, None, None, None), (10, 8888, u'[4, 9]', u'[1, 8]', u'[1, 8]', u'-)', u'-)^3', u'-)', u')', 3, 1, None, u'EMOASC', u'["positive", 0.5]', u'mich', u'["PPER", null, "mich"]', u'gl\xfccklich', u'["ADJD", null, "gl\\u00fccklich"]', 
u'gemacht', u'["VVPP", null, "gemacht"]', u'!', u'["symbol", null, "!"]', u':-)', u'["EMOASC", null, ":-)"]', None, None, None, None, None, None, None, None, None, None)]
)
# baseline counters doubled: 1 -> 2, and 2 -> 4 for u'klitze'/u'kleine'
baseline.should.be.equal([(u'.++trotzdem', u'.++trotzdem', 2, 2, None, None, None, None, None, None), (u'!', u'!', 1, 2, None, None, None, None, None, None), (u'hat++sie++mich', u'hat++sie++mich', 3, 2, None, None, None, None, None, None), (u'sie++mich++gl\xfccklich++gemacht++!', u'sie++mich++gl\xfccklich++gemacht++!', 5, 2, None, None, None, None, None, None), (u'gemacht++!', u'gemacht++!', 2, 2, None, None, None, None, None, None), (u'gl\xfccklich++gemacht', u'gl\xfccklich++gemacht', 2, 2, None, None, None, None, None, None), (u'sie++mich++gl\xfccklich', u'sie++mich++gl\xfccklich', 3, 2, None, None, None, None, None, None), (u':-)', u':-)', 1, 2, None, None, None, None, None, None), (u'mich++gl\xfccklich++gemacht++!++:-)++-)', u'mich++gl\xfccklich++gemacht++!++:-)++-)', 6, 2, None, None, None, None, None, None), (u'gl\xfccklich', u'gl\xfccklich', 1, 2, None, None, None, None, None, None), (u'gl\xfccklich++gemacht++!++:-)++-)', u'gl\xfccklich++gemacht++!++:-)++-)', 5, 2, None, None, None, None, None, None), (u'.++trotzdem++hat++sie++mich++gl\xfccklich', u'.++trotzdem++hat++sie++mich++gl\xfccklich', 6, 2, None, None, None, None, None, None), (u'gemacht++!++:-)++-)', u'gemacht++!++:-)++-)', 4, 2, None, None, None, None, None, None), (u'klitze++kleine++\xfcberaschung++.++trotzdem++hat', u'klitz++klein++\xfcberaschung++.++trotzdem++hat', 6, 2, None, None, None, None, None, None), (u'mich++gl\xfccklich++gemacht', u'mich++gl\xfccklich++gemacht', 3, 2, None, None, None, None, None, None), (u'gemacht', u'gemacht', 1, 2, None, None, None, None, None, None), (u'!++:-)', u'!++:-)', 2, 2, None, None, None, None, None, None), (u'.++trotzdem++hat++sie++mich', u'.++trotzdem++hat++sie++mich', 5, 2, None, None, None, None, None, None), (u'trotzdem++hat++sie++mich++gl\xfccklich++gemacht', u'trotzdem++hat++sie++mich++gl\xfccklich++gemacht', 6, 2, None, None, None, None, None, None), (u'\xfcberaschung++.++trotzdem', u'\xfcberaschung++.++trotzdem', 3, 2, None, None, None, None, None, 
None), (u'klitze++kleine++\xfcberaschung++.++trotzdem', u'klitz++klein++\xfcberaschung++.++trotzdem', 5, 2, None, None, None, None, None, None), (u':-)++-)', u':-)++-)', 2, 2, None, None, None, None, None, None), (u'gl\xfccklich++gemacht++!', u'gl\xfccklich++gemacht++!', 3, 2, None, None, None, None, None, None), (u'\xfcberaschung++.', u'\xfcberaschung++.', 2, 2, None, None, None, None, None, None), (u'hat++sie++mich++gl\xfccklich++gemacht', u'hat++sie++mich++gl\xfccklich++gemacht', 5, 2, None, None, None, None, None, None), (u'trotzdem++hat++sie++mich++gl\xfccklich', u'trotzdem++hat++sie++mich++gl\xfccklich', 5, 2, None, None, None, None, None, None), (u'klitze++kleine', u'klitz++klein', 2, 2, None, None, None, None, None, None), (u'sie++mich++gl\xfccklich++gemacht++!++:-)', u'sie++mich++gl\xfccklich++gemacht++!++:-)', 6, 2, None, None, None, None, None, None), (u'\xfcberaschung', u'\xfcberaschung', 1, 2, None, None, None, None, None, None), (u'trotzdem++hat++sie', u'trotzdem++hat++sie', 3, 2, None, None, None, None, None, None), (u'kleine', u'klein', 1, 4, None, None, None, None, None, None), (u'-)', u'-)', 1, 2, None, None, None, None, None, None), (u'trotzdem++hat++sie++mich', u'trotzdem++hat++sie++mich', 4, 2, None, None, None, None, None, None), (u'trotzdem++hat', u'trotzdem++hat', 2, 2, None, None, None, None, None, None), (u'.', u'.', 1, 2, None, None, None, None, None, None), (u'trotzdem', u'trotzdem', 1, 2, None, None, None, None, None, None), (u'hat', u'hat', 1, 2, None, None, None, None, None, None), (u'mich', u'mich', 1, 2, None, None, None, None, None, None), (u'.++trotzdem++hat', u'.++trotzdem++hat', 3, 2, None, None, None, None, None, None), (u'klitze', u'klitz', 1, 4, None, None, None, None, None, None), (u'hat++sie', u'hat++sie', 2, 2, None, None, None, None, None, None), (u'hat++sie++mich++gl\xfccklich++gemacht++!', u'hat++sie++mich++gl\xfccklich++gemacht++!', 6, 2, None, None, None, None, None, None), (u'gemacht++!++:-)', u'gemacht++!++:-)', 3, 
2, None, None, None, None, None, None), (u'hat++sie++mich++gl\xfccklich', u'hat++sie++mich++gl\xfccklich', 4, 2, None, None, None, None, None, None), (u'kleine++\xfcberaschung++.++trotzdem', u'klein++\xfcberaschung++.++trotzdem', 4, 2, None, None, None, None, None, None), (u'kleine++\xfcberaschung++.++trotzdem++hat', u'klein++\xfcberaschung++.++trotzdem++hat', 5, 2, None, None, None, None, None, None), (u'.++trotzdem++hat++sie', u'.++trotzdem++hat++sie', 4, 2, None, None, None, None, None, None), (u'kleine++\xfcberaschung++.++trotzdem++hat++sie', u'klein++\xfcberaschung++.++trotzdem++hat++sie', 6, 2, None, None, None, None, None, None), (u'\xfcberaschung++.++trotzdem++hat++sie++mich', u'\xfcberaschung++.++trotzdem++hat++sie++mich', 6, 2, None, None, None, None, None, None), (u'mich++gl\xfccklich++gemacht++!++:-)', u'mich++gl\xfccklich++gemacht++!++:-)', 5, 2, None, None, None, None, None, None), (u'mich++gl\xfccklich', u'mich++gl\xfccklich', 2, 2, None, None, None, None, None, None), (u'\xfcberaschung++.++trotzdem++hat++sie', u'\xfcberaschung++.++trotzdem++hat++sie', 5, 2, None, None, None, None, None, None), (u'sie++mich', u'sie++mich', 2, 2, None, None, None, None, None, None), (u'sie', u'sie', 1, 2, None, None, None, None, None, None), (u'gl\xfccklich++gemacht++!++:-)', u'gl\xfccklich++gemacht++!++:-)', 4, 2, None, None, None, None, None, None), (u'klitze++kleine++\xfcberaschung', u'klitz++klein++\xfcberaschung', 3, 2, None, None, None, None, None, None), (u'\xfcberaschung++.++trotzdem++hat', u'\xfcberaschung++.++trotzdem++hat', 4, 2, None, None, None, None, None, None), (u'klitze++kleine++\xfcberaschung++.', u'klitz++klein++\xfcberaschung++.', 4, 2, None, None, None, None, None, None), (u'!++:-)++-)', u'!++:-)++-)', 3, 2, None, None, None, None, None, None), (u'mich++gl\xfccklich++gemacht++!', u'mich++gl\xfccklich++gemacht++!', 4, 2, None, None, None, None, None, None), (u'kleine++\xfcberaschung++.', u'klein++\xfcberaschung++.', 3, 2, None, None, None, None, 
None, None), (u'sie++mich++gl\xfccklich++gemacht', u'sie++mich++gl\xfccklich++gemacht', 4, 2, None, None, None, None, None, None), (u'kleine++\xfcberaschung', u'klein++\xfcberaschung', 2, 2, None, None, None, None, None, None)])
# # ########### EN ##############
@attr(status='stable')
#@wipd
def test_get_streams_from_corp_609(self):
    """Check Stats.get_streams_from_corpus() for full, duplicate-free coverage.

    Splitting the corpus into N streams must yield every document of the
    "documents" table exactly once, both for a multi-stream split (N=4)
    and for the degenerate single-stream case (N=1).
    """
    # --- environment: temp project folder + prepared test databases ---
    self.prj_folder()
    #self.blogger_corpus()
    self.test_dbs()
    stats = Stats(mode=self.mode, status_bar=True)
    # Only the values actually passed to Stats.init() are extracted here;
    # the other init_info_data fields are not needed by this test.  (The
    # original version also read `license`, shadowing the builtin.)
    name = self.configer.init_info_data["blogger"]["name"]
    language = self.configer.init_info_data["blogger"]["language"]
    visibility = self.configer.init_info_data["blogger"]["visibility"]
    version = self.configer.init_info_data["blogger"]["version"]
    encryption_key = self.configer.init_info_data["blogger"]["encryption_key"]["stats"]
    corpus_id = self.configer.init_info_data["blogger"]["id"]["corpus"]
    stats.init(self.tempdir_project_folder, name, language, visibility, corpus_id=corpus_id, version=version, encryption_key=encryption_key, baseline_delimiter="++")
    # --- open the plaintext English test corpus ---
    corp = Corpus(mode=self.mode)
    corp.open(os.path.join(self.tempdir_testdbs, self.db_blogger_plaintext_corp_en))
    # total number of documents that the streams together must reproduce
    rownum = corp.corpdb.rownum("documents")
    stats._init_compution_variables()
    stats._text_field_name = corp.info()["text_field_name"]
    stats._id_field_name = corp.info()["id_field_name"]
    def _check_stream_split(stream_num):
        # Collect the rows of every stream and verify complete,
        # duplicate-free coverage of the corpus.
        streams = stats.get_streams_from_corpus(corp, stream_num)
        all_rows_from_corpus = []
        for stream in streams:
            rows = list(stream[1])
            all_rows_from_corpus += rows
            # the stream generator must report the length it delivered
            len(stream[1]).should.be.equal(len(rows))
        # stringify rows so they are hashable for the set comparison
        rows_as_text = [unicode(row) for row in all_rows_from_corpus]
        len(rows_as_text).should.be.equal(rownum)          # nothing lost
        len(rows_as_text).should.be.equal(len(set(rows_as_text)))  # no dupes
    _check_stream_split(4)  # Try 1: multiple parallel streams
    _check_stream_split(1)  # Try 2: single stream
@attr(status='stable')
#@wipd
def test_preprocess_610(self):
    """Check that ``Stats._preprocess`` applies the configured cleaning flags.

    A hand-crafted POS-tagged text element is pushed through ``_preprocess``
    with hashtags, URLs, mentions, punctuation and numbers ignored
    (``force_cleaning=True``).  The expected output shows that words are
    lower-cased and each ignored token is replaced by a
    ``(None, ':category:')`` placeholder.
    """
    self.prj_folder()
    self.test_dbs()
    # Only the settings actually consumed by Stats.init() are read here.
    name = self.configer.init_info_data["blogger"]["name"]
    language = self.configer.init_info_data["blogger"]["language"]
    visibility = self.configer.init_info_data["blogger"]["visibility"]
    version = self.configer.init_info_data["blogger"]["version"]
    encryption_key = self.configer.init_info_data["blogger"]["encryption_key"]["stats"]
    corpus_id = self.configer.init_info_data["blogger"]["id"]["corpus"]
    #######without_closing_db_at_the_end #########
    stats = Stats(mode=self.mode, use_cash=True)
    stats.init(self.tempdir_project_folder, name, language, visibility, corpus_id=corpus_id, version=version, baseline_delimiter="++",
               encryption_key=encryption_key,
               ignore_hashtag=True, force_cleaning=True,
               ignore_url=True, ignore_mention=True, ignore_punkt=True, ignore_num=True)
    corp = Corpus(mode=self.mode)
    corp.open(os.path.join(self.tempdir_testdbs, self.db_blogger_plaintext_corp_en))
    # Wire the corpus in by hand (instead of stats.compute()) so only the
    # preprocessing step is exercised.
    stats.corp = corp
    stats._corp_info = stats.corp.info()
    stats._init_compution_variables()
    stats._compute_cleaning_flags()
    text_elem = [[[[u'I', u'PRP'], [u'loved', u'VBD'], [u'it', u'PRP'], [u'.', u'symbol']], [u'positive', 0.7]], [[[u'But', u'CC'], [u'it', u'PRP'], [u'was', u'VBD'], [u'also', u'RB'], [u'verrrryyyyy', u'JJ'], [u'vvveRRRRRRrry', u'NNP'], [u'very', u'RB'], [u'piiiiiiiiity', u'JJ'], [u'pity', u'NN'], [u'pity', u'NN'], [u'piiitttyyy', u'NN'], [u'for', u'IN'], [u'me', u'PRP'], [u'......', u'symbol'], [u':-(((((', u'EMOASC'], [u'@real_trump', u'mention'], [u'#sheetlife', u'hashtag'], [u'#readytogo', u'hashtag'], [u'http://www.absurd.com', u'URL']], [u'negative', -0.1875]]]
    results = stats._preprocess(text_elem)
    #p(results, "results")
    right_results = [([(u'i', u'PRP'), (u'loved', u'VBD'), (u'it', u'PRP'), (None, ':symbol:')], [u'positive', 0.7]), ([(u'but', u'CC'), (u'it', u'PRP'), (u'was', u'VBD'), (u'also', u'RB'), (u'verrrryyyyy', u'JJ'), (u'vvverrrrrrrry', u'NNP'), (u'very', u'RB'), (u'piiiiiiiiity', u'JJ'), (u'pity', u'NN'), (u'pity', u'NN'), (u'piiitttyyy', u'NN'), (u'for', u'IN'), (u'me', u'PRP'), (None, ':symbol:'), (u':-(((((', u'EMOASC'), (None, ':mention:'), (None, ':hashtag:'), (None, ':hashtag:'), (None, ':URL:')], [u'negative', -0.1875])]
    results.should.be.equal(right_results)
@attr(status='stable')
#@wipd
def test_main_compute_function_lower_case_for_1_stream__610_1(self):
    """Run the full ``Stats.compute`` pipeline on a single stream.

    The pipeline is executed twice — once with ``freeze_db=False`` and once
    with ``freeze_db=True`` — and each run is validated against the
    precomputed golden numbers for English: the replication/reduplication
    counts must match, and every replication/reduplication row must refer to
    a syntagma present in the baseline table.
    """
    self.prj_folder()
    self.test_dbs()
    # Only the settings actually consumed by Stats.init() are read here.
    name = self.configer.init_info_data["blogger"]["name"]
    language = self.configer.init_info_data["blogger"]["language"]
    visibility = self.configer.init_info_data["blogger"]["visibility"]
    version = self.configer.init_info_data["blogger"]["version"]
    encryption_key = self.configer.init_info_data["blogger"]["encryption_key"]["stats"]
    corpus_id = self.configer.init_info_data["blogger"]["id"]["corpus"]

    def _run_and_check(freeze_db):
        # Rebuild Stats and Corpus from scratch so each run starts clean.
        stats = Stats(mode=self.mode, use_cash=True)
        stats.init(self.tempdir_project_folder, name, language, visibility,
                   corpus_id=corpus_id, version=version,
                   encryption_key=encryption_key, baseline_delimiter="++")
        corp = Corpus(mode=self.mode)
        corp.open(os.path.join(self.tempdir_testdbs, self.db_blogger_plaintext_corp_en))
        stats.compute(corp, stream_number=1, adjust_to_cpu=False, freeze_db=freeze_db)
        baseline = stats.statsdb.getall("baseline")
        repls = stats.statsdb.getall("replications")
        redus = stats.statsdb.getall("reduplications")
        self.configer.right_rep_num["en"]["repls"].should.be.equal(len(repls))
        self.configer.right_rep_num["en"]["redus"].should.be.equal(len(redus))
        self._check_correctnes(stats.col_index_orig, self.configer._counted_reps["en"],
                               repls=repls, redus=redus, baseline=baseline)
        # Every repl/redu row (column 5 = syntagma) must exist in the baseline.
        bas_synts = [bs[0] for bs in baseline]
        for rows in (redus, repls):
            for r in rows:
                if r[5] not in bas_synts:
                    p(r[5], "ERROR", c="r")
                    assert False

    #######without_closing_db_at_the_end #########
    _run_and_check(False)
    #######with_closing_db_at_the_end #########
    _run_and_check(True)
@attr(status='stable')
# NOTE(review): the @wipd marker was left active here although the test is
# tagged status='stable' and every sibling test has it commented out —
# treated as a stale work-in-progress leftover and disabled for consistency.
#@wipd
def test_main_compute_function_lower_case_for_4_streams_610_2(self):
    """Run the full ``Stats.compute`` pipeline with 4 parallel streams.

    Computes with ``freeze_db=True`` and validates the result against the
    precomputed golden numbers for English: replication/reduplication counts
    must match, and every replication/reduplication row must refer to a
    syntagma present in the baseline table.
    """
    self.prj_folder()
    self.test_dbs()
    # Only the settings actually consumed by Stats.init() are read here.
    name = self.configer.init_info_data["blogger"]["name"]
    language = self.configer.init_info_data["blogger"]["language"]
    visibility = self.configer.init_info_data["blogger"]["visibility"]
    version = self.configer.init_info_data["blogger"]["version"]
    encryption_key = self.configer.init_info_data["blogger"]["encryption_key"]["stats"]
    corpus_id = self.configer.init_info_data["blogger"]["id"]["corpus"]
    #######with_closing_db_at_the_end #########
    stats = Stats(mode=self.mode, use_cash=True, status_bar=True)
    stats.init(self.tempdir_project_folder, name, language, visibility,
               corpus_id=corpus_id, version=version,
               encryption_key=encryption_key, baseline_delimiter="++")
    corp = Corpus(mode=self.mode)
    corp.open(os.path.join(self.tempdir_testdbs, self.db_blogger_plaintext_corp_en))
    stats.compute(corp, stream_number=4, adjust_to_cpu=False, freeze_db=True)
    baseline = stats.statsdb.getall("baseline")
    repls = stats.statsdb.getall("replications")
    redus = stats.statsdb.getall("reduplications")
    self.configer.right_rep_num["en"]["repls"].should.be.equal(len(repls))
    self.configer.right_rep_num["en"]["redus"].should.be.equal(len(redus))
    self._check_correctnes(stats.col_index_orig, self.configer._counted_reps["en"],
                           repls=repls, redus=redus, baseline=baseline)
    # Every repl/redu row (column 5 = syntagma) must exist in the baseline.
    bas_synts = [bs[0] for bs in baseline]
    for rows in (redus, repls):
        for r in rows:
            if r[5] not in bas_synts:
                p(r[5], "ERROR", c="r")
                assert False
@attr(status='stable')
#@wipd
def test_main_compute_function_lower_case_for_1_stream_with_preprocessing_and_frozen_610_3(self):
    """Run ``Stats.compute`` on 1 stream with preprocessing/cleaning enabled.

    The golden-standard data is first adjusted for the cleaning flags
    (numbers, hashtags and punctuation are ignored, so their entries are
    dropped and all hashtags collapse into a single ``:hashtag:``
    placeholder).  Then the pipeline runs un-frozen with
    ``baseline_insertion_border=10`` and the resulting
    replication/reduplication tables are validated against the adjusted data.
    """
    self.prj_folder()
    self.test_dbs()
    # Only the settings actually consumed by Stats.init() are read here.
    name = self.configer.init_info_data["blogger"]["name"]
    language = self.configer.init_info_data["blogger"]["language"]
    visibility = self.configer.init_info_data["blogger"]["visibility"]
    version = self.configer.init_info_data["blogger"]["version"]
    encryption_key = self.configer.init_info_data["blogger"]["encryption_key"]["stats"]
    corpus_id = self.configer.init_info_data["blogger"]["id"]["corpus"]
    ### Compute Golden Standard Data ##
    precomputed_data = copy.deepcopy(self.configer._counted_reps["en"])
    # Tokens removed by ignore_num / ignore_hashtag / ignore_punkt:
    del precomputed_data["1"]
    del precomputed_data[u'#shetlife']
    del precomputed_data[u'.']
    del precomputed_data[u'?']
    # All hashtags are mapped to one ':hashtag:' placeholder entry.
    precomputed_data[":hashtag:"] = {'baseline': 4, 'redu': (2, 4)}
    right_rep_num = {
        "repls": sum(data["repl"][1] for data in precomputed_data.values() if "repl" in data),
        "redus": sum(data["redu"][0] for data in precomputed_data.values() if "redu" in data),
    }
    corp = Corpus(mode=self.mode)
    corp.open(os.path.join(self.tempdir_testdbs, self.db_blogger_plaintext_corp_en))
    ########################################################################################
    ######## full_repetativ_syntagma=False
    ########################################################################################
    #####NOT FREEZED #####
    #### baseline_insertion_border=10 ####
    stats = Stats(mode=self.mode, use_cash=True, status_bar=True)
    stats.init(self.tempdir_project_folder, name, language, visibility, corpus_id=corpus_id, version=version, baseline_delimiter="++",
               encryption_key=encryption_key,
               ignore_hashtag=True, force_cleaning=True,
               ignore_url=True, ignore_mention=True, ignore_punkt=True, ignore_num=True)
    stats.compute(corp, stream_number=1, adjust_to_cpu=False, freeze_db=False, baseline_insertion_border=10)
    baseline = stats.statsdb.getall("baseline")
    repls = stats.statsdb.getall("replications")
    redus = stats.statsdb.getall("reduplications")
    self._check_correctnes(stats.col_index_orig, precomputed_data,
                           repls=repls, redus=redus, baseline=baseline)
    right_rep_num["repls"].should.be.equal(len(repls))
    right_rep_num["redus"].should.be.equal(len(redus))
    # Every repl/redu row (column 5 = syntagma) must exist in the baseline.
    bas_synts = [bs[0] for bs in baseline]
    for rows in (redus, repls):
        for r in rows:
            if r[5] not in bas_synts:
                p(r[5], "ERROR", c="r")
                assert False
@attr(status='stable')
#@wipd
def test_main_compute_function_lower_case_for_1_stream_with_and_without_optimization_610_4(self):
self.prj_folder()
#self.blogger_corpus()
self.test_dbs()
#stats = Stats(mode=self.mode)
right_repls = [(1, 1111, u'[4, 14]', u'[1, 4]', u'[1, 4]', u'very', u'ver^4y^5', u'veri', u'r', 4, 2, u'[1, 4]', u'JJ', u'["negative", -0.1875]', u'.', u'["symbol", null, "."]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'pity', u'["JJ", {"pity": 2, "pi^3t^3y^3": 1, "pi^9ty": 1}, "piti"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]'), (2, 1111, u'[4, 14]', u'[1, 4]', u'[1, 4]', u'very', u'ver^4y^5', u'veri', u'y', 5, 3, u'[1, 4]', u'JJ', u'["negative", -0.1875]', u'.', u'["symbol", null, "."]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'pity', u'["JJ", {"pity": 2, "pi^3t^3y^3": 1, "pi^9ty": 1}, "piti"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]'), (3, 1111, u'[4, 14]', u'[1, 5]', u'[1, 4]', u'very', u'v^3er^8y', u'veri', u'v', 3, 0, u'[1, 4]', u'JJ', u'["negative", -0.1875]', u'.', u'["symbol", null, "."]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'pity', u'["JJ", {"pity": 2, "pi^3t^3y^3": 1, "pi^9ty": 1}, "piti"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]'), (4, 1111, u'[4, 14]', u'[1, 5]', u'[1, 4]', u'very', u'v^3er^8y', u'veri', u'r', 8, 2, u'[1, 4]', u'JJ', u'["negative", -0.1875]', u'.', u'["symbol", null, "."]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'pity', u'["JJ", {"pity": 2, "pi^3t^3y^3": 1, "pi^9ty": 1}, "piti"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]'), 
(5, 1111, u'[4, 14]', u'[1, 7]', u'[1, 5]', u'pity', u'pi^9ty', u'piti', u'i', 9, 1, u'[1, 5]', u'JJ', u'["negative", -0.1875]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'very', u'["JJ", {"very": 1, "ver^4y^5": 1, "v^3er^8y": 1}, "veri"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]', u'@real_trump', u'["mention", null, "@real_trump"]'), (6, 1111, u'[4, 14]', u'[1, 10]', u'[1, 5]', u'pity', u'pi^3t^3y^3', u'piti', u'i', 3, 1, u'[1, 5]', u'JJ', u'["negative", -0.1875]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'very', u'["JJ", {"very": 1, "ver^4y^5": 1, "v^3er^8y": 1}, "veri"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]', u'@real_trump', u'["mention", null, "@real_trump"]'), (7, 1111, u'[4, 14]', u'[1, 10]', u'[1, 5]', u'pity', u'pi^3t^3y^3', u'piti', u't', 3, 2, u'[1, 5]', u'JJ', u'["negative", -0.1875]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'very', u'["JJ", {"very": 1, "ver^4y^5": 1, "v^3er^8y": 1}, "veri"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]', u'@real_trump', u'["mention", null, "@real_trump"]'), (8, 1111, u'[4, 14]', u'[1, 10]', u'[1, 5]', u'pity', u'pi^3t^3y^3', u'piti', u'y', 3, 3, u'[1, 5]', u'JJ', u'["negative", -0.1875]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'very', u'["JJ", {"very": 1, "ver^4y^5": 1, "v^3er^8y": 1}, "veri"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', 
u'["EMOASC", null, ":-("]', u'@real_trump', u'["mention", null, "@real_trump"]'), (9, 1111, u'[4, 14]', u'[1, 13]', u'[1, 8]', u'.', u'.^6', u'.', u'.', 6, 0, None, u'symbol', u'["negative", -0.1875]', u'also', u'["RB", null, "also"]', u'very', u'["JJ", {"very": 1, "ver^4y^5": 1, "v^3er^8y": 1}, "veri"]', u'pity', u'["JJ", {"pity": 2, "pi^3t^3y^3": 1, "pi^9ty": 1}, "piti"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u':-(', u'["EMOASC", null, ":-("]', u'@real_trump', u'["mention", null, "@real_trump"]', u'#shetlife', u'["hashtag", null, "#shetlif"]', u'#readytogo', u'["hashtag", null, "#readytogo"]', u'http://www.absurd.com', u'["URL", null, "http://www.absurd.com"]'), (10, 1111, u'[4, 14]', u'[1, 14]', u'[1, 9]', u':-(', u':-(^5', u':-(', u'(', 5, 2, None, u'EMOASC', u'["negative", -0.1875]', u'very', u'["JJ", {"very": 1, "ver^4y^5": 1, "v^3er^8y": 1}, "veri"]', u'pity', u'["JJ", {"pity": 2, "pi^3t^3y^3": 1, "pi^9ty": 1}, "piti"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u'@real_trump', u'["mention", null, "@real_trump"]', u'#shetlife', u'["hashtag", null, "#shetlif"]', u'#readytogo', u'["hashtag", null, "#readytogo"]', u'http://www.absurd.com', u'["URL", null, "http://www.absurd.com"]', None, None), (11, 2222, u'[5]', u'[0, 0]', u'[0, 0]', u'glad', u'gla^7d', u'glad', u'a', 7, 2, None, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'to', u'["TO", null, "to"]', u'se', u'["VB", null, "se"]', u'you', u'["PRP", null, "you"]', u'-)', u'["EMOASC", null, "-)"]', None, None), (12, 2222, u'[5]', u'[0, 2]', u'[0, 2]', u'se', u'se^9', u'se', u'e', 9, 1, None, u'VB', u'["neutral", 0.0]', None, None, None, None, None, None, u'glad', u'["NN", null, "glad"]', u'to', u'["TO", null, "to"]', u'you', u'["PRP", null, "you"]', u'-)', u'["EMOASC", null, "-)"]', None, None, None, None, None, None), (13, 2222, u'[5]', u'[0, 4]', u'[0, 4]', u'-)', u'-)^4', u'-)', u')', 4, 
1, None, u'EMOASC', u'["neutral", 0.0]', None, None, u'glad', u'["NN", null, "glad"]', u'to', u'["TO", null, "to"]', u'se', u'["VB", null, "se"]', u'you', u'["PRP", null, "you"]', None, None, None, None, None, None, None, None, None, None), (14, 3333, u'[15]', u'[0, 1]', u'[0, 1]', u'bad', u'bad^5', u'bad', u'd', 5, 2, u'[0, 1]', u'JJ', u'["negative", -0.7249999999999999]', None, None, None, None, None, None, None, None, u'a', u'["DT", null, "a"]', u'news', u'["NN", null, "news"]', u',', u'["symbol", null, ","]', u'which', u'["WDT", null, "which"]', u'we', u'["PRP", null, "we"]', u'can', u'["MD", null, "can"]'), (15, 3333, u'[15]', u'[0, 3]', u'[0, 1]', u'bad', u'b^7a^6d', u'bad', u'b', 7, 0, u'[0, 1]', u'JJ', u'["negative", -0.7249999999999999]', None, None, None, None, None, None, None, None, u'a', u'["DT", null, "a"]', u'news', u'["NN", null, "news"]', u',', u'["symbol", null, ","]', u'which', u'["WDT", null, "which"]', u'we', u'["PRP", null, "we"]', u'can', u'["MD", null, "can"]'), (16, 3333, u'[15]', u'[0, 3]', u'[0, 1]', u'bad', u'b^7a^6d', u'bad', u'a', 6, 1, u'[0, 1]', u'JJ', u'["negative", -0.7249999999999999]', None, None, None, None, None, None, None, None, u'a', u'["DT", null, "a"]', u'news', u'["NN", null, "news"]', u',', u'["symbol", null, ","]', u'which', u'["WDT", null, "which"]', u'we', u'["PRP", null, "we"]', u'can', u'["MD", null, "can"]'), (17, 3333, u'[15]', u'[0, 4]', u'[0, 1]', u'bad', u'b^4a^4d^5', u'bad', u'b', 4, 0, u'[0, 1]', u'JJ', u'["negative", -0.7249999999999999]', None, None, None, None, None, None, None, None, u'a', u'["DT", null, "a"]', u'news', u'["NN", null, "news"]', u',', u'["symbol", null, ","]', u'which', u'["WDT", null, "which"]', u'we', u'["PRP", null, "we"]', u'can', u'["MD", null, "can"]'), (18, 3333, u'[15]', u'[0, 4]', u'[0, 1]', u'bad', u'b^4a^4d^5', u'bad', u'a', 4, 1, u'[0, 1]', u'JJ', u'["negative", -0.7249999999999999]', None, None, None, None, None, None, None, None, u'a', u'["DT", null, "a"]', u'news', u'["NN", 
null, "news"]', u',', u'["symbol", null, ","]', u'which', u'["WDT", null, "which"]', u'we', u'["PRP", null, "we"]', u'can', u'["MD", null, "can"]'), (19, 3333, u'[15]', u'[0, 4]', u'[0, 1]', u'bad', u'b^4a^4d^5', u'bad', u'd', 5, 2, u'[0, 1]', u'JJ', u'["negative", -0.7249999999999999]', None, None, None, None, None, None, None, None, u'a', u'["DT", null, "a"]', u'news', u'["NN", null, "news"]', u',', u'["symbol", null, ","]', u'which', u'["WDT", null, "which"]', u'we', u'["PRP", null, "we"]', u'can', u'["MD", null, "can"]'), (20, 3333, u'[15]', u'[0, 5]', u'[0, 1]', u'bad', u'ba^7d', u'bad', u'a', 7, 1, u'[0, 1]', u'JJ', u'["negative", -0.7249999999999999]', None, None, None, None, None, None, None, None, u'a', u'["DT", null, "a"]', u'news', u'["NN", null, "news"]', u',', u'["symbol", null, ","]', u'which', u'["WDT", null, "which"]', u'we', u'["PRP", null, "we"]', u'can', u'["MD", null, "can"]'), (21, 3333, u'[15]', u'[0, 14]', u'[0, 10]', u'-(', u'-(^4', u'-(', u'(', 4, 1, None, u'EMOASC', u'["negative", -0.7249999999999999]', u'we', u'["PRP", null, "we"]', u'can', u'["MD", null, "can"]', u'not', u'["RB", null, "not"]', u'acept', u'["VB", null, "acept"]', u'.', u'["symbol", null, "."]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', u':-(', u'["EMOASC", null, ":-("]', u'#shetlife', u'["hashtag", {"#shetlife": 2}, "#shetlif"]', u'http://www.noooo.com', u'["URL", null, "http://www.noooo.com"]', None, None), (22, 3333, u'[15]', u'[0, 15]', u'[0, 11]', u'\U0001f62b', u'\U0001f62b^12', u'\U0001f62b', u'\U0001f62b', 12, 0, None, u'EMOIMG', u'["negative", -0.7249999999999999]', u'can', u'["MD", null, "can"]', u'not', u'["RB", null, "not"]', u'acept', u'["VB", null, "acept"]', u'.', u'["symbol", null, "."]', u'-(', u'["EMOASC", null, "-("]', u':-(', u'["EMOASC", null, ":-("]', u'#shetlife', u'["hashtag", {"#shetlife": 2}, "#shetlif"]', u'http://www.noooo.com', u'["URL", null, "http://www.noooo.com"]', None, None, None, None), (23, 3333, u'[15]', u'[0, 16]', u'[0, 
12]', u':-(', u':-(^5', u':-(', u'(', 5, 2, None, u'EMOASC', u'["negative", -0.7249999999999999]', u'not', u'["RB", null, "not"]', u'acept', u'["VB", null, "acept"]', u'.', u'["symbol", null, "."]', u'-(', u'["EMOASC", null, "-("]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', u'#shetlife', u'["hashtag", {"#shetlife": 2}, "#shetlif"]', u'http://www.noooo.com', u'["URL", null, "http://www.noooo.com"]', None, None, None, None, None, None), (24, 4444, u'[13]', u'[0, 6]', u'[0, 1]', u'model', u'mo^7del^7', u'model', u'o', 7, 1, None, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'tiny', u'["JJ", {"tiny": 6}, "tini"]', u',', u'["symbol", null, ","]', u'which', u'["WDT", null, "which"]', u'we', u'["PRP", null, "we"]', u'can', u'["MD", null, "can"]', u'use', u'["VB", null, "use"]'), (25, 4444, u'[13]', u'[0, 6]', u'[0, 1]', u'model', u'mo^7del^7', u'model', u'l', 7, 4, None, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'tiny', u'["JJ", {"tiny": 6}, "tini"]', u',', u'["symbol", null, ","]', u'which', u'["WDT", null, "which"]', u'we', u'["PRP", null, "we"]', u'can', u'["MD", null, "can"]', u'use', u'["VB", null, "use"]'), (26, 4444, u'[13]', u'[0, 15]', u'[0, 10]', u'big', u'bi^3g', u'big', u'i', 3, 1, u'[0, 10]', u'NN', u'["neutral", 0.0]', u'can', u'["MD", null, "can"]', u'use', u'["VB", null, "use"]', u'for', u'["IN", null, "for"]', u'explain', u'["VB", null, "explain"]', u'a', u'["DT", null, "a"]', u'things', u'["NNS", null, "thing"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None), (27, 4444, u'[13]', u'[0, 16]', u'[0, 10]', u'big', u'bi^15g', u'big', u'i', 15, 1, u'[0, 10]', u'NN', u'["neutral", 0.0]', u'can', u'["MD", null, "can"]', u'use', u'["VB", null, "use"]', u'for', u'["IN", null, "for"]', u'explain', u'["VB", null, "explain"]', u'a', u'["DT", null, "a"]', u'things', u'["NNS", null, "thing"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None), (28, 
5555, u'[8, 2, 11, 4]', u'[0, 8]', u'[0, 6]', u'explanation', u'expla^5nation', u'explan', u'a', 5, 4, None, u'NN', u'["neutral", 0.0]', u'model', u'["NN", null, "model"]', u',', u'["symbol", null, ","]', u'but', u'["CC", null, "but"]', u'a', u'["DT", null, "a"]', u'big', u'["JJ", {"big": 3}, "big"]', u'.', u'["symbol", null, "."]', u'right', u'["UH", null, "right"]', u'?', u'["symbol", null, "?"]', u'what', u'["WP", null, "what"]', u'do', u'["VBP", null, "do"]'), (29, 5555, u'[8, 2, 11, 4]', u'[1, 0]', u'[1, 0]', u'right', u'ri^6ght', u'right', u'i', 6, 1, None, u'UH', u'["neutral", 0.0]', u'but', u'["CC", null, "but"]', u'a', u'["DT", null, "a"]', u'big', u'["JJ", {"big": 3}, "big"]', u'explanation', u'["NN", null, "explan"]', u'.', u'["symbol", null, "."]', u'?', u'["symbol", null, "?"]', u'what', u'["WP", null, "what"]', u'do', u'["VBP", null, "do"]', u'you', u'["PRP", null, "you"]', u'think', u'["VB", null, "think"]'), (30, 5555, u'[8, 2, 11, 4]', u'[2, 2]', u'[2, 2]', u'you', u'you^6', u'you', u'u', 6, 2, None, u'PRP', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'right', u'["UH", null, "right"]', u'?', u'["symbol", null, "?"]', u'what', u'["WP", null, "what"]', u'do', u'["VBP", null, "do"]', u'think', u'["VB", null, "think"]', u'about', u'["IN", null, "about"]', u'it', u'["PRP", null, "it"]', u'?', u'["symbol", null, "?"]', 1, u'["number", null, "1"]'), (31, 5555, u'[8, 2, 11, 4]', u'[2, 6]', u'[2, 6]', u'?', u'?^4', u'?', u'?', 4, 0, None, u'symbol', u'["neutral", 0.0]', u'do', u'["VBP", null, "do"]', u'you', u'["PRP", null, "you"]', u'think', u'["VB", null, "think"]', u'about', u'["IN", null, "about"]', u'it', u'["PRP", null, "it"]', 1, u'["number", null, "1"]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', 1, u'["number", null, "1"]', u'.', u'["symbol", null, "."]', u'but', u'["NNP", {"bu^5t^4": 1, "b^5u^4t^4": 1}, "but"]'), (32, 5555, u'[8, 2, 11, 4]', u'[2, 7]', u'[2, 7]', u'1', u'1^6', u'1', u'1', 6, 0, None, u'number', u'["neutral", 
0.0]', u'you', u'["PRP", null, "you"]', u'think', u'["VB", null, "think"]', u'about', u'["IN", null, "about"]', u'it', u'["PRP", null, "it"]', u'?', u'["symbol", null, "?"]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', 1, u'["number", null, "1"]', u'.', u'["symbol", null, "."]', u'but', u'["NNP", {"bu^5t^4": 1, "b^5u^4t^4": 1}, "but"]', u'you', u'["NN", {"yo^6u": 1, "y^6ou": 1}, "you"]'), (33, 5555, u'[8, 2, 11, 4]', u'[2, 8]', u'[2, 8]', u'\U0001f62b', u'\U0001f62b^4', u'\U0001f62b', u'\U0001f62b', 4, 0, None, u'EMOIMG', u'["neutral", 0.0]', u'think', u'["VB", null, "think"]', u'about', u'["IN", null, "about"]', u'it', u'["PRP", null, "it"]', u'?', u'["symbol", null, "?"]', 1, u'["number", null, "1"]', 1, u'["number", null, "1"]', u'.', u'["symbol", null, "."]', u'but', u'["NNP", {"bu^5t^4": 1, "b^5u^4t^4": 1}, "but"]', u'you', u'["NN", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'but', u'["FW", {"b^6ut": 1, "b^5ut^4": 1, "b^5u^5t": 1}, "but"]'), (34, 5555, u'[8, 2, 11, 4]', u'[2, 9]', u'[2, 9]', u'1', u'1^8', u'1', u'1', 8, 0, None, u'number', u'["neutral", 0.0]', u'about', u'["IN", null, "about"]', u'it', u'["PRP", null, "it"]', u'?', u'["symbol", null, "?"]', 1, u'["number", null, "1"]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', u'.', u'["symbol", null, "."]', u'but', u'["NNP", {"bu^5t^4": 1, "b^5u^4t^4": 1}, "but"]', u'you', u'["NN", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'but', u'["FW", {"b^6ut": 1, "b^5ut^4": 1, "b^5u^5t": 1}, "but"]', u'you', u'["FW", null, "you"]'), (35, 5555, u'[8, 2, 11, 4]', u'[3, 0]', u'[3, 0]', u'but', u'b^5u^4t^4', u'but', u'b', 5, 0, u'[3, 0]', u'NNP', u'["neutral", 0.0]', u'?', u'["symbol", null, "?"]', 1, u'["number", null, "1"]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', 1, u'["number", null, "1"]', u'.', u'["symbol", null, "."]', u'you', u'["NN", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'but', u'["FW", {"b^6ut": 1, "b^5ut^4": 1, "b^5u^5t": 1}, "but"]', u'you', u'["FW", null, "you"]', None, None, None, None), 
(36, 5555, u'[8, 2, 11, 4]', u'[3, 0]', u'[3, 0]', u'but', u'b^5u^4t^4', u'but', u'u', 4, 1, u'[3, 0]', u'NNP', u'["neutral", 0.0]', u'?', u'["symbol", null, "?"]', 1, u'["number", null, "1"]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', 1, u'["number", null, "1"]', u'.', u'["symbol", null, "."]', u'you', u'["NN", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'but', u'["FW", {"b^6ut": 1, "b^5ut^4": 1, "b^5u^5t": 1}, "but"]', u'you', u'["FW", null, "you"]', None, None, None, None), (37, 5555, u'[8, 2, 11, 4]', u'[3, 0]', u'[3, 0]', u'but', u'b^5u^4t^4', u'but', u't', 4, 2, u'[3, 0]', u'NNP', u'["neutral", 0.0]', u'?', u'["symbol", null, "?"]', 1, u'["number", null, "1"]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', 1, u'["number", null, "1"]', u'.', u'["symbol", null, "."]', u'you', u'["NN", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'but', u'["FW", {"b^6ut": 1, "b^5ut^4": 1, "b^5u^5t": 1}, "but"]', u'you', u'["FW", null, "you"]', None, None, None, None), (38, 5555, u'[8, 2, 11, 4]', u'[3, 1]', u'[3, 0]', u'but', u'bu^5t^4', u'but', u'u', 5, 1, u'[3, 0]', u'NNP', u'["neutral", 0.0]', u'?', u'["symbol", null, "?"]', 1, u'["number", null, "1"]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', 1, u'["number", null, "1"]', u'.', u'["symbol", null, "."]', u'you', u'["NN", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'but', u'["FW", {"b^6ut": 1, "b^5ut^4": 1, "b^5u^5t": 1}, "but"]', u'you', u'["FW", null, "you"]', None, None, None, None), (39, 5555, u'[8, 2, 11, 4]', u'[3, 1]', u'[3, 0]', u'but', u'bu^5t^4', u'but', u't', 4, 2, u'[3, 0]', u'NNP', u'["neutral", 0.0]', u'?', u'["symbol", null, "?"]', 1, u'["number", null, "1"]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', 1, u'["number", null, "1"]', u'.', u'["symbol", null, "."]', u'you', u'["NN", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'but', u'["FW", {"b^6ut": 1, "b^5ut^4": 1, "b^5u^5t": 1}, "but"]', u'you', u'["FW", null, "you"]', None, None, None, None), (40, 5555, u'[8, 2, 11, 4]', u'[3, 2]', u'[3, 1]', u'you', 
u'y^6ou', u'you', u'y', 6, 0, u'[3, 1]', u'NN', u'["neutral", 0.0]', 1, u'["number", null, "1"]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', 1, u'["number", null, "1"]', u'.', u'["symbol", null, "."]', u'but', u'["NNP", {"bu^5t^4": 1, "b^5u^4t^4": 1}, "but"]', u'but', u'["FW", {"b^6ut": 1, "b^5ut^4": 1, "b^5u^5t": 1}, "but"]', u'you', u'["FW", null, "you"]', None, None, None, None, None, None), (41, 5555, u'[8, 2, 11, 4]', u'[3, 3]', u'[3, 1]', u'you', u'yo^6u', u'you', u'o', 6, 1, u'[3, 1]', u'NN', u'["neutral", 0.0]', 1, u'["number", null, "1"]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', 1, u'["number", null, "1"]', u'.', u'["symbol", null, "."]', u'but', u'["NNP", {"bu^5t^4": 1, "b^5u^4t^4": 1}, "but"]', u'but', u'["FW", {"b^6ut": 1, "b^5ut^4": 1, "b^5u^5t": 1}, "but"]', u'you', u'["FW", null, "you"]', None, None, None, None, None, None), (42, 5555, u'[8, 2, 11, 4]', u'[3, 4]', u'[3, 2]', u'but', u'b^6ut', u'but', u'b', 6, 0, u'[3, 2]', u'FW', u'["neutral", 0.0]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', 1, u'["number", null, "1"]', u'.', u'["symbol", null, "."]', u'but', u'["NNP", {"bu^5t^4": 1, "b^5u^4t^4": 1}, "but"]', u'you', u'["NN", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'you', u'["FW", null, "you"]', None, None, None, None, None, None, None, None), (43, 5555, u'[8, 2, 11, 4]', u'[3, 5]', u'[3, 2]', u'but', u'b^5ut^4', u'but', u'b', 5, 0, u'[3, 2]', u'FW', u'["neutral", 0.0]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', 1, u'["number", null, "1"]', u'.', u'["symbol", null, "."]', u'but', u'["NNP", {"bu^5t^4": 1, "b^5u^4t^4": 1}, "but"]', u'you', u'["NN", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'you', u'["FW", null, "you"]', None, None, None, None, None, None, None, None), (44, 5555, u'[8, 2, 11, 4]', u'[3, 5]', u'[3, 2]', u'but', u'b^5ut^4', u'but', u't', 4, 2, u'[3, 2]', u'FW', u'["neutral", 0.0]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', 1, u'["number", null, "1"]', u'.', u'["symbol", null, "."]', 
u'but', u'["NNP", {"bu^5t^4": 1, "b^5u^4t^4": 1}, "but"]', u'you', u'["NN", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'you', u'["FW", null, "you"]', None, None, None, None, None, None, None, None), (45, 5555, u'[8, 2, 11, 4]', u'[3, 6]', u'[3, 2]', u'but', u'b^5u^5t', u'but', u'b', 5, 0, u'[3, 2]', u'FW', u'["neutral", 0.0]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', 1, u'["number", null, "1"]', u'.', u'["symbol", null, "."]', u'but', u'["NNP", {"bu^5t^4": 1, "b^5u^4t^4": 1}, "but"]', u'you', u'["NN", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'you', u'["FW", null, "you"]', None, None, None, None, None, None, None, None), (46, 5555, u'[8, 2, 11, 4]', u'[3, 6]', u'[3, 2]', u'but', u'b^5u^5t', u'but', u'u', 5, 1, u'[3, 2]', u'FW', u'["neutral", 0.0]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', 1, u'["number", null, "1"]', u'.', u'["symbol", null, "."]', u'but', u'["NNP", {"bu^5t^4": 1, "b^5u^4t^4": 1}, "but"]', u'you', u'["NN", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'you', u'["FW", null, "you"]', None, None, None, None, None, None, None, None), (47, 5555, u'[8, 2, 11, 4]', u'[3, 7]', u'[3, 3]', u'you', u'y^3o^2u^4', u'you', u'y', 3, 0, None, u'FW', u'["neutral", 0.0]', 1, u'["number", null, "1"]', u'.', u'["symbol", null, "."]', u'but', u'["NNP", {"bu^5t^4": 1, "b^5u^4t^4": 1}, "but"]', u'you', u'["NN", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'but', u'["FW", {"b^6ut": 1, "b^5ut^4": 1, "b^5u^5t": 1}, "but"]', None, None, None, None, None, None, None, None, None, None), (48, 5555, u'[8, 2, 11, 4]', u'[3, 7]', u'[3, 3]', u'you', u'y^3o^2u^4', u'you', u'u', 4, 2, None, u'FW', u'["neutral", 0.0]', 1, u'["number", null, "1"]', u'.', u'["symbol", null, "."]', u'but', u'["NNP", {"bu^5t^4": 1, "b^5u^4t^4": 1}, "but"]', u'you', u'["NN", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'but', u'["FW", {"b^6ut": 1, "b^5ut^4": 1, "b^5u^5t": 1}, "but"]', None, None, None, None, None, None, None, None, None, None), (49, 6666, u'[3, 9]', u'[0, 0]', u'[0, 0]', u'tiny', u'tin^3y^2', u'tini', 
u'n', 3, 2, u'[0, 0]', u'JJ', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'surprise', u'["NN", null, "surpris"]', u'.', u'["symbol", null, "."]', u'but', u'["NNP", {"bu^5t": 1, "b^5ut": 1}, "but"]', u'you', u'["JJ", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'but', u'["CC", {"b^6ut": 1, "b^5ut": 2}, "but"]'), (50, 6666, u'[3, 9]', u'[1, 0]', u'[1, 0]', u'but', u'b^5ut', u'but', u'b', 5, 0, u'[1, 0]', u'NNP', u'["neutral", 0.0]', None, None, None, None, u'tiny', u'["JJ", {"tin^3y^2": 1, "tiny": 2}, "tini"]', u'surprise', u'["NN", null, "surpris"]', u'.', u'["symbol", null, "."]', u'you', u'["JJ", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'but', u'["CC", {"b^6ut": 1, "b^5ut": 2}, "but"]', u'you', u'["VBD", null, "you"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]'), (51, 6666, u'[3, 9]', u'[1, 1]', u'[1, 0]', u'but', u'bu^5t', u'but', u'u', 5, 1, u'[1, 0]', u'NNP', u'["neutral", 0.0]', None, None, None, None, u'tiny', u'["JJ", {"tin^3y^2": 1, "tiny": 2}, "tini"]', u'surprise', u'["NN", null, "surpris"]', u'.', u'["symbol", null, "."]', u'you', u'["JJ", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'but', u'["CC", {"b^6ut": 1, "b^5ut": 2}, "but"]', u'you', u'["VBD", null, "you"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]'), (52, 6666, u'[3, 9]', u'[1, 2]', u'[1, 1]', u'you', u'y^6ou', u'you', u'y', 6, 0, u'[1, 1]', u'JJ', u'["neutral", 0.0]', None, None, u'tiny', u'["JJ", {"tin^3y^2": 1, "tiny": 2}, "tini"]', u'surprise', u'["NN", null, "surpris"]', u'.', u'["symbol", null, "."]', u'but', u'["NNP", {"bu^5t": 1, "b^5ut": 1}, "but"]', u'but', u'["CC", {"b^6ut": 1, "b^5ut": 2}, "but"]', u'you', u'["VBD", null, "you"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]'), (53, 6666, u'[3, 9]', u'[1, 3]', u'[1, 1]', u'you', 
u'yo^6u', u'you', u'o', 6, 1, u'[1, 1]', u'JJ', u'["neutral", 0.0]', None, None, u'tiny', u'["JJ", {"tin^3y^2": 1, "tiny": 2}, "tini"]', u'surprise', u'["NN", null, "surpris"]', u'.', u'["symbol", null, "."]', u'but', u'["NNP", {"bu^5t": 1, "b^5ut": 1}, "but"]', u'but', u'["CC", {"b^6ut": 1, "b^5ut": 2}, "but"]', u'you', u'["VBD", null, "you"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]'), (54, 6666, u'[3, 9]', u'[1, 4]', u'[1, 2]', u'but', u'b^6ut', u'but', u'b', 6, 0, u'[1, 2]', u'CC', u'["neutral", 0.0]', u'tiny', u'["JJ", {"tin^3y^2": 1, "tiny": 2}, "tini"]', u'surprise', u'["NN", null, "surpris"]', u'.', u'["symbol", null, "."]', u'but', u'["NNP", {"bu^5t": 1, "b^5ut": 1}, "but"]', u'you', u'["JJ", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'you', u'["VBD", null, "you"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]'), (55, 6666, u'[3, 9]', u'[1, 5]', u'[1, 2]', u'but', u'b^5ut', u'but', u'b', 5, 0, u'[1, 2]', u'CC', u'["neutral", 0.0]', u'tiny', u'["JJ", {"tin^3y^2": 1, "tiny": 2}, "tini"]', u'surprise', u'["NN", null, "surpris"]', u'.', u'["symbol", null, "."]', u'but', u'["NNP", {"bu^5t": 1, "b^5ut": 1}, "but"]', u'you', u'["JJ", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'you', u'["VBD", null, "you"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]'), (56, 6666, u'[3, 9]', u'[1, 6]', u'[1, 2]', u'but', u'b^5ut', u'but', u'b', 5, 0, u'[1, 2]', u'CC', u'["neutral", 0.0]', u'tiny', u'["JJ", {"tin^3y^2": 1, "tiny": 2}, "tini"]', u'surprise', u'["NN", null, "surpris"]', u'.', u'["symbol", null, "."]', u'but', u'["NNP", {"bu^5t": 
1, "b^5ut": 1}, "but"]', u'you', u'["JJ", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'you', u'["VBD", null, "you"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]'), (57, 6666, u'[3, 9]', u'[1, 7]', u'[1, 3]', u'you', u'y^3o^2u^4', u'you', u'y', 3, 0, None, u'VBD', u'["neutral", 0.0]', u'surprise', u'["NN", null, "surpris"]', u'.', u'["symbol", null, "."]', u'but', u'["NNP", {"bu^5t": 1, "b^5ut": 1}, "but"]', u'you', u'["JJ", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'but', u'["CC", {"b^6ut": 1, "b^5ut": 2}, "but"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]'), (58, 6666, u'[3, 9]', u'[1, 7]', u'[1, 3]', u'you', u'y^3o^2u^4', u'you', u'u', 4, 2, None, u'VBD', u'["neutral", 0.0]', u'surprise', u'["NN", null, "surpris"]', u'.', u'["symbol", null, "."]', u'but', u'["NNP", {"bu^5t": 1, "b^5ut": 1}, "but"]', u'you', u'["JJ", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'but', u'["CC", {"b^6ut": 1, "b^5ut": 2}, "but"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]'), (59, 6666, u'[3, 9]', u'[1, 8]', u'[1, 4]', u'\U0001f600', u'\U0001f600^5', u'\U0001f600', u'\U0001f600', 5, 0, None, u'EMOIMG', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'but', u'["NNP", {"bu^5t": 1, "b^5ut": 1}, "but"]', u'you', u'["JJ", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'but', u'["CC", {"b^6ut": 1, "b^5ut": 2}, "but"]', u'you', u'["VBD", null, "you"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]', 
u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', None, None), (60, 6666, u'[3, 9]', u'[1, 9]', u'[1, 5]', u'\U0001f308', u'\U0001f308^7', u'\U0001f308', u'\U0001f308', 7, 0, None, u'EMOIMG', u'["neutral", 0.0]', u'but', u'["NNP", {"bu^5t": 1, "b^5ut": 1}, "but"]', u'you', u'["JJ", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'but', u'["CC", {"b^6ut": 1, "b^5ut": 2}, "but"]', u'you', u'["VBD", null, "you"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', None, None, None, None), (61, 6666, u'[3, 9]', u'[1, 10]', u'[1, 6]', u'\U0001f600', u'\U0001f600^5', u'\U0001f600', u'\U0001f600', 5, 0, None, u'EMOIMG', u'["neutral", 0.0]', u'you', u'["JJ", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'but', u'["CC", {"b^6ut": 1, "b^5ut": 2}, "but"]', u'you', u'["VBD", null, "you"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', None, None, None, None, None, None), (62, 6666, u'[3, 9]', u'[1, 11]', u'[1, 7]', u'\U0001f308', u'\U0001f308^7', u'\U0001f308', u'\U0001f308', 7, 0, None, u'EMOIMG', u'["neutral", 0.0]', u'but', u'["CC", {"b^6ut": 1, "b^5ut": 2}, "but"]', u'you', u'["VBD", null, "you"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', None, None, None, None, None, None, None, None), (63, 6666, u'[3, 9]', u'[1, 12]', u'[1, 8]', u'\U0001f600', u'\U0001f600^5', u'\U0001f600', u'\U0001f600', 5, 0, None, u'EMOIMG', u'["neutral", 0.0]', u'you', u'["VBD", null, "you"]', u'\U0001f600', 
u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]', None, None, None, None, None, None, None, None, None, None), (64, 7777, u'[19]', u'[0, 7]', u'[0, 7]', u'\U0001f62b', u'\U0001f62b^4', u'\U0001f62b', u'\U0001f62b', 4, 0, None, u'EMOIMG', u'["positive", 0.27]', u'realy', u'["RB", null, "reali"]', u'bad', u'["JJ", null, "bad"]', u'surprise', u'["NN", null, "surpris"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u',', u'["symbol", null, ","]', u'but', u'["MD", null, "but"]', u'i', u'["PRP", null, "i"]', u'realy', u'["RB", {"realy": 1, "real^3y": 1, "re^5al^4y^3": 1}, "reali"]', u'liked', u'["VBD", null, "like"]'), (65, 7777, u'[19]', u'[0, 9]', u'[0, 9]', u'but', u'bu^10t', u'but', u'u', 10, 1, None, u'MD', u'["positive", 0.27]', u'surprise', u'["NN", null, "surpris"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', u',', u'["symbol", null, ","]', u'i', u'["PRP", null, "i"]', u'realy', u'["RB", {"realy": 1, "real^3y": 1, "re^5al^4y^3": 1}, "reali"]', u'liked', u'["VBD", null, "like"]', u'it', u'["PRP", null, "it"]', u':p', u'["EMOASC", null, ":p"]'), (66, 7777, u'[19]', u'[0, 12]', u'[0, 11]', u'realy', u'real^3y', u'reali', u'l', 3, 3, u'[0, 11]', u'RB', u'["positive", 0.27]', u'me', u'["PRP", null, "me"]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', u',', u'["symbol", null, ","]', u'but', u'["MD", null, "but"]', u'i', u'["PRP", null, "i"]', u'liked', u'["VBD", null, "like"]', u'it', u'["PRP", null, "it"]', u':p', u'["EMOASC", null, ":p"]', u'=)', u'["EMOASC", null, "=)"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]'), (67, 7777, u'[19]', u'[0, 13]', u'[0, 11]', u'realy', u're^5al^4y^3', u'reali', u'e', 5, 1, u'[0, 11]', u'RB', u'["positive", 0.27]', u'me', u'["PRP", null, "me"]', u'\U0001f62b', u'["EMOIMG", 
null, "\\ud83d\\ude2b"]', u',', u'["symbol", null, ","]', u'but', u'["MD", null, "but"]', u'i', u'["PRP", null, "i"]', u'liked', u'["VBD", null, "like"]', u'it', u'["PRP", null, "it"]', u':p', u'["EMOASC", null, ":p"]', u'=)', u'["EMOASC", null, "=)"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]'), (68, 7777, u'[19]', u'[0, 13]', u'[0, 11]', u'realy', u're^5al^4y^3', u'reali', u'l', 4, 3, u'[0, 11]', u'RB', u'["positive", 0.27]', u'me', u'["PRP", null, "me"]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', u',', u'["symbol", null, ","]', u'but', u'["MD", null, "but"]', u'i', u'["PRP", null, "i"]', u'liked', u'["VBD", null, "like"]', u'it', u'["PRP", null, "it"]', u':p', u'["EMOASC", null, ":p"]', u'=)', u'["EMOASC", null, "=)"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]'), (69, 7777, u'[19]', u'[0, 13]', u'[0, 11]', u'realy', u're^5al^4y^3', u'reali', u'y', 3, 4, u'[0, 11]', u'RB', u'["positive", 0.27]', u'me', u'["PRP", null, "me"]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', u',', u'["symbol", null, ","]', u'but', u'["MD", null, "but"]', u'i', u'["PRP", null, "i"]', u'liked', u'["VBD", null, "like"]', u'it', u'["PRP", null, "it"]', u':p', u'["EMOASC", null, ":p"]', u'=)', u'["EMOASC", null, "=)"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]'), (70, 7777, u'[19]', u'[0, 17]', u'[0, 15]', u'=)', u'=)^10', u'=)', u')', 10, 1, None, u'EMOASC', u'["positive", 0.27]', u'i', u'["PRP", null, "i"]', u'realy', u'["RB", {"realy": 1, "real^3y": 1, "re^5al^4y^3": 1}, "reali"]', u'liked', u'["VBD", null, "like"]', u'it', u'["PRP", null, "it"]', u':p', u'["EMOASC", null, ":p"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', None, None, None, None), (71, 7777, u'[19]', u'[0, 18]', u'[0, 16]', u'\U0001f600', u'\U0001f600^5', u'\U0001f600', u'\U0001f600', 5, 0, None, u'EMOIMG', u'["positive", 0.27]', u'realy', u'["RB", 
{"realy": 1, "real^3y": 1, "re^5al^4y^3": 1}, "reali"]', u'liked', u'["VBD", null, "like"]', u'it', u'["PRP", null, "it"]', u':p', u'["EMOASC", null, ":p"]', u'=)', u'["EMOASC", null, "=)"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', None, None, None, None, None, None), (72, 7777, u'[19]', u'[0, 19]', u'[0, 17]', u'\U0001f308', u'\U0001f308^7', u'\U0001f308', u'\U0001f308', 7, 0, None, u'EMOIMG', u'["positive", 0.27]', u'liked', u'["VBD", null, "like"]', u'it', u'["PRP", null, "it"]', u':p', u'["EMOASC", null, ":p"]', u'=)', u'["EMOASC", null, "=)"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', None, None, None, None, None, None, None, None)]
# Expected-output fixture: a list of 15 tuples of precomputed results, keyed by an
# integer index (1..15) and a document id (1111, 3333, 4444, 5555, 6666, 7777).
# Each tuple mixes raw token strings (with ^N repetition markers, e.g. "ver^4y^5"),
# their normalized/stemmed forms ("veri", "piti"), JSON-encoded strings (index
# pairs like '[1, 4]', variant-count dicts like '{"very": 1, ...}', POS-tag
# triples like '["JJ", null, "but"]', and sentiment pairs like
# '["negative", -0.1875]'), plain counts, and None placeholders padding absent
# context tokens. NOTE(review): the exact field layout per tuple is presumably
# fixed by the consumer of this fixture elsewhere in the file — verify against
# the test that unpacks it before reordering anything here.
right_redus = [(1, 1111, u'[4, 14]', u'[1, 4]', u'[1, 4]', u'very', u'veri', u'{"very": 1, "ver^4y^5": 1, "v^3er^8y": 1}', 3, u'JJ', u'["negative", -0.1875]', u'.', u'["symbol", null, "."]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'pity', u'["JJ", {"pity": 2, "pi^3t^3y^3": 1, "pi^9ty": 1}, "piti"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]'), (2, 1111, u'[4, 14]', u'[1, 7]', u'[1, 5]', u'pity', u'piti', u'{"pity": 2, "pi^3t^3y^3": 1, "pi^9ty": 1}', 4, u'JJ', u'["negative", -0.1875]', u'but', u'["CC", null, "but"]', u'it', u'["PRP", null, "it"]', u'was', u'["VBD", null, "was"]', u'also', u'["RB", null, "also"]', u'very', u'["JJ", {"very": 1, "ver^4y^5": 1, "v^3er^8y": 1}, "veri"]', u'for', u'["IN", null, "for"]', u'me', u'["PRP", null, "me"]', u'.', u'["symbol", null, "."]', u':-(', u'["EMOASC", null, ":-("]', u'@real_trump', u'["mention", null, "@real_trump"]'), (3, 3333, u'[15]', u'[0, 1]', u'[0, 1]', u'bad', u'bad', u'{"bad": 1, "ba^7d": 1, "bad^5": 1, "b^4a^4d^5": 1, "b^7a^6d": 1}', 5, u'JJ', u'["negative", -0.7249999999999999]', None, None, None, None, None, None, None, None, u'a', u'["DT", null, "a"]', u'news', u'["NN", null, "news"]', u',', u'["symbol", null, ","]', u'which', u'["WDT", null, "which"]', u'we', u'["PRP", null, "we"]', u'can', u'["MD", null, "can"]'), (4, 3333, u'[15]', u'[0, 17]', u'[0, 13]', u'#shetlife', u'#shetlif', u'{"#shetlife": 2}', 2, u'hashtag', u'["negative", -0.7249999999999999]', u'acept', u'["VB", null, "acept"]', u'.', u'["symbol", null, "."]', u'-(', u'["EMOASC", null, "-("]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', u':-(', u'["EMOASC", null, ":-("]', u'http://www.noooo.com', u'["URL", null, "http://www.noooo.com"]', None, None, None, None, None, None, None, None), (5, 4444, u'[13]', u'[0, 0]', u'[0, 0]', u'tiny', u'tini', u'{"tiny": 6}', 
6, u'JJ', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'model', u'["NN", null, "model"]', u',', u'["symbol", null, ","]', u'which', u'["WDT", null, "which"]', u'we', u'["PRP", null, "we"]', u'can', u'["MD", null, "can"]'), (6, 4444, u'[13]', u'[0, 15]', u'[0, 10]', u'big', u'big', u'{"bi^3g": 1, "bi^15g": 1}', 2, u'NN', u'["neutral", 0.0]', u'can', u'["MD", null, "can"]', u'use', u'["VB", null, "use"]', u'for', u'["IN", null, "for"]', u'explain', u'["VB", null, "explain"]', u'a', u'["DT", null, "a"]', u'things', u'["NNS", null, "thing"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None), (7, 5555, u'[8, 2, 11, 4]', u'[0, 5]', u'[0, 5]', u'big', u'big', u'{"big": 3}', 3, u'JJ', u'["neutral", 0.0]', u'tiny', u'["JJ", null, "tini"]', u'model', u'["NN", null, "model"]', u',', u'["symbol", null, ","]', u'but', u'["CC", null, "but"]', u'a', u'["DT", null, "a"]', u'explanation', u'["NN", null, "explan"]', u'.', u'["symbol", null, "."]', u'right', u'["UH", null, "right"]', u'?', u'["symbol", null, "?"]', u'what', u'["WP", null, "what"]'), (8, 5555, u'[8, 2, 11, 4]', u'[3, 0]', u'[3, 0]', u'but', u'but', u'{"bu^5t^4": 1, "b^5u^4t^4": 1}', 2, u'NNP', u'["neutral", 0.0]', u'?', u'["symbol", null, "?"]', 1, u'["number", null, "1"]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', 1, u'["number", null, "1"]', u'.', u'["symbol", null, "."]', u'you', u'["NN", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'but', u'["FW", {"b^6ut": 1, "b^5ut^4": 1, "b^5u^5t": 1}, "but"]', u'you', u'["FW", null, "you"]', None, None, None, None), (9, 5555, u'[8, 2, 11, 4]', u'[3, 2]', u'[3, 1]', u'you', u'you', u'{"yo^6u": 1, "y^6ou": 1}', 2, u'NN', u'["neutral", 0.0]', 1, u'["number", null, "1"]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', 1, u'["number", null, "1"]', u'.', u'["symbol", null, "."]', u'but', u'["NNP", {"bu^5t^4": 1, "b^5u^4t^4": 1}, "but"]', u'but', u'["FW", {"b^6ut": 1, "b^5ut^4": 1, "b^5u^5t": 1}, "but"]', u'you', 
u'["FW", null, "you"]', None, None, None, None, None, None), (10, 5555, u'[8, 2, 11, 4]', u'[3, 4]', u'[3, 2]', u'but', u'but', u'{"b^6ut": 1, "b^5ut^4": 1, "b^5u^5t": 1}', 3, u'FW', u'["neutral", 0.0]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', 1, u'["number", null, "1"]', u'.', u'["symbol", null, "."]', u'but', u'["NNP", {"bu^5t^4": 1, "b^5u^4t^4": 1}, "but"]', u'you', u'["NN", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'you', u'["FW", null, "you"]', None, None, None, None, None, None, None, None), (11, 6666, u'[3, 9]', u'[0, 0]', u'[0, 0]', u'tiny', u'tini', u'{"tin^3y^2": 1, "tiny": 2}', 3, u'JJ', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'surprise', u'["NN", null, "surpris"]', u'.', u'["symbol", null, "."]', u'but', u'["NNP", {"bu^5t": 1, "b^5ut": 1}, "but"]', u'you', u'["JJ", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'but', u'["CC", {"b^6ut": 1, "b^5ut": 2}, "but"]'), (12, 6666, u'[3, 9]', u'[1, 0]', u'[1, 0]', u'but', u'but', u'{"bu^5t": 1, "b^5ut": 1}', 2, u'NNP', u'["neutral", 0.0]', None, None, None, None, u'tiny', u'["JJ", {"tin^3y^2": 1, "tiny": 2}, "tini"]', u'surprise', u'["NN", null, "surpris"]', u'.', u'["symbol", null, "."]', u'you', u'["JJ", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'but', u'["CC", {"b^6ut": 1, "b^5ut": 2}, "but"]', u'you', u'["VBD", null, "you"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]'), (13, 6666, u'[3, 9]', u'[1, 2]', u'[1, 1]', u'you', u'you', u'{"yo^6u": 1, "y^6ou": 1}', 2, u'JJ', u'["neutral", 0.0]', None, None, u'tiny', u'["JJ", {"tin^3y^2": 1, "tiny": 2}, "tini"]', u'surprise', u'["NN", null, "surpris"]', u'.', u'["symbol", null, "."]', u'but', u'["NNP", {"bu^5t": 1, "b^5ut": 1}, "but"]', u'but', u'["CC", {"b^6ut": 1, "b^5ut": 2}, "but"]', u'you', u'["VBD", null, "you"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]', u'\U0001f600', u'["EMOIMG", null, 
"\\ud83d\\ude00"]'), (14, 6666, u'[3, 9]', u'[1, 4]', u'[1, 2]', u'but', u'but', u'{"b^6ut": 1, "b^5ut": 2}', 3, u'CC', u'["neutral", 0.0]', u'tiny', u'["JJ", {"tin^3y^2": 1, "tiny": 2}, "tini"]', u'surprise', u'["NN", null, "surpris"]', u'.', u'["symbol", null, "."]', u'but', u'["NNP", {"bu^5t": 1, "b^5ut": 1}, "but"]', u'you', u'["JJ", {"yo^6u": 1, "y^6ou": 1}, "you"]', u'you', u'["VBD", null, "you"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'\U0001f308', u'["EMOIMG", null, "\\ud83c\\udf08"]'), (15, 7777, u'[19]', u'[0, 11]', u'[0, 11]', u'realy', u'reali', u'{"realy": 1, "real^3y": 1, "re^5al^4y^3": 1}', 3, u'RB', u'["positive", 0.27]', u'me', u'["PRP", null, "me"]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', u',', u'["symbol", null, ","]', u'but', u'["MD", null, "but"]', u'i', u'["PRP", null, "i"]', u'liked', u'["VBD", null, "like"]', u'it', u'["PRP", null, "it"]', u':p', u'["EMOASC", null, ":p"]', u'=)', u'["EMOASC", null, "=)"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]')]
# right_baseline_not_freezed_not_full_repetativ = [(u'also++very++pity++for++me', u'also++veri++piti++for++me', 5, 1, u'[0, 2, 2, 0, 0]', u'[0, 4, 4, 0, 0]', u'[0, 1, 1, 0, 0]', u'[0, 3, 4, 0, 0]', None, None), (u'it++was++also++very', u'it++was++also++veri', 4, 1, u'[0, 0, 0, 2]', u'[0, 0, 0, 4]', u'[0, 0, 0, 1]', u'[0, 0, 0, 3]', None, None), (u'.++:-(++@real_trump++#shetlife', u'.++:-(++@real_trump++#shetlif', 4, 1, u'[1, 1, 0, 0]', u'[1, 1, 0, 0]', None, None, None, None), (u'.++but++it', u'.++but++it', 3, 1, None, None, None, None, None, None), (u'to', u'to', 1, 1, None, None, None, None, None, None), (u':-(++@real_trump++#shetlife++#readytogo++http://www.absurd.com', u':-(++@real_trump++#shetlif++#readytogo++http://www.absurd.com', 5, 1, u'[1, 0, 0, 0, 0]', u'[1, 0, 0, 0, 0]', None, None, None, None), (u'glad++to++se++you++-)', u'glad++to++se++you++-)', 5, 1, u'[1, 0, 1, 0, 1]', u'[1, 0, 1, 0, 1]', None, None, None, None), (u'i++loved++it++.++but', u'i++love++it++.++but', 5, 1, None, None, None, None, None, None), (u'me++.', u'me++.', 2, 1, u'[0, 1]', u'[0, 1]', None, None, None, None), (u'i++loved', u'i++love', 2, 1, None, None, None, None, None, None), (u'.++:-(++@real_trump', u'.++:-(++@real_trump', 3, 1, u'[1, 1, 0]', u'[1, 1, 0]', None, None, None, None), (u'i++loved++it', u'i++love++it', 3, 1, None, None, None, None, None, None), (u'-)', u'-)', 1, 1, u'1', u'1', None, None, u'1', None), (u'you++-)', u'you++-)', 2, 1, u'[0, 1]', u'[0, 1]', None, None, None, None), (u'me++.++:-(', u'me++.++:-(', 3, 1, u'[0, 1, 1]', u'[0, 1, 1]', None, None, None, None), (u'.++:-(++@real_trump++#shetlife++#readytogo', u'.++:-(++@real_trump++#shetlif++#readytogo', 5, 1, u'[1, 1, 0, 0, 0]', u'[1, 1, 0, 0, 0]', None, None, None, None), (u'but++it', u'but++it', 2, 1, None, None, None, None, None, None), (u'pity++for++me++.', u'piti++for++me++.', 4, 1, u'[2, 0, 0, 1]', u'[4, 0, 0, 1]', u'[1, 0, 0, 0]', u'[4, 0, 0, 0]', None, None), (u'for++me++.++:-(', u'for++me++.++:-(', 4, 1, 
u'[0, 0, 1, 1]', u'[0, 0, 1, 1]', None, None, None, None), (u'me++.++:-(++@real_trump++#shetlife++#readytogo', u'me++.++:-(++@real_trump++#shetlif++#readytogo', 6, 1, u'[0, 1, 1, 0, 0, 0]', u'[0, 1, 1, 0, 0, 0]', None, None, None, None), (u'it++was++also++very++pity', u'it++was++also++veri++piti', 5, 1, u'[0, 0, 0, 2, 2]', u'[0, 0, 0, 4, 4]', u'[0, 0, 0, 1, 1]', u'[0, 0, 0, 3, 4]', None, None), (u'very++pity++for++me++.++:-(', u'veri++piti++for++me++.++:-(', 6, 1, u'[2, 2, 0, 0, 1, 1]', u'[4, 4, 0, 0, 1, 1]', u'[1, 1, 0, 0, 0, 0]', u'[3, 4, 0, 0, 0, 0]', None, None), (u'to++se++you++-)', u'to++se++you++-)', 4, 1, u'[0, 1, 0, 1]', u'[0, 1, 0, 1]', None, None, None, None), (u'http://www.absurd.com', u'http://www.absurd.com', 1, 1, None, None, None, None, None, None), (u'it++was++also++very++pity++for', u'it++was++also++veri++piti++for', 6, 1, u'[0, 0, 0, 2, 2, 0]', u'[0, 0, 0, 4, 4, 0]', u'[0, 0, 0, 1, 1, 0]', u'[0, 0, 0, 3, 4, 0]', None, None), (u'very++pity++for', u'veri++piti++for', 3, 1, u'[2, 2, 0]', u'[4, 4, 0]', u'[1, 1, 0]', u'[3, 4, 0]', None, None), (u'it++.', u'it++.', 2, 1, None, None, None, None, None, None), (u'loved', u'love', 1, 1, None, None, None, None, None, None), (u'@real_trump', u'@real_trump', 1, 1, None, None, None, None, None, None), (u'se++you++-)', u'se++you++-)', 3, 1, u'[1, 0, 1]', u'[1, 0, 1]', None, None, None, None), (u'glad++to', u'glad++to', 2, 1, u'[1, 0]', u'[1, 0]', None, None, None, None), (u'but++it++was++also++very', u'but++it++was++also++veri', 5, 1, u'[0, 0, 0, 0, 2]', u'[0, 0, 0, 0, 4]', u'[0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 3]', None, None), (u'also', u'also', 1, 1, None, None, None, None, None, None), (u'for++me++.', u'for++me++.', 3, 1, u'[0, 0, 1]', u'[0, 0, 1]', None, None, None, None), (u'loved++it++.++but++it', u'love++it++.++but++it', 5, 1, None, None, None, None, None, None), (u'was++also', u'was++also', 2, 1, None, None, None, None, None, None), (u'it++was++also', u'it++was++also', 3, 1, None, None, None, None, None, 
None), (u'loved++it', u'love++it', 2, 1, None, None, None, None, None, None), (u'pity++for++me++.++:-(', u'piti++for++me++.++:-(', 5, 1, u'[2, 0, 0, 1, 1]', u'[4, 0, 0, 1, 1]', u'[1, 0, 0, 0, 0]', u'[4, 0, 0, 0, 0]', None, None), (u'loved++it++.++but', u'love++it++.++but', 4, 1, None, None, None, None, None, None), (u'@real_trump++#shetlife++#readytogo++http://www.absurd.com', u'@real_trump++#shetlif++#readytogo++http://www.absurd.com', 4, 1, None, None, None, None, None, None), (u'.++:-(++@real_trump++#shetlife++#readytogo++http://www.absurd.com', u'.++:-(++@real_trump++#shetlif++#readytogo++http://www.absurd.com', 6, 1, u'[1, 1, 0, 0, 0, 0]', u'[1, 1, 0, 0, 0, 0]', None, None, None, None), (u'pity', u'piti', 1, 4, u'2', u'4', u'1', u'4', u'2', u'1'), (u'me++.++:-(++@real_trump', u'me++.++:-(++@real_trump', 4, 1, u'[0, 1, 1, 0]', u'[0, 1, 1, 0]', None, None, None, None), (u'but++it++was++also++very++pity', u'but++it++was++also++veri++piti', 6, 1, u'[0, 0, 0, 0, 2, 2]', u'[0, 0, 0, 0, 4, 4]', u'[0, 0, 0, 0, 1, 1]', u'[0, 0, 0, 0, 3, 4]', None, None), (u'i++loved++it++.', u'i++love++it++.', 4, 1, None, None, None, None, None, None), (u'very++pity++for++me++.', u'veri++piti++for++me++.', 5, 1, u'[2, 2, 0, 0, 1]', u'[4, 4, 0, 0, 1]', u'[1, 1, 0, 0, 0]', u'[3, 4, 0, 0, 0]', None, None), (u'#readytogo++http://www.absurd.com', u'#readytogo++http://www.absurd.com', 2, 1, None, None, None, None, None, None), (u'#readytogo', u'#readytogo', 1, 1, None, None, None, None, None, None), (u'also++very++pity++for++me++.', u'also++veri++piti++for++me++.', 6, 1, u'[0, 2, 2, 0, 0, 1]', u'[0, 4, 4, 0, 0, 1]', u'[0, 1, 1, 0, 0, 0]', u'[0, 3, 4, 0, 0, 0]', None, None), (u'se++you', u'se++you', 2, 1, u'[1, 0]', u'[1, 0]', None, None, None, None), (u'se', u'se', 1, 1, u'1', u'1', None, None, u'1', None), (u'for++me++.++:-(++@real_trump++#shetlife', u'for++me++.++:-(++@real_trump++#shetlif', 6, 1, u'[0, 0, 1, 1, 0, 0]', u'[0, 0, 1, 1, 0, 0]', None, None, None, None), (u'but++it++was', 
u'but++it++was', 3, 1, None, None, None, None, None, None), (u'glad++to++se++you', u'glad++to++se++you', 4, 1, u'[1, 0, 1, 0]', u'[1, 0, 1, 0]', None, None, None, None), (u'#shetlife++#readytogo', u'#shetlif++#readytogo', 2, 1, None, None, None, None, None, None), (u'very++pity++for++me', u'veri++piti++for++me', 4, 1, u'[2, 2, 0, 0]', u'[4, 4, 0, 0]', u'[1, 1, 0, 0]', u'[3, 4, 0, 0]', None, None), (u'@real_trump++#shetlife++#readytogo', u'@real_trump++#shetlif++#readytogo', 3, 1, None, None, None, None, None, None), (u'#shetlife++#readytogo++http://www.absurd.com', u'#shetlif++#readytogo++http://www.absurd.com', 3, 1, None, None, None, None, None, None), (u':-(++@real_trump', u':-(++@real_trump', 2, 1, u'[1, 0]', u'[1, 0]', None, None, None, None), (u'pity++for++me++.++:-(++@real_trump', u'piti++for++me++.++:-(++@real_trump', 6, 1, u'[2, 0, 0, 1, 1, 0]', u'[4, 0, 0, 1, 1, 0]', u'[1, 0, 0, 0, 0, 0]', u'[4, 0, 0, 0, 0, 0]', None, None), (u'.++but++it++was++also', u'.++but++it++was++also', 5, 1, None, None, None, None, None, None), (u'it++.++but++it++was', u'it++.++but++it++was', 5, 1, None, None, None, None, None, None), (u'was++also++very++pity++for', u'was++also++veri++piti++for', 5, 1, u'[0, 0, 2, 2, 0]', u'[0, 0, 4, 4, 0]', u'[0, 0, 1, 1, 0]', u'[0, 0, 3, 4, 0]', None, None), (u'also++very', u'also++veri', 2, 1, u'[0, 2]', u'[0, 4]', u'[0, 1]', u'[0, 3]', None, None), (u'to++se', u'to++se', 2, 1, u'[0, 1]', u'[0, 1]', None, None, None, None), (u'pity++for', u'piti++for', 2, 1, u'[2, 0]', u'[4, 0]', u'[1, 0]', u'[4, 0]', None, None), (u'to++se++you', u'to++se++you', 3, 1, u'[0, 1, 0]', u'[0, 1, 0]', None, None, None, None), (u'for++me++.++:-(++@real_trump', u'for++me++.++:-(++@real_trump', 5, 1, u'[0, 0, 1, 1, 0]', u'[0, 0, 1, 1, 0]', None, None, None, None), (u'also++very++pity', u'also++veri++piti', 3, 1, u'[0, 2, 2]', u'[0, 4, 4]', u'[0, 1, 1]', u'[0, 3, 4]', None, None), (u'very', u'veri', 1, 3, u'2', u'4', u'1', u'3', u'2', u'1'), 
(u'it++.++but++it++was++also', u'it++.++but++it++was++also', 6, 1, None, None, None, None, None, None), (u'was++also++very', u'was++also++veri', 3, 1, u'[0, 0, 2]', u'[0, 0, 4]', u'[0, 0, 1]', u'[0, 0, 3]', None, None), (u'loved++it++.++but++it++was', u'love++it++.++but++it++was', 6, 1, None, None, None, None, None, None), (u'pity++for++me', u'piti++for++me', 3, 1, u'[2, 0, 0]', u'[4, 0, 0]', u'[1, 0, 0]', u'[4, 0, 0]', None, None), (u'me++.++:-(++@real_trump++#shetlife', u'me++.++:-(++@real_trump++#shetlif', 5, 1, u'[0, 1, 1, 0, 0]', u'[0, 1, 1, 0, 0]', None, None, None, None), (u'very++pity', u'veri++piti', 2, 1, u'[2, 2]', u'[4, 4]', u'[1, 1]', u'[3, 4]', None, None), (u'was++also++very++pity++for++me', u'was++also++veri++piti++for++me', 6, 1, u'[0, 0, 2, 2, 0, 0]', u'[0, 0, 4, 4, 0, 0]', u'[0, 0, 1, 1, 0, 0]', u'[0, 0, 3, 4, 0, 0]', None, None), (u'also++very++pity++for', u'also++veri++piti++for', 4, 1, u'[0, 2, 2, 0]', u'[0, 4, 4, 0]', u'[0, 1, 1, 0]', u'[0, 3, 4, 0]', None, None), (u'but++it++was++also', u'but++it++was++also', 4, 1, None, None, None, None, None, None), (u'@real_trump++#shetlife', u'@real_trump++#shetlif', 2, 1, None, None, None, None, None, None), (u'it++.++but++it', u'it++.++but++it', 4, 1, None, None, None, None, None, None), (u'.++but++it++was', u'.++but++it++was', 4, 1, None, None, None, None, None, None), (u':-(++@real_trump++#shetlife', u':-(++@real_trump++#shetlif', 3, 1, u'[1, 0, 0]', u'[1, 0, 0]', None, None, None, None), (u'glad++to++se', u'glad++to++se', 3, 1, u'[1, 0, 1]', u'[1, 0, 1]', None, None, None, None), (u':-(++@real_trump++#shetlife++#readytogo', u':-(++@real_trump++#shetlif++#readytogo', 4, 1, u'[1, 0, 0, 0]', u'[1, 0, 0, 0]', None, None, None, None), (u'.++:-(', u'.++:-(', 2, 1, u'[1, 1]', u'[1, 1]', None, None, None, None), (u'loved++it++.', u'love++it++.', 3, 1, None, None, None, None, None, None), (u'glad', u'glad', 1, 1, u'1', u'1', None, None, u'1', None), (u'.++but++it++was++also++very', 
u'.++but++it++was++also++veri', 6, 1, u'[0, 0, 0, 0, 0, 2]', u'[0, 0, 0, 0, 0, 4]', u'[0, 0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 0, 3]', None, None), (u'was++also++very++pity', u'was++also++veri++piti', 4, 1, u'[0, 0, 2, 2]', u'[0, 0, 4, 4]', u'[0, 0, 1, 1]', u'[0, 0, 3, 4]', None, None), (u'i++loved++it++.++but++it', u'i++love++it++.++but++it', 6, 1, None, None, None, None, None, None), (u'it++.++but', u'it++.++but', 3, 1, None, None, None, None, None, None), (u',++which', u',++which', 2, 2, None, None, None, None, None, None), (u'bad++news++,++which', u'bad++news++,++which', 4, 1, u'[4, 0, 0, 0]', u'[7, 0, 0, 0]', u'[1, 0, 0, 0]', u'[5, 0, 0, 0]', None, None), (u',++which++we++can++not', u',++which++we++can++not', 5, 1, None, None, None, None, None, None), (u'tiny++model++,++which++we', u'tini++model++,++which++we', 5, 1, u'[0, 1, 0, 0, 0]', u'[0, 2, 0, 0, 0]', u'[1, 0, 0, 0, 0]', u'[6, 0, 0, 0, 0]', None, None), (u'acept++.++-(', u'acept++.++-(', 3, 1, u'[0, 0, 1]', u'[0, 0, 1]', None, None, None, None), (u',++which++we++can', u',++which++we++can', 4, 2, None, None, None, None, None, None), (u'acept++.', u'acept++.', 2, 1, None, None, None, None, None, None), (u',++which++we', u',++which++we', 3, 2, None, None, None, None, None, None), (u'not++acept++.++-(++\U0001f62b', u'not++acept++.++-(++\U0001f62b', 5, 1, u'[0, 0, 0, 1, 1]', u'[0, 0, 0, 1, 1]', None, None, None, None), (u'tiny++model++,++which', u'tini++model++,++which', 4, 1, u'[0, 1, 0, 0]', u'[0, 2, 0, 0]', u'[1, 0, 0, 0]', u'[6, 0, 0, 0]', None, None), (u'a++bad++news++,', u'a++bad++news++,', 4, 1, u'[0, 4, 0, 0]', u'[0, 7, 0, 0]', u'[0, 1, 0, 0]', u'[0, 5, 0, 0]', None, None), (u'can++not++acept++.', u'can++not++acept++.', 4, 1, None, None, None, None, None, None), (u'-(++\U0001f62b', u'-(++\U0001f62b', 2, 1, u'[1, 1]', u'[1, 1]', None, None, None, None), (u'which++we++can', u'which++we++can', 3, 2, None, None, None, None, None, None), (u'-(++\U0001f62b++:-(++#shetlife++http://www.noooo.com', 
u'-(++\U0001f62b++:-(++#shetlif++http://www.noooo.com', 5, 1, u'[1, 1, 1, 0, 0]', u'[1, 1, 1, 0, 0]', u'[0, 0, 0, 1, 0]', u'[0, 0, 0, 2, 0]', None, None), (u'explain++a++big', u'explain++a++big', 3, 1, u'[0, 0, 2]', u'[0, 0, 2]', u'[0, 0, 1]', u'[0, 0, 2]', None, None), (u'we++can', u'we++can', 2, 2, None, None, None, None, None, None), (u'can++use', u'can++use', 2, 1, None, None, None, None, None, None), (u'we++can++use++for++explain', u'we++can++use++for++explain', 5, 1, None, None, None, None, None, None), (u',++which++we++can++use++for', u',++which++we++can++use++for', 6, 1, None, None, None, None, None, None), (u'use++for++explain', u'use++for++explain', 3, 1, None, None, None, None, None, None), (u'explain++a++big++things++.', u'explain++a++big++thing++.', 5, 1, u'[0, 0, 2, 0, 0]', u'[0, 0, 2, 0, 0]', u'[0, 0, 1, 0, 0]', u'[0, 0, 2, 0, 0]', None, None), (u'a++bad++news', u'a++bad++news', 3, 1, u'[0, 4, 0]', u'[0, 7, 0]', u'[0, 1, 0]', u'[0, 5, 0]', None, None), (u'bad++news++,++which++we', u'bad++news++,++which++we', 5, 1, u'[4, 0, 0, 0, 0]', u'[7, 0, 0, 0, 0]', u'[1, 0, 0, 0, 0]', u'[5, 0, 0, 0, 0]', None, None), (u'for++explain', u'for++explain', 2, 1, None, None, None, None, None, None), (u'can++use++for++explain++a', u'can++use++for++explain++a', 5, 1, None, None, None, None, None, None), (u'we++can++not', u'we++can++not', 3, 1, None, None, None, None, None, None), (u'explain', u'explain', 1, 1, None, None, None, None, None, None), (u'-(', u'-(', 1, 1, u'1', u'1', None, None, u'1', None), (u'bad++news++,++which++we++can', u'bad++news++,++which++we++can', 6, 1, u'[4, 0, 0, 0, 0, 0]', u'[7, 0, 0, 0, 0, 0]', u'[1, 0, 0, 0, 0, 0]', u'[5, 0, 0, 0, 0, 0]', None, None), (u'bad++news', u'bad++news', 2, 1, u'[4, 0]', u'[7, 0]', u'[1, 0]', u'[5, 0]', None, None), (u'news++,++which++we++can', u'news++,++which++we++can', 5, 1, None, None, None, None, None, None), (u'news++,++which++we', u'news++,++which++we', 4, 1, None, None, None, None, None, None), 
(u'a++bad++news++,++which', u'a++bad++news++,++which', 5, 1, u'[0, 4, 0, 0, 0]', u'[0, 7, 0, 0, 0]', u'[0, 1, 0, 0, 0]', u'[0, 5, 0, 0, 0]', None, None), (u'big++things++.', u'big++thing++.', 3, 1, u'[2, 0, 0]', u'[2, 0, 0]', u'[1, 0, 0]', u'[2, 0, 0]', None, None), (u'things++.', u'thing++.', 2, 1, None, None, None, None, None, None), (u'things', u'thing', 1, 1, None, None, None, None, None, None), (u'-(++\U0001f62b++:-(', u'-(++\U0001f62b++:-(', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, None, None), (u'model++,++which++we', u'model++,++which++we', 4, 1, u'[1, 0, 0, 0]', u'[2, 0, 0, 0]', None, None, None, None), (u'#shetlife', u'#shetlif', 1, 3, None, None, u'1', u'2', None, u'1'), (u'can++not++acept++.++-(++\U0001f62b', u'can++not++acept++.++-(++\U0001f62b', 6, 1, u'[0, 0, 0, 0, 1, 1]', u'[0, 0, 0, 0, 1, 1]', None, None, None, None), (u'we++can++not++acept++.', u'we++can++not++acept++.', 5, 1, None, None, None, None, None, None), (u'big++things', u'big++thing', 2, 1, u'[2, 0]', u'[2, 0]', u'[1, 0]', u'[2, 0]', None, None), (u'use++for++explain++a', u'use++for++explain++a', 4, 1, None, None, None, None, None, None), (u'not++acept', u'not++acept', 2, 1, None, None, None, None, None, None), (u'acept++.++-(++\U0001f62b++:-(', u'acept++.++-(++\U0001f62b++:-(', 5, 1, u'[0, 0, 1, 1, 1]', u'[0, 0, 1, 1, 1]', None, None, None, None), (u'for++explain++a++big++things++.', u'for++explain++a++big++thing++.', 6, 1, u'[0, 0, 0, 2, 0, 0]', u'[0, 0, 0, 2, 0, 0]', u'[0, 0, 0, 1, 0, 0]', u'[0, 0, 0, 2, 0, 0]', None, None), (u'\U0001f62b++:-(++#shetlife++http://www.noooo.com', u'\U0001f62b++:-(++#shetlif++http://www.noooo.com', 4, 1, u'[1, 1, 0, 0]', u'[1, 1, 0, 0]', u'[0, 0, 1, 0]', u'[0, 0, 2, 0]', None, None), (u'we++can++use', u'we++can++use', 3, 1, None, None, None, None, None, None), (u'which++we++can++use++for++explain', u'which++we++can++use++for++explain', 6, 1, None, None, None, None, None, None), (u'not++acept++.++-(', u'not++acept++.++-(', 4, 1, u'[0, 0, 0, 1]', 
u'[0, 0, 0, 1]', None, None, None, None), (u':-(++#shetlife', u':-(++#shetlif', 2, 1, u'[1, 0]', u'[1, 0]', u'[0, 1]', u'[0, 2]', None, None), (u'which++we++can++use', u'which++we++can++use', 4, 1, None, None, None, None, None, None), (u'explain++a', u'explain++a', 2, 1, None, None, None, None, None, None), (u'.++-(++\U0001f62b++:-(++#shetlife++http://www.noooo.com', u'.++-(++\U0001f62b++:-(++#shetlif++http://www.noooo.com', 6, 1, u'[0, 1, 1, 1, 0, 0]', u'[0, 1, 1, 1, 0, 0]', u'[0, 0, 0, 0, 1, 0]', u'[0, 0, 0, 0, 2, 0]', None, None), (u'not++acept++.', u'not++acept++.', 3, 1, None, None, None, None, None, None), (u'a++big++things', u'a++big++thing', 3, 1, u'[0, 2, 0]', u'[0, 2, 0]', u'[0, 1, 0]', u'[0, 2, 0]', None, None), (u'.++-(', u'.++-(', 2, 1, u'[0, 1]', u'[0, 1]', None, None, None, None), (u'a++bad', u'a++bad', 2, 1, u'[0, 4]', u'[0, 7]', u'[0, 1]', u'[0, 5]', None, None), (u'use++for', u'use++for', 2, 1, None, None, None, None, None, None), (u'can++not++acept++.++-(', u'can++not++acept++.++-(', 5, 1, u'[0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 1]', None, None, None, None), (u'a++big++things++.', u'a++big++thing++.', 4, 1, u'[0, 2, 0, 0]', u'[0, 2, 0, 0]', u'[0, 1, 0, 0]', u'[0, 2, 0, 0]', None, None), (u'news', u'news', 1, 1, None, None, None, None, None, None), (u'which++we++can++not', u'which++we++can++not', 4, 1, None, None, None, None, None, None), (u'http://www.noooo.com', u'http://www.noooo.com', 1, 1, None, None, None, None, None, None), (u'-(++\U0001f62b++:-(++#shetlife', u'-(++\U0001f62b++:-(++#shetlif', 4, 1, u'[1, 1, 1, 0]', u'[1, 1, 1, 0]', u'[0, 0, 0, 1]', u'[0, 0, 0, 2]', None, None), (u'acept++.++-(++\U0001f62b', u'acept++.++-(++\U0001f62b', 4, 1, u'[0, 0, 1, 1]', u'[0, 0, 1, 1]', None, None, None, None), (u'which++we++can++not++acept', u'which++we++can++not++acept', 5, 1, None, None, None, None, None, None), (u':-(', u':-(', 1, 2, u'2', u'2', None, None, u'2', None), (u'news++,++which++we++can++not', u'news++,++which++we++can++not', 6, 1, None, None, 
None, None, None, None), (u'can++use++for++explain', u'can++use++for++explain', 4, 1, None, None, None, None, None, None), (u':-(++#shetlife++http://www.noooo.com', u':-(++#shetlif++http://www.noooo.com', 3, 1, u'[1, 0, 0]', u'[1, 0, 0]', u'[0, 1, 0]', u'[0, 2, 0]', None, None), (u'not', u'not', 1, 1, None, None, None, None, None, None), (u',++which++we++can++not++acept', u',++which++we++can++not++acept', 6, 1, None, None, None, None, None, None), (u'which++we++can++use++for', u'which++we++can++use++for', 5, 1, None, None, None, None, None, None), (u'can++not++acept', u'can++not++acept', 3, 1, None, None, None, None, None, None), (u'explain++a++big++things', u'explain++a++big++thing', 4, 1, u'[0, 0, 2, 0]', u'[0, 0, 2, 0]', u'[0, 0, 1, 0]', u'[0, 0, 2, 0]', None, None), (u'can', u'can', 1, 2, None, None, None, None, None, None), (u'tiny++model++,++which++we++can', u'tini++model++,++which++we++can', 6, 1, u'[0, 1, 0, 0, 0, 0]', u'[0, 2, 0, 0, 0, 0]', u'[1, 0, 0, 0, 0, 0]', u'[6, 0, 0, 0, 0, 0]', None, None), (u'acept++.++-(++\U0001f62b++:-(++#shetlife', u'acept++.++-(++\U0001f62b++:-(++#shetlif', 6, 1, u'[0, 0, 1, 1, 1, 0]', u'[0, 0, 1, 1, 1, 0]', u'[0, 0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 0, 2]', None, None), (u'use++for++explain++a++big++things', u'use++for++explain++a++big++thing', 6, 1, u'[0, 0, 0, 0, 2, 0]', u'[0, 0, 0, 0, 2, 0]', u'[0, 0, 0, 0, 1, 0]', u'[0, 0, 0, 0, 2, 0]', None, None), (u'we++can++use++for++explain++a', u'we++can++use++for++explain++a', 6, 1, None, None, None, None, None, None), (u'use++for++explain++a++big', u'use++for++explain++a++big', 5, 1, u'[0, 0, 0, 0, 2]', u'[0, 0, 0, 0, 2]', u'[0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 2]', None, None), (u'model++,++which++we++can++use', u'model++,++which++we++can++use', 6, 1, u'[1, 0, 0, 0, 0, 0]', u'[2, 0, 0, 0, 0, 0]', None, None, None, None), (u'which++we', u'which++we', 2, 2, None, None, None, None, None, None), (u'not++acept++.++-(++\U0001f62b++:-(', u'not++acept++.++-(++\U0001f62b++:-(', 6, 1, u'[0, 0, 0, 
1, 1, 1]', u'[0, 0, 0, 1, 1, 1]', None, None, None, None), (u'model++,++which++we++can', u'model++,++which++we++can', 5, 1, u'[1, 0, 0, 0, 0]', u'[2, 0, 0, 0, 0]', None, None, None, None), (u'we++can++not++acept', u'we++can++not++acept', 4, 1, None, None, None, None, None, None), (u'use', u'use', 1, 1, None, None, None, None, None, None), (u',++which++we++can++use', u',++which++we++can++use', 5, 1, None, None, None, None, None, None), (u'bad++news++,', u'bad++news++,', 3, 1, u'[4, 0, 0]', u'[7, 0, 0]', u'[1, 0, 0]', u'[5, 0, 0]', None, None), (u'can++use++for', u'can++use++for', 3, 1, None, None, None, None, None, None), (u'news++,', u'news++,', 2, 1, None, None, None, None, None, None), (u'can++not', u'can++not', 2, 1, None, None, None, None, None, None), (u'.++-(++\U0001f62b++:-(', u'.++-(++\U0001f62b++:-(', 4, 1, u'[0, 1, 1, 1]', u'[0, 1, 1, 1]', None, None, None, None), (u'we', u'we', 1, 2, None, None, None, None, None, None), (u'for++explain++a', u'for++explain++a', 3, 1, None, None, None, None, None, None), (u'acept', u'acept', 1, 1, None, None, None, None, None, None), (u'for++explain++a++big', u'for++explain++a++big', 4, 1, u'[0, 0, 0, 2]', u'[0, 0, 0, 2]', u'[0, 0, 0, 1]', u'[0, 0, 0, 2]', None, None), (u'a++bad++news++,++which++we', u'a++bad++news++,++which++we', 6, 1, u'[0, 4, 0, 0, 0, 0]', u'[0, 7, 0, 0, 0, 0]', u'[0, 1, 0, 0, 0, 0]', u'[0, 5, 0, 0, 0, 0]', None, None), (u'#shetlife++http://www.noooo.com', u'#shetlif++http://www.noooo.com', 2, 1, None, None, u'[1, 0]', u'[2, 0]', None, None), (u'\U0001f62b++:-(', u'\U0001f62b++:-(', 2, 1, u'[1, 1]', u'[1, 1]', None, None, None, None), (u'.++-(++\U0001f62b', u'.++-(++\U0001f62b', 3, 1, u'[0, 1, 1]', u'[0, 1, 1]', None, None, None, None), (u'we++can++not++acept++.++-(', u'we++can++not++acept++.++-(', 6, 1, u'[0, 0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 0, 1]', None, None, None, None), (u'news++,++which', u'news++,++which', 3, 1, None, None, None, None, None, None), (u'which', u'which', 1, 2, None, None, None, 
None, None, None), (u'model++,++which', u'model++,++which', 3, 1, u'[1, 0, 0]', u'[2, 0, 0]', None, None, None, None), (u'we++can++use++for', u'we++can++use++for', 4, 1, None, None, None, None, None, None), (u'which++we++can++not++acept++.', u'which++we++can++not++acept++.', 6, 1, None, None, None, None, None, None), (u'.++-(++\U0001f62b++:-(++#shetlife', u'.++-(++\U0001f62b++:-(++#shetlif', 5, 1, u'[0, 1, 1, 1, 0]', u'[0, 1, 1, 1, 0]', u'[0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 2]', None, None), (u'can++use++for++explain++a++big', u'can++use++for++explain++a++big', 6, 1, u'[0, 0, 0, 0, 0, 2]', u'[0, 0, 0, 0, 0, 2]', u'[0, 0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 0, 2]', None, None), (u'\U0001f62b++:-(++#shetlife', u'\U0001f62b++:-(++#shetlif', 3, 1, u'[1, 1, 0]', u'[1, 1, 0]', u'[0, 0, 1]', u'[0, 0, 2]', None, None), (u'for++explain++a++big++things', u'for++explain++a++big++thing', 5, 1, u'[0, 0, 0, 2, 0]', u'[0, 0, 0, 2, 0]', u'[0, 0, 0, 1, 0]', u'[0, 0, 0, 2, 0]', None, None), (u'model', u'model', 1, 2, u'1', u'2', None, None, u'1', None), (u'but++a++big++explanation++.++right', u'but++a++big++explan++.++right', 6, 1, u'[0, 0, 0, 1, 0, 1]', u'[0, 0, 0, 1, 0, 1]', u'[0, 0, 1, 0, 0, 0]', u'[0, 0, 3, 0, 0, 0]', None, None), (u'what', u'what', 1, 1, None, None, None, None, None, None), (u'do++you++think++about', u'do++you++think++about', 4, 1, u'[0, 1, 0, 0]', u'[0, 1, 0, 0]', None, None, None, None), (u'you++\U0001f600++\U0001f308++\U0001f600', u'you++\U0001f600++\U0001f308++\U0001f600', 4, 1, u'[1, 2, 1, "IGNOR"]', u'[2, 2, 1, "IGNOR"]', None, None, None, None), (u'it++?++1++\U0001f62b', u'it++?++1++\U0001f62b', 4, 1, u'[0, 1, 1, 1]', u'[0, 1, 1, 1]', None, None, None, None), (u'a++big', u'a++big', 2, 2, u'[0, 2]', u'[0, 2]', u'[0, 2]', u'[0, 5]', None, None), (u'1++\U0001f62b++1++.', u'1++\U0001f62b++1++.', 4, 1, u'[2, 1, "IGNOR", 0]', u'[2, 1, "IGNOR", 0]', None, None, None, None), (u'you++think', u'you++think', 2, 1, u'[1, 0]', u'[1, 0]', None, None, None, None), 
(u',++but++a++big++explanation++.', u',++but++a++big++explan++.', 6, 1, u'[0, 0, 0, 0, 1, 0]', u'[0, 0, 0, 0, 1, 0]', u'[0, 0, 0, 1, 0, 0]', u'[0, 0, 0, 3, 0, 0]', None, None), (u'what++do++you++think++about++it', u'what++do++you++think++about++it', 6, 1, u'[0, 0, 1, 0, 0, 0]', u'[0, 0, 1, 0, 0, 0]', None, None, None, None), (u'think++about++it++?++1++\U0001f62b', u'think++about++it++?++1++\U0001f62b', 6, 1, u'[0, 0, 0, 1, 1, 1]', u'[0, 0, 0, 1, 1, 1]', None, None, None, None), (u'but++you', u'but++you', 2, 4, u'[10, 6]', u'[15, 8]', u'[4, 2]', u'[10, 4]', None, None), (u'but++you++\U0001f600++\U0001f308++\U0001f600', u'but++you++\U0001f600++\U0001f308++\U0001f600', 5, 1, u'[3, 1, 2, 1, "IGNOR"]', u'[3, 2, 2, 1, "IGNOR"]', u'[1, 0, 0, 0, "IGNOR"]', u'[3, 0, 0, 0, "IGNOR"]', None, None), (u'.++but', u'.++but', 2, 3, u'[0, 4]', u'[0, 7]', u'[0, 2]', u'[0, 4]', None, None), (u'big++explanation++.++right++?++what', u'big++explan++.++right++?++what', 6, 1, u'[0, 1, 0, 1, 0, 0]', u'[0, 1, 0, 1, 0, 0]', u'[1, 0, 0, 0, 0, 0]', u'[3, 0, 0, 0, 0, 0]', None, None), (u'tiny++surprise++.++but', u'tini++surpris++.++but', 4, 1, u'[1, 0, 0, 2]', u'[1, 0, 0, 2]', u'[1, 0, 0, 1]', u'[3, 0, 0, 2]', None, None), (u'about++it++?++1++\U0001f62b++1', u'about++it++?++1++\U0001f62b++1', 6, 1, u'[0, 0, 1, 2, 1, "IGNOR"]', u'[0, 0, 1, 2, 1, "IGNOR"]', None, None, None, None), (u'you++think++about++it++?', u'you++think++about++it++?', 5, 1, u'[1, 0, 0, 0, 1]', u'[1, 0, 0, 0, 1]', None, None, None, None), (u'?++what++do', u'?++what++do', 3, 1, None, None, None, None, None, None), (u'\U0001f600++\U0001f308++\U0001f600++\U0001f308', u'\U0001f600++\U0001f308++\U0001f600++\U0001f308', 4, 1, u'[2, 2, "IGNOR", "IGNOR"]', u'[2, 2, "IGNOR", "IGNOR"]', None, None, None, None), (u'what++do++you', u'what++do++you', 3, 1, u'[0, 0, 1]', u'[0, 0, 1]', None, None, None, None), (u'but++a++big++explanation++.', u'but++a++big++explan++.', 5, 1, u'[0, 0, 0, 1, 0]', u'[0, 0, 0, 1, 0]', u'[0, 0, 1, 0, 0]', u'[0, 
0, 3, 0, 0]', None, None), (u',++but++a', u',++but++a', 3, 1, None, None, None, None, None, None), (u'1', u'1', 1, 2, u'2', u'2', None, None, u'2', None), (u'model++,', u'model++,', 2, 2, u'[1, 0]', u'[2, 0]', None, None, None, None), (u'?++what++do++you++think++about', u'?++what++do++you++think++about', 6, 1, u'[0, 0, 0, 1, 0, 0]', u'[0, 0, 0, 1, 0, 0]', None, None, None, None), (u'what++do++you++think', u'what++do++you++think', 4, 1, u'[0, 0, 1, 0]', u'[0, 0, 1, 0]', None, None, None, None), (u'right++?++what++do', u'right++?++what++do', 4, 1, u'[1, 0, 0, 0]', u'[1, 0, 0, 0]', None, None, None, None), (u'what++do', u'what++do', 2, 1, None, None, None, None, None, None), (u'.++right++?++what', u'.++right++?++what', 4, 1, u'[0, 1, 0, 0]', u'[0, 1, 0, 0]', None, None, None, None), (u'.++but++you', u'.++but++you', 3, 2, u'[0, 4, 4]', u'[0, 7, 4]', u'[0, 2, 2]', u'[0, 4, 4]', None, None), (u'about++it++?++1', u'about++it++?++1', 4, 1, u'[0, 0, 1, 1]', u'[0, 0, 1, 1]', None, None, None, None), (u'tiny', u'tini', 1, 10, u'1', u'1', u'2', u'9', u'1', u'2'), (u'tiny++model', u'tini++model', 2, 2, u'[0, 1]', u'[0, 2]', u'[1, 0]', u'[6, 0]', None, None), (u'think++about', u'think++about', 2, 1, None, None, None, None, None, None), (u'surprise++.++but++you', u'surpris++.++but++you', 4, 1, u'[0, 0, 2, 2]', u'[0, 0, 2, 2]', u'[0, 0, 1, 1]', u'[0, 0, 2, 2]', None, None), (u'explanation++.++right++?++what', u'explan++.++right++?++what', 5, 1, u'[1, 0, 1, 0, 0]', u'[1, 0, 1, 0, 0]', None, None, None, None), (u'1++.++but++you++but', u'1++.++but++you++but', 5, 1, u'[1, 0, 5, 2, "IGNOR"]', u'[1, 0, 10, 2, "IGNOR"]', u'[0, 0, 2, 1, "IGNOR"]', u'[0, 0, 5, 2, "IGNOR"]', None, None), (u'model++,++but++a++big++explanation', u'model++,++but++a++big++explan', 6, 1, u'[0, 0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 1, 0]', u'[0, 0, 0, 0, 3, 0]', None, None), (u'?++1++\U0001f62b++1++.++but', u'?++1++\U0001f62b++1++.++but', 6, 1, u'[1, 2, 1, "IGNOR", 0, 2]', u'[1, 2, 1, "IGNOR", 0, 
5]', u'[0, 0, 0, "IGNOR", 0, 1]', u'[0, 0, 0, "IGNOR", 0, 2]', None, None), (u'a++big++explanation', u'a++big++explan', 3, 1, u'[0, 0, 1]', u'[0, 0, 1]', u'[0, 1, 0]', u'[0, 3, 0]', None, None), (u'explanation++.++right++?++what++do', u'explan++.++right++?++what++do', 6, 1, u'[1, 0, 1, 0, 0, 0]', u'[1, 0, 1, 0, 0, 0]', None, None, None, None), (u'?++what', u'?++what', 2, 1, None, None, None, None, None, None), (u'right', u'right', 1, 1, u'1', u'1', None, None, u'1', None), (u'you++but++you', u'you++but++you', 3, 2, u'[6, 6, "IGNOR"]', u'[8, 8, "IGNOR"]', u'[2, 2, "IGNOR"]', u'[4, 6, "IGNOR"]', None, None), (u'big++explanation++.++right++?', u'big++explan++.++right++?', 5, 1, u'[0, 1, 0, 1, 0]', u'[0, 1, 0, 1, 0]', u'[1, 0, 0, 0, 0]', u'[3, 0, 0, 0, 0]', None, None), (u'it++?', u'it++?', 2, 1, u'[0, 1]', u'[0, 1]', None, None, None, None), (u'what++do++you++think++about', u'what++do++you++think++about', 5, 1, u'[0, 0, 1, 0, 0]', u'[0, 0, 1, 0, 0]', None, None, None, None), (u'but++you++but++you++\U0001f600++\U0001f308', u'but++you++but++you++\U0001f600++\U0001f308', 6, 1, u'[5, 3, "IGNOR", "IGNOR", 1, 1]', u'[5, 4, "IGNOR", "IGNOR", 1, 1]', u'[2, 1, "IGNOR", "IGNOR", 0, 0]', u'[5, 2, "IGNOR", "IGNOR", 0, 0]', None, None), (u'\U0001f308++\U0001f600++\U0001f308', u'\U0001f308++\U0001f600++\U0001f308', 3, 1, u'[2, 1, "IGNOR"]', u'[2, 1, "IGNOR"]', None, None, None, None), (u'explanation++.++right', u'explan++.++right', 3, 1, u'[1, 0, 1]', u'[1, 0, 1]', None, None, None, None), (u'.', u'.', 1, 7, u'1', u'1', None, None, u'1', None), (u'you', u'you', 1, 8, u'7', u'9', u'2', u'4', u'7', u'2'), (u'surprise++.++but', u'surpris++.++but', 3, 1, u'[0, 0, 2]', u'[0, 0, 2]', u'[0, 0, 1]', u'[0, 0, 2]', None, None), (u'?', u'?', 1, 2, u'1', u'1', None, None, u'1', None), (u'explanation++.++right++?', u'explan++.++right++?', 4, 1, u'[1, 0, 1, 0]', u'[1, 0, 1, 0]', None, None, None, None), (u'it++?++1', u'it++?++1', 3, 1, u'[0, 1, 1]', u'[0, 1, 1]', None, None, None, None), 
(u'tiny++model++,++but', u'tini++model++,++but', 4, 1, None, None, None, None, None, None), (u'you++think++about++it++?++1', u'you++think++about++it++?++1', 6, 1, u'[1, 0, 0, 0, 1, 1]', u'[1, 0, 0, 0, 1, 1]', None, None, None, None), (u'but++you++\U0001f600++\U0001f308', u'but++you++\U0001f600++\U0001f308', 4, 1, u'[3, 1, 1, 1]', u'[3, 2, 1, 1]', u'[1, 0, 0, 0]', u'[3, 0, 0, 0]', None, None), (u'but++a++big', u'but++a++big', 3, 1, None, None, u'[0, 0, 1]', u'[0, 0, 3]', None, None), (u'tiny++surprise++.++but++you++but', u'tini++surpris++.++but++you++but', 6, 1, u'[1, 0, 0, 5, 2, "IGNOR"]', u'[1, 0, 0, 5, 2, "IGNOR"]', u'[1, 0, 0, 2, 1, "IGNOR"]', u'[3, 0, 0, 5, 2, "IGNOR"]', None, None), (u'do++you++think++about++it', u'do++you++think++about++it', 5, 1, u'[0, 1, 0, 0, 0]', u'[0, 1, 0, 0, 0]', None, None, None, None), (u'big++explanation++.', u'big++explan++.', 3, 1, u'[0, 1, 0]', u'[0, 1, 0]', u'[1, 0, 0]', u'[3, 0, 0]', None, None), (u'think++about++it++?++1', u'think++about++it++?++1', 5, 1, u'[0, 0, 0, 1, 1]', u'[0, 0, 0, 1, 1]', None, None, None, None), (u'.++right', u'.++right', 2, 1, u'[0, 1]', u'[0, 1]', None, None, None, None), (u'explanation++.', u'explan++.', 2, 1, u'[1, 0]', u'[1, 0]', None, None, None, None), (u'but++you++but', u'but++you++but', 3, 2, u'[10, 4, "IGNOR"]', u'[15, 4, "IGNOR"]', u'[4, 2, "IGNOR"]', u'[10, 4, "IGNOR"]', None, None), (u'.++but++you++but++you++\U0001f600', u'.++but++you++but++you++\U0001f600', 6, 1, u'[0, 5, 3, "IGNOR", "IGNOR", 1]', u'[0, 5, 4, "IGNOR", "IGNOR", 1]', u'[0, 2, 1, "IGNOR", "IGNOR", 0]', u'[0, 5, 2, "IGNOR", "IGNOR", 0]', None, None), (u'tiny++surprise', u'tini++surpris', 2, 1, u'[1, 0]', u'[1, 0]', u'[1, 0]', u'[3, 0]', None, None), (u'\U0001f600++\U0001f308++\U0001f600++\U0001f308++\U0001f600', u'\U0001f600++\U0001f308++\U0001f600++\U0001f308++\U0001f600', 5, 1, u'[3, 2, "IGNOR", "IGNOR", "IGNOR"]', u'[3, 2, "IGNOR", "IGNOR", "IGNOR"]', None, None, None, None), (u'model++,++but++a', u'model++,++but++a', 4, 1, 
None, None, None, None, None, None), (u'you++think++about', u'you++think++about', 3, 1, u'[1, 0, 0]', u'[1, 0, 0]', None, None, None, None), (u'?++what++do++you', u'?++what++do++you', 4, 1, u'[0, 0, 0, 1]', u'[0, 0, 0, 1]', None, None, None, None), (u'explanation', u'explan', 1, 1, u'1', u'1', None, None, u'1', None), (u'you++but++you++\U0001f600++\U0001f308++\U0001f600', u'you++but++you++\U0001f600++\U0001f308++\U0001f600', 6, 1, u'[3, 3, "IGNOR", 2, 1, "IGNOR"]', u'[4, 3, "IGNOR", 2, 1, "IGNOR"]', u'[1, 1, "IGNOR", 0, 0, "IGNOR"]', u'[2, 3, "IGNOR", 0, 0, "IGNOR"]', None, None), (u'?++1', u'?++1', 2, 1, u'[1, 1]', u'[1, 1]', None, None, None, None), (u'do++you++think++about++it++?', u'do++you++think++about++it++?', 6, 1, u'[0, 1, 0, 0, 0, 1]', u'[0, 1, 0, 0, 0, 1]', None, None, None, None), (u'do++you++think', u'do++you++think', 3, 1, u'[0, 1, 0]', u'[0, 1, 0]', None, None, None, None), (u'model++,++but', u'model++,++but', 3, 1, None, None, None, None, None, None), (u'tiny++model++,++but++a', u'tini++model++,++but++a', 5, 1, None, None, None, None, None, None), (u'.++but++you++but++you', u'.++but++you++but++you', 5, 2, u'[0, 10, 6, "IGNOR", "IGNOR"]', u'[0, 15, 8, "IGNOR", "IGNOR"]', u'[0, 4, 2, "IGNOR", "IGNOR"]', u'[0, 10, 4, "IGNOR", "IGNOR"]', None, None), (u'\U0001f62b++1++.++but', u'\U0001f62b++1++.++but', 4, 1, u'[1, 1, 0, 2]', u'[1, 1, 0, 5]', u'[0, 0, 0, 1]', u'[0, 0, 0, 2]', None, None), (u'right++?', u'right++?', 2, 1, u'[1, 0]', u'[1, 0]', None, None, None, None), (u'but++you++\U0001f600', u'but++you++\U0001f600', 3, 1, u'[3, 1, 1]', u'[3, 2, 1]', u'[1, 0, 0]', u'[3, 0, 0]', None, None), (u'model++,++but++a++big', u'model++,++but++a++big', 5, 1, None, None, u'[0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 3]', None, None), (u'\U0001f62b++1++.++but++you++but', u'\U0001f62b++1++.++but++you++but', 6, 1, u'[1, 1, 0, 5, 2, "IGNOR"]', u'[1, 1, 0, 10, 2, "IGNOR"]', u'[0, 0, 0, 2, 1, "IGNOR"]', u'[0, 0, 0, 5, 2, "IGNOR"]', None, None), (u'tiny++surprise++.', 
u'tini++surpris++.', 3, 1, u'[1, 0, 0]', u'[1, 0, 0]', u'[1, 0, 0]', u'[3, 0, 0]', None, None), (u'?++what++do++you++think', u'?++what++do++you++think', 5, 1, u'[0, 0, 0, 1, 0]', u'[0, 0, 0, 1, 0]', None, None, None, None), (u'.++but++you++but', u'.++but++you++but', 4, 2, u'[0, 10, 4, "IGNOR"]', u'[0, 15, 4, "IGNOR"]', u'[0, 4, 2, "IGNOR"]', u'[0, 10, 4, "IGNOR"]', None, None), (u',++but++a++big++explanation', u',++but++a++big++explan', 5, 1, u'[0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 1]', u'[0, 0, 0, 1, 0]', u'[0, 0, 0, 3, 0]', None, None), (u'\U0001f62b++1++.', u'\U0001f62b++1++.', 3, 1, u'[1, 1, 0]', u'[1, 1, 0]', None, None, None, None), (u'about', u'about', 1, 1, None, None, None, None, None, None), (u'it++?++1++\U0001f62b++1', u'it++?++1++\U0001f62b++1', 5, 1, u'[0, 1, 2, 1, "IGNOR"]', u'[0, 1, 2, 1, "IGNOR"]', None, None, None, None), (u'tiny++model++,++but++a++big', u'tini++model++,++but++a++big', 6, 1, None, None, u'[0, 0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 0, 3]', None, None), (u'you++but', u'you++but', 2, 2, u'[4, 6]', u'[4, 8]', u'[2, 2]', u'[4, 6]', None, None), (u'right++?++what', u'right++?++what', 3, 1, u'[1, 0, 0]', u'[1, 0, 0]', None, None, None, None), (u'\U0001f308++\U0001f600++\U0001f308++\U0001f600', u'\U0001f308++\U0001f600++\U0001f308++\U0001f600', 4, 1, u'[2, 2, "IGNOR", "IGNOR"]', u'[2, 2, "IGNOR", "IGNOR"]', None, None, None, None), (u'but++you++but++you++\U0001f600', u'but++you++but++you++\U0001f600', 5, 1, u'[5, 3, "IGNOR", "IGNOR", 1]', u'[5, 4, "IGNOR", "IGNOR", 1]', u'[2, 1, "IGNOR", "IGNOR", 0]', u'[5, 2, "IGNOR", "IGNOR", 0]', None, None), (u'\U0001f62b++1++.++but++you', u'\U0001f62b++1++.++but++you', 5, 1, u'[1, 1, 0, 2, 2]', u'[1, 1, 0, 5, 2]', u'[0, 0, 0, 1, 1]', u'[0, 0, 0, 2, 2]', None, None), (u'surprise++.++but++you++but++you', u'surpris++.++but++you++but++you', 6, 1, u'[0, 0, 5, 3, "IGNOR", "IGNOR"]', u'[0, 0, 5, 4, "IGNOR", "IGNOR"]', u'[0, 0, 2, 1, "IGNOR", "IGNOR"]', u'[0, 0, 5, 2, "IGNOR", "IGNOR"]', None, None), 
(u'a++big++explanation++.++right', u'a++big++explan++.++right', 5, 1, u'[0, 0, 1, 0, 1]', u'[0, 0, 1, 0, 1]', u'[0, 1, 0, 0, 0]', u'[0, 3, 0, 0, 0]', None, None), (u'1++.++but', u'1++.++but', 3, 1, u'[1, 0, 2]', u'[1, 0, 5]', u'[0, 0, 1]', u'[0, 0, 2]', None, None), (u'you++but++you++\U0001f600++\U0001f308', u'you++but++you++\U0001f600++\U0001f308', 5, 1, u'[3, 3, "IGNOR", 1, 1]', u'[4, 3, "IGNOR", 1, 1]', u'[1, 1, "IGNOR", 0, 0]', u'[2, 3, "IGNOR", 0, 0]', None, None), (u'\U0001f62b++1', u'\U0001f62b++1', 2, 1, u'[1, 1]', u'[1, 1]', None, None, None, None), (u'surprise++.', u'surpris++.', 2, 1, None, None, None, None, None, None), (u'tiny++model++,', u'tini++model++,', 3, 2, u'[0, 1, 0]', u'[0, 2, 0]', u'[1, 0, 0]', u'[6, 0, 0]', None, None), (u'right++?++what++do++you++think', u'right++?++what++do++you++think', 6, 1, u'[1, 0, 0, 0, 1, 0]', u'[1, 0, 0, 0, 1, 0]', None, None, None, None), (u'?++1++\U0001f62b', u'?++1++\U0001f62b', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, None, None), (u'you++\U0001f600++\U0001f308++\U0001f600++\U0001f308++\U0001f600', u'you++\U0001f600++\U0001f308++\U0001f600++\U0001f308++\U0001f600', 6, 1, u'[1, 3, 2, "IGNOR", "IGNOR", "IGNOR"]', u'[2, 3, 2, "IGNOR", "IGNOR", "IGNOR"]', None, None, None, None), (u'you++but++you++\U0001f600', u'you++but++you++\U0001f600', 4, 1, u'[3, 3, "IGNOR", 1]', u'[4, 3, "IGNOR", 1]', u'[1, 1, "IGNOR", 0]', u'[2, 3, "IGNOR", 0]', None, None), (u'about++it++?', u'about++it++?', 3, 1, u'[0, 0, 1]', u'[0, 0, 1]', None, None, None, None), (u'think++about++it', u'think++about++it', 3, 1, None, None, None, None, None, None), (u'surprise++.++but++you++but', u'surpris++.++but++you++but', 5, 1, u'[0, 0, 5, 2, "IGNOR"]', u'[0, 0, 5, 2, "IGNOR"]', u'[0, 0, 2, 1, "IGNOR"]', u'[0, 0, 5, 2, "IGNOR"]', None, None), (u'about++it', u'about++it', 2, 1, None, None, None, None, None, None), (u'1++.++but++you', u'1++.++but++you', 4, 1, u'[1, 0, 2, 2]', u'[1, 0, 5, 2]', u'[0, 0, 1, 1]', u'[0, 0, 2, 2]', None, None), 
(u'but++you++but++you', u'but++you++but++you', 4, 2, u'[10, 6, "IGNOR", "IGNOR"]', u'[15, 8, "IGNOR", "IGNOR"]', u'[4, 2, "IGNOR", "IGNOR"]', u'[10, 4, "IGNOR", "IGNOR"]', None, None), (u'about++it++?++1++\U0001f62b', u'about++it++?++1++\U0001f62b', 5, 1, u'[0, 0, 1, 1, 1]', u'[0, 0, 1, 1, 1]', None, None, None, None), (u'.++right++?', u'.++right++?', 3, 1, u'[0, 1, 0]', u'[0, 1, 0]', None, None, None, None), (u'tiny++surprise++.++but++you', u'tini++surpris++.++but++you', 5, 1, u'[1, 0, 0, 2, 2]', u'[1, 0, 0, 2, 2]', u'[1, 0, 0, 1, 1]', u'[3, 0, 0, 2, 2]', None, None), (u'you++think++about++it', u'you++think++about++it', 4, 1, u'[1, 0, 0, 0]', u'[1, 0, 0, 0]', None, None, None, None), (u'do++you', u'do++you', 2, 1, u'[0, 1]', u'[0, 1]', None, None, None, None), (u'1++\U0001f62b', u'1++\U0001f62b', 2, 1, u'[1, 1]', u'[1, 1]', None, None, None, None), (u'.++right++?++what++do', u'.++right++?++what++do', 5, 1, u'[0, 1, 0, 0, 0]', u'[0, 1, 0, 0, 0]', None, None, None, None), (u'but++you++\U0001f600++\U0001f308++\U0001f600++\U0001f308', u'but++you++\U0001f600++\U0001f308++\U0001f600++\U0001f308', 6, 1, u'[3, 1, 2, 2, "IGNOR", "IGNOR"]', u'[3, 2, 2, 2, "IGNOR", "IGNOR"]', u'[1, 0, 0, 0, "IGNOR", "IGNOR"]', u'[3, 0, 0, 0, "IGNOR", "IGNOR"]', None, None), (u'1++\U0001f62b++1', u'1++\U0001f62b++1', 3, 1, u'[2, 1, "IGNOR"]', u'[2, 1, "IGNOR"]', None, None, None, None), (u'big++explanation++.++right', u'big++explan++.++right', 4, 1, u'[0, 1, 0, 1]', u'[0, 1, 0, 1]', u'[1, 0, 0, 0]', u'[3, 0, 0, 0]', None, None), (u'it++?++1++\U0001f62b++1++.', u'it++?++1++\U0001f62b++1++.', 6, 1, u'[0, 1, 2, 1, "IGNOR", 0]', u'[0, 1, 2, 1, "IGNOR", 0]', None, None, None, None), (u'?++1++\U0001f62b++1++.', u'?++1++\U0001f62b++1++.', 5, 1, u'[1, 2, 1, "IGNOR", 0]', u'[1, 2, 1, "IGNOR", 0]', None, None, None, None), (u'you++\U0001f600', u'you++\U0001f600', 2, 1, u'[1, 1]', u'[2, 1]', None, None, None, None), (u'a', u'a', 1, 3, None, None, None, None, None, None), 
(u'1++\U0001f62b++1++.++but++you', u'1++\U0001f62b++1++.++but++you', 6, 1, u'[2, 1, "IGNOR", 0, 2, 2]', u'[2, 1, "IGNOR", 0, 5, 2]', u'[0, 0, "IGNOR", 0, 1, 1]', u'[0, 0, "IGNOR", 0, 2, 2]', None, None), (u'you++\U0001f600++\U0001f308', u'you++\U0001f600++\U0001f308', 3, 1, u'[1, 1, 1]', u'[2, 1, 1]', None, None, None, None), (u'.++right++?++what++do++you', u'.++right++?++what++do++you', 6, 1, u'[0, 1, 0, 0, 0, 1]', u'[0, 1, 0, 0, 0, 1]', None, None, None, None), (u'you++\U0001f600++\U0001f308++\U0001f600++\U0001f308', u'you++\U0001f600++\U0001f308++\U0001f600++\U0001f308', 5, 1, u'[1, 2, 2, "IGNOR", "IGNOR"]', u'[2, 2, 2, "IGNOR", "IGNOR"]', None, None, None, None), (u'think', u'think', 1, 1, None, None, None, None, None, None), (u'1++\U0001f62b++1++.++but', u'1++\U0001f62b++1++.++but', 5, 1, u'[2, 1, "IGNOR", 0, 2]', u'[2, 1, "IGNOR", 0, 5]', u'[0, 0, "IGNOR", 0, 1]', u'[0, 0, "IGNOR", 0, 2]', None, None), (u'think++about++it++?', u'think++about++it++?', 4, 1, u'[0, 0, 0, 1]', u'[0, 0, 0, 1]', None, None, None, None), (u'big', u'big', 1, 5, u'2', u'2', u'2', u'5', u'2', u'2'), (u'big++explanation', u'big++explan', 2, 1, u'[0, 1]', u'[0, 1]', u'[1, 0]', u'[3, 0]', None, None), (u'1++.++but++you++but++you', u'1++.++but++you++but++you', 6, 1, u'[1, 0, 5, 3, "IGNOR", "IGNOR"]', u'[1, 0, 10, 4, "IGNOR", "IGNOR"]', u'[0, 0, 2, 1, "IGNOR", "IGNOR"]', u'[0, 0, 5, 2, "IGNOR", "IGNOR"]', None, None), (u'right++?++what++do++you', u'right++?++what++do++you', 5, 1, u'[1, 0, 0, 0, 1]', u'[1, 0, 0, 0, 1]', None, None, None, None), (u'but++a++big++explanation', u'but++a++big++explan', 4, 1, u'[0, 0, 0, 1]', u'[0, 0, 0, 1]', u'[0, 0, 1, 0]', u'[0, 0, 3, 0]', None, None), (u'?++1++\U0001f62b++1', u'?++1++\U0001f62b++1', 4, 1, u'[1, 2, 1, "IGNOR"]', u'[1, 2, 1, "IGNOR"]', None, None, None, None), (u'do', u'do', 1, 1, None, None, None, None, None, None), (u'a++big++explanation++.', u'a++big++explan++.', 4, 1, u'[0, 0, 1, 0]', u'[0, 0, 1, 0]', u'[0, 1, 0, 0]', u'[0, 3, 0, 0]', None, 
None), (u'a++big++explanation++.++right++?', u'a++big++explan++.++right++?', 6, 1, u'[0, 0, 1, 0, 1, 0]', u'[0, 0, 1, 0, 1, 0]', u'[0, 1, 0, 0, 0, 0]', u'[0, 3, 0, 0, 0, 0]', None, None), (u'but++a', u'but++a', 2, 1, None, None, None, None, None, None), (u'1++.', u'1++.', 2, 1, u'[1, 0]', u'[1, 0]', None, None, None, None), (u',++but++a++big', u',++but++a++big', 4, 1, None, None, u'[0, 0, 0, 1]', u'[0, 0, 0, 3]', None, None), (u'but++i++realy++liked', u'but++i++reali++like', 4, 1, u'[1, 0, 2, 0]', u'[1, 0, 4, 0]', u'[0, 0, 1, 0]', u'[0, 0, 3, 0]', None, None), (u'liked++it++:p++=)++\U0001f600++\U0001f308', u'like++it++:p++=)++\U0001f600++\U0001f308', 6, 1, u'[0, 0, 0, 1, 1, 1]', u'[0, 0, 0, 1, 1, 1]', None, None, None, None), (u'was++realy', u'was++reali', 2, 1, None, None, None, None, None, None), (u',++but++i++realy', u',++but++i++reali', 4, 1, u'[0, 1, 0, 2]', u'[0, 1, 0, 4]', u'[0, 0, 0, 1]', u'[0, 0, 0, 3]', None, None), (u'bad++surprise++for++me++\U0001f62b++,', u'bad++surpris++for++me++\U0001f62b++,', 6, 1, u'[0, 0, 0, 0, 1, 0]', u'[0, 0, 0, 0, 1, 0]', None, None, None, None), (u'i++realy++liked++it++:p', u'i++reali++like++it++:p', 5, 1, u'[0, 2, 0, 0, 0]', u'[0, 4, 0, 0, 0]', u'[0, 1, 0, 0, 0]', u'[0, 3, 0, 0, 0]', None, None), (u'it', u'it', 1, 5, None, None, None, None, None, None), (u'but', u'but', 1, 13, u'11', u'16', u'4', u'10', u'11', u'4'), (u'realy++liked', u'reali++like', 2, 1, u'[2, 0]', u'[4, 0]', u'[1, 0]', u'[3, 0]', None, None), (u':p++=)++\U0001f600++\U0001f308++\U0001f600', u':p++=)++\U0001f600++\U0001f308++\U0001f600', 5, 1, u'[0, 1, 1, 1, "IGNOR"]', u'[0, 1, 1, 1, "IGNOR"]', None, None, None, None), (u'realy++bad++surprise++for', u'reali++bad++surpris++for', 4, 1, None, None, None, None, None, None), (u'me++\U0001f62b++,++but++i', u'me++\U0001f62b++,++but++i', 5, 1, u'[0, 1, 0, 1, 0]', u'[0, 1, 0, 1, 0]', None, None, None, None), (u'me', u'me', 1, 2, None, None, None, None, None, None), (u'was++realy++bad++surprise++for++me', 
u'was++reali++bad++surpris++for++me', 6, 1, None, None, None, None, None, None), (u'me++\U0001f62b++,', u'me++\U0001f62b++,', 3, 1, u'[0, 1, 0]', u'[0, 1, 0]', None, None, None, None), (u'liked++it++:p++=)++\U0001f600', u'like++it++:p++=)++\U0001f600', 5, 1, u'[0, 0, 0, 1, 1]', u'[0, 0, 0, 1, 1]', None, None, None, None), (u'\U0001f62b++,++but', u'\U0001f62b++,++but', 3, 1, u'[1, 0, 1]', u'[1, 0, 1]', None, None, None, None), (u'realy', u'reali', 1, 4, u'2', u'4', u'1', u'3', u'2', u'1'), (u'surprise++for++me++\U0001f62b', u'surpris++for++me++\U0001f62b', 4, 1, u'[0, 0, 0, 1]', u'[0, 0, 0, 1]', None, None, None, None), (u'i++realy++liked++it++:p++=)', u'i++reali++like++it++:p++=)', 6, 1, u'[0, 2, 0, 0, 0, 1]', u'[0, 4, 0, 0, 0, 1]', u'[0, 1, 0, 0, 0, 0]', u'[0, 3, 0, 0, 0, 0]', None, None), (u'\U0001f600', u'\U0001f600', 1, 5, u'4', u'4', None, None, u'4', None), (u'\U0001f308++\U0001f600', u'\U0001f308++\U0001f600', 2, 3, u'[3, 2]', u'[3, 2]', None, None, None, None), (u'=)', u'=)', 1, 1, u'1', u'1', None, None, u'1', None), (u':p', u':p', 1, 1, None, None, None, None, None, None), (u'i++realy++liked++it', u'i++reali++like++it', 4, 1, u'[0, 2, 0, 0]', u'[0, 4, 0, 0]', u'[0, 1, 0, 0]', u'[0, 3, 0, 0]', None, None), (u'me++\U0001f62b++,++but', u'me++\U0001f62b++,++but', 4, 1, u'[0, 1, 0, 1]', u'[0, 1, 0, 1]', None, None, None, None), (u'it++was', u'it++was', 2, 2, None, None, None, None, None, None), (u'surprise++for++me', u'surpris++for++me', 3, 1, None, None, None, None, None, None), (u'\U0001f62b++,++but++i++realy', u'\U0001f62b++,++but++i++reali', 5, 1, u'[1, 0, 1, 0, 2]', u'[1, 0, 1, 0, 4]', u'[0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 3]', None, None), (u'=)++\U0001f600++\U0001f308', u'=)++\U0001f600++\U0001f308', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, None, None), (u',++but++i', u',++but++i', 3, 1, u'[0, 1, 0]', u'[0, 1, 0]', None, None, None, None), (u'it++:p++=)++\U0001f600', u'it++:p++=)++\U0001f600', 4, 1, u'[0, 0, 1, 1]', u'[0, 0, 1, 1]', None, None, None, 
None), (u'but++i++realy++liked++it++:p', u'but++i++reali++like++it++:p', 6, 1, u'[1, 0, 2, 0, 0, 0]', u'[1, 0, 4, 0, 0, 0]', u'[0, 0, 1, 0, 0, 0]', u'[0, 0, 3, 0, 0, 0]', None, None), (u'realy++liked++it++:p++=)', u'reali++like++it++:p++=)', 5, 1, u'[2, 0, 0, 0, 1]', u'[4, 0, 0, 0, 1]', u'[1, 0, 0, 0, 0]', u'[3, 0, 0, 0, 0]', None, None), (u'=)++\U0001f600++\U0001f308++\U0001f600', u'=)++\U0001f600++\U0001f308++\U0001f600', 4, 1, u'[1, 1, 1, "IGNOR"]', u'[1, 1, 1, "IGNOR"]', None, None, None, None), (u'liked++it', u'like++it', 2, 1, None, None, None, None, None, None), (u'it++:p++=)', u'it++:p++=)', 3, 1, u'[0, 0, 1]', u'[0, 0, 1]', None, None, None, None), (u'realy++bad++surprise++for++me', u'reali++bad++surpris++for++me', 5, 1, None, None, None, None, None, None), (u'\U0001f62b++,', u'\U0001f62b++,', 2, 1, u'[1, 0]', u'[1, 0]', None, None, None, None), (u'but++i++realy++liked++it', u'but++i++reali++like++it', 5, 1, u'[1, 0, 2, 0, 0]', u'[1, 0, 4, 0, 0]', u'[0, 0, 1, 0, 0]', u'[0, 0, 3, 0, 0]', None, None), (u'for++me', u'for++me', 2, 2, None, None, None, None, None, None), (u'\U0001f308', u'\U0001f308', 1, 3, u'3', u'3', None, None, u'3', None), (u'for++me++\U0001f62b', u'for++me++\U0001f62b', 3, 1, u'[0, 0, 1]', u'[0, 0, 1]', None, None, None, None), (u'but++i', u'but++i', 2, 1, u'[1, 0]', u'[1, 0]', None, None, None, None), (u'bad++surprise', u'bad++surpris', 2, 1, None, None, None, None, None, None), (u'i++realy++liked', u'i++reali++like', 3, 1, u'[0, 2, 0]', u'[0, 4, 0]', u'[0, 1, 0]', u'[0, 3, 0]', None, None), (u'bad++surprise++for++me', u'bad++surpris++for++me', 4, 1, None, None, None, None, None, None), (u'for++me++\U0001f62b++,++but', u'for++me++\U0001f62b++,++but', 5, 1, u'[0, 0, 1, 0, 1]', u'[0, 0, 1, 0, 1]', None, None, None, None), (u'realy++liked++it', u'reali++like++it', 3, 1, u'[2, 0, 0]', u'[4, 0, 0]', u'[1, 0, 0]', u'[3, 0, 0]', None, None), (u'\U0001f600++\U0001f308', u'\U0001f600++\U0001f308', 2, 3, u'[3, 3]', u'[3, 3]', None, None, None, 
None), (u'it++:p', u'it++:p', 2, 1, None, None, None, None, None, None), (u'liked++it++:p', u'like++it++:p', 3, 1, None, None, None, None, None, None), (u'for', u'for', 1, 3, None, None, None, None, None, None), (u'for++me++\U0001f62b++,', u'for++me++\U0001f62b++,', 4, 1, u'[0, 0, 1, 0]', u'[0, 0, 1, 0]', None, None, None, None), (u'\U0001f600++\U0001f308++\U0001f600', u'\U0001f600++\U0001f308++\U0001f600', 3, 3, u'[4, 3, "IGNOR"]', u'[5, 3, "IGNOR"]', None, None, None, None), (u'realy++bad++surprise', u'reali++bad++surpris', 3, 1, None, None, None, None, None, None), (u'\U0001f62b++,++but++i++realy++liked', u'\U0001f62b++,++but++i++reali++like', 6, 1, u'[1, 0, 1, 0, 2, 0]', u'[1, 0, 1, 0, 4, 0]', u'[0, 0, 0, 0, 1, 0]', u'[0, 0, 0, 0, 3, 0]', None, None), (u'was++realy++bad++surprise', u'was++reali++bad++surpris', 4, 1, None, None, None, None, None, None), (u'me++\U0001f62b++,++but++i++realy', u'me++\U0001f62b++,++but++i++reali', 6, 1, u'[0, 1, 0, 1, 0, 2]', u'[0, 1, 0, 1, 0, 4]', u'[0, 0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 0, 3]', None, None), (u',', u',', 1, 4, None, None, None, None, None, None), (u',++but', u',++but', 2, 2, u'[0, 1]', u'[0, 1]', None, None, None, None), (u'it++:p++=)++\U0001f600++\U0001f308', u'it++:p++=)++\U0001f600++\U0001f308', 5, 1, u'[0, 0, 1, 1, 1]', u'[0, 0, 1, 1, 1]', None, None, None, None), (u'was++realy++bad++surprise++for', u'was++reali++bad++surpris++for', 5, 1, None, None, None, None, None, None), (u'=)++\U0001f600', u'=)++\U0001f600', 2, 1, u'[1, 1]', u'[1, 1]', None, None, None, None), (u'bad++surprise++for++me++\U0001f62b', u'bad++surpris++for++me++\U0001f62b', 5, 1, u'[0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 1]', None, None, None, None), (u':p++=)', u':p++=)', 2, 1, u'[0, 1]', u'[0, 1]', None, None, None, None), (u'\U0001f62b++,++but++i', u'\U0001f62b++,++but++i', 4, 1, u'[1, 0, 1, 0]', u'[1, 0, 1, 0]', None, None, None, None), (u'realy++bad++surprise++for++me++\U0001f62b', u'reali++bad++surpris++for++me++\U0001f62b', 6, 1, u'[0, 0, 0, 0, 
0, 1]', u'[0, 0, 0, 0, 0, 1]', None, None, None, None), (u':p++=)++\U0001f600', u':p++=)++\U0001f600', 3, 1, u'[0, 1, 1]', u'[0, 1, 1]', None, None, None, None), (u'me++\U0001f62b', u'me++\U0001f62b', 2, 1, u'[0, 1]', u'[0, 1]', None, None, None, None), (u'realy++liked++it++:p', u'reali++like++it++:p', 4, 1, u'[2, 0, 0, 0]', u'[4, 0, 0, 0]', u'[1, 0, 0, 0]', u'[3, 0, 0, 0]', None, None), (u'surprise', u'surpris', 1, 2, None, None, None, None, None, None), (u'it++was++realy++bad++surprise', u'it++was++reali++bad++surpris', 5, 1, None, None, None, None, None, None), (u'it++:p++=)++\U0001f600++\U0001f308++\U0001f600', u'it++:p++=)++\U0001f600++\U0001f308++\U0001f600', 6, 1, u'[0, 0, 1, 1, 1, "IGNOR"]', u'[0, 0, 1, 1, 1, "IGNOR"]', None, None, None, None), (u'\U0001f62b', u'\U0001f62b', 1, 3, u'3', u'3', None, None, u'3', None), (u'but++i++realy', u'but++i++reali', 3, 1, u'[1, 0, 2]', u'[1, 0, 4]', u'[0, 0, 1]', u'[0, 0, 3]', None, None), (u'it++was++realy++bad++surprise++for', u'it++was++reali++bad++surpris++for', 6, 1, None, None, None, None, None, None), (u':p++=)++\U0001f600++\U0001f308', u':p++=)++\U0001f600++\U0001f308', 4, 1, u'[0, 1, 1, 1]', u'[0, 1, 1, 1]', None, None, None, None), (u'bad', u'bad', 1, 6, u'4', u'7', u'1', u'5', u'4', u'1'), (u'surprise++for++me++\U0001f62b++,', u'surpris++for++me++\U0001f62b++,', 5, 1, u'[0, 0, 0, 1, 0]', u'[0, 0, 0, 1, 0]', None, None, None, None), (u'surprise++for++me++\U0001f62b++,++but', u'surpris++for++me++\U0001f62b++,++but', 6, 1, u'[0, 0, 0, 1, 0, 1]', u'[0, 0, 0, 1, 0, 1]', None, None, None, None), (u'it++was++realy++bad', u'it++was++reali++bad', 4, 1, None, None, None, None, None, None), (u'it++was++realy', u'it++was++reali', 3, 1, None, None, None, None, None, None), (u'bad++surprise++for', u'bad++surpris++for', 3, 1, None, None, None, None, None, None), (u'liked++it++:p++=)', u'like++it++:p++=)', 4, 1, u'[0, 0, 0, 1]', u'[0, 0, 0, 1]', None, None, None, None), (u'i', u'i', 1, 2, None, None, None, None, None, None), 
(u'surprise++for', u'surpris++for', 2, 1, None, None, None, None, None, None), (u'realy++liked++it++:p++=)++\U0001f600', u'reali++like++it++:p++=)++\U0001f600', 6, 1, u'[2, 0, 0, 0, 1, 1]', u'[4, 0, 0, 0, 1, 1]', u'[1, 0, 0, 0, 0, 0]', u'[3, 0, 0, 0, 0, 0]', None, None), (u',++but++i++realy++liked', u',++but++i++reali++like', 5, 1, u'[0, 1, 0, 2, 0]', u'[0, 1, 0, 4, 0]', u'[0, 0, 0, 1, 0]', u'[0, 0, 0, 3, 0]', None, None), (u'realy++bad', u'reali++bad', 2, 1, None, None, None, None, None, None), (u'for++me++\U0001f62b++,++but++i', u'for++me++\U0001f62b++,++but++i', 6, 1, u'[0, 0, 1, 0, 1, 0]', u'[0, 0, 1, 0, 1, 0]', None, None, None, None), (u'was++realy++bad', u'was++reali++bad', 3, 1, None, None, None, None, None, None), (u'was', u'was', 1, 2, None, None, None, None, None, None), (u'liked', u'like', 1, 1, None, None, None, None, None, None), (u'i++realy', u'i++reali', 2, 1, u'[0, 2]', u'[0, 4]', u'[0, 1]', u'[0, 3]', None, None), (u',++but++i++realy++liked++it', u',++but++i++reali++like++it', 6, 1, u'[0, 1, 0, 2, 0, 0]', u'[0, 1, 0, 4, 0, 0]', u'[0, 0, 0, 1, 0, 0]', u'[0, 0, 0, 3, 0, 0]', None, None)]
# right_baseline_not_freezed_full_repetativ = [(u'also++very++pity++for++me', u'also++veri++piti++for++me', 5, 1, None, None, None, None, None, None), (u'it++was++also++very', u'it++was++also++veri', 4, 1, None, None, None, None, None, None), (u'.++:-(++@real_trump++#shetlife', u'.++:-(++@real_trump++#shetlif', 4, 1, None, None, None, None, None, None), (u'.++but++it', u'.++but++it', 3, 1, None, None, None, None, None, None), (u'to', u'to', 1, 1, None, None, None, None, None, None), (u':-(++@real_trump++#shetlife++#readytogo++http://www.absurd.com', u':-(++@real_trump++#shetlif++#readytogo++http://www.absurd.com', 5, 1, None, None, None, None, None, None), (u'glad++to++se++you++-)', u'glad++to++se++you++-)', 5, 1, None, None, None, None, None, None), (u'i++loved++it++.++but', u'i++love++it++.++but', 5, 1, None, None, None, None, None, None), (u'me++.', u'me++.', 2, 1, None, None, None, None, None, None), (u'i++loved', u'i++love', 2, 1, None, None, None, None, None, None), (u'.++:-(++@real_trump', u'.++:-(++@real_trump', 3, 1, None, None, None, None, None, None), (u'i++loved++it', u'i++love++it', 3, 1, None, None, None, None, None, None), (u'-)', u'-)', 1, 1, u'1', u'1', None, None, u'1', None), (u'you++-)', u'you++-)', 2, 1, None, None, None, None, None, None), (u'me++.++:-(', u'me++.++:-(', 3, 1, None, None, None, None, None, None), (u'.++:-(++@real_trump++#shetlife++#readytogo', u'.++:-(++@real_trump++#shetlif++#readytogo', 5, 1, None, None, None, None, None, None), (u'but++it', u'but++it', 2, 1, None, None, None, None, None, None), (u'pity++for++me++.', u'piti++for++me++.', 4, 1, None, None, None, None, None, None), (u'for++me++.++:-(', u'for++me++.++:-(', 4, 1, None, None, None, None, None, None), (u'me++.++:-(++@real_trump++#shetlife++#readytogo', u'me++.++:-(++@real_trump++#shetlif++#readytogo', 6, 1, None, None, None, None, None, None), (u'it++was++also++very++pity', u'it++was++also++veri++piti', 5, 1, None, None, None, None, None, None), 
(u'very++pity++for++me++.++:-(', u'veri++piti++for++me++.++:-(', 6, 1, None, None, None, None, None, None), (u'to++se++you++-)', u'to++se++you++-)', 4, 1, None, None, None, None, None, None), (u'http://www.absurd.com', u'http://www.absurd.com', 1, 1, None, None, None, None, None, None), (u'it++was++also++very++pity++for', u'it++was++also++veri++piti++for', 6, 1, None, None, None, None, None, None), (u'very++pity++for', u'veri++piti++for', 3, 1, None, None, None, None, None, None), (u'it++.', u'it++.', 2, 1, None, None, None, None, None, None), (u'loved', u'love', 1, 1, None, None, None, None, None, None), (u'@real_trump', u'@real_trump', 1, 1, None, None, None, None, None, None), (u'se++you++-)', u'se++you++-)', 3, 1, None, None, None, None, None, None), (u'glad++to', u'glad++to', 2, 1, None, None, None, None, None, None), (u'but++it++was++also++very', u'but++it++was++also++veri', 5, 1, None, None, None, None, None, None), (u'also', u'also', 1, 1, None, None, None, None, None, None), (u'for++me++.', u'for++me++.', 3, 1, None, None, None, None, None, None), (u'loved++it++.++but++it', u'love++it++.++but++it', 5, 1, None, None, None, None, None, None), (u'was++also', u'was++also', 2, 1, None, None, None, None, None, None), (u'it++was++also', u'it++was++also', 3, 1, None, None, None, None, None, None), (u'loved++it', u'love++it', 2, 1, None, None, None, None, None, None), (u'pity++for++me++.++:-(', u'piti++for++me++.++:-(', 5, 1, None, None, None, None, None, None), (u'loved++it++.++but', u'love++it++.++but', 4, 1, None, None, None, None, None, None), (u'@real_trump++#shetlife++#readytogo++http://www.absurd.com', u'@real_trump++#shetlif++#readytogo++http://www.absurd.com', 4, 1, None, None, None, None, None, None), (u'.++:-(++@real_trump++#shetlife++#readytogo++http://www.absurd.com', u'.++:-(++@real_trump++#shetlif++#readytogo++http://www.absurd.com', 6, 1, None, None, None, None, None, None), (u'pity', u'piti', 1, 4, u'2', u'4', u'1', u'4', u'2', u'1'), 
(u'me++.++:-(++@real_trump', u'me++.++:-(++@real_trump', 4, 1, None, None, None, None, None, None), (u'but++it++was++also++very++pity', u'but++it++was++also++veri++piti', 6, 1, None, None, None, None, None, None), (u'i++loved++it++.', u'i++love++it++.', 4, 1, None, None, None, None, None, None), (u'very++pity++for++me++.', u'veri++piti++for++me++.', 5, 1, None, None, None, None, None, None), (u'#readytogo++http://www.absurd.com', u'#readytogo++http://www.absurd.com', 2, 1, None, None, None, None, None, None), (u'#readytogo', u'#readytogo', 1, 1, None, None, None, None, None, None), (u'also++very++pity++for++me++.', u'also++veri++piti++for++me++.', 6, 1, None, None, None, None, None, None), (u'se++you', u'se++you', 2, 1, None, None, None, None, None, None), (u'se', u'se', 1, 1, u'1', u'1', None, None, u'1', None), (u'for++me++.++:-(++@real_trump++#shetlife', u'for++me++.++:-(++@real_trump++#shetlif', 6, 1, None, None, None, None, None, None), (u'but++it++was', u'but++it++was', 3, 1, None, None, None, None, None, None), (u'glad++to++se++you', u'glad++to++se++you', 4, 1, None, None, None, None, None, None), (u'#shetlife++#readytogo', u'#shetlif++#readytogo', 2, 1, None, None, None, None, None, None), (u'very++pity++for++me', u'veri++piti++for++me', 4, 1, None, None, None, None, None, None), (u'@real_trump++#shetlife++#readytogo', u'@real_trump++#shetlif++#readytogo', 3, 1, None, None, None, None, None, None), (u'#shetlife++#readytogo++http://www.absurd.com', u'#shetlif++#readytogo++http://www.absurd.com', 3, 1, None, None, None, None, None, None), (u':-(++@real_trump', u':-(++@real_trump', 2, 1, None, None, None, None, None, None), (u'pity++for++me++.++:-(++@real_trump', u'piti++for++me++.++:-(++@real_trump', 6, 1, None, None, None, None, None, None), (u'.++but++it++was++also', u'.++but++it++was++also', 5, 1, None, None, None, None, None, None), (u'it++.++but++it++was', u'it++.++but++it++was', 5, 1, None, None, None, None, None, None), (u'was++also++very++pity++for', 
u'was++also++veri++piti++for', 5, 1, None, None, None, None, None, None), (u'also++very', u'also++veri', 2, 1, None, None, None, None, None, None), (u'to++se', u'to++se', 2, 1, None, None, None, None, None, None), (u'pity++for', u'piti++for', 2, 1, None, None, None, None, None, None), (u'to++se++you', u'to++se++you', 3, 1, None, None, None, None, None, None), (u'for++me++.++:-(++@real_trump', u'for++me++.++:-(++@real_trump', 5, 1, None, None, None, None, None, None), (u'also++very++pity', u'also++veri++piti', 3, 1, None, None, None, None, None, None), (u'very', u'veri', 1, 3, u'2', u'4', u'1', u'3', u'2', u'1'), (u'it++.++but++it++was++also', u'it++.++but++it++was++also', 6, 1, None, None, None, None, None, None), (u'was++also++very', u'was++also++veri', 3, 1, None, None, None, None, None, None), (u'loved++it++.++but++it++was', u'love++it++.++but++it++was', 6, 1, None, None, None, None, None, None), (u'pity++for++me', u'piti++for++me', 3, 1, None, None, None, None, None, None), (u'me++.++:-(++@real_trump++#shetlife', u'me++.++:-(++@real_trump++#shetlif', 5, 1, None, None, None, None, None, None), (u'very++pity', u'veri++piti', 2, 1, u'[2, 2]', u'[4, 4]', u'[1, 1]', u'[3, 4]', u'1', u'1'), (u'was++also++very++pity++for++me', u'was++also++veri++piti++for++me', 6, 1, None, None, None, None, None, None), (u'also++very++pity++for', u'also++veri++piti++for', 4, 1, None, None, None, None, None, None), (u'but++it++was++also', u'but++it++was++also', 4, 1, None, None, None, None, None, None), (u'@real_trump++#shetlife', u'@real_trump++#shetlif', 2, 1, None, None, None, None, None, None), (u'it++.++but++it', u'it++.++but++it', 4, 1, None, None, None, None, None, None), (u'.++but++it++was', u'.++but++it++was', 4, 1, None, None, None, None, None, None), (u':-(++@real_trump++#shetlife', u':-(++@real_trump++#shetlif', 3, 1, None, None, None, None, None, None), (u'glad++to++se', u'glad++to++se', 3, 1, None, None, None, None, None, None), 
(u':-(++@real_trump++#shetlife++#readytogo', u':-(++@real_trump++#shetlif++#readytogo', 4, 1, None, None, None, None, None, None), (u'.++:-(', u'.++:-(', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None), (u'loved++it++.', u'love++it++.', 3, 1, None, None, None, None, None, None), (u'glad', u'glad', 1, 1, u'1', u'1', None, None, u'1', None), (u'.++but++it++was++also++very', u'.++but++it++was++also++veri', 6, 1, None, None, None, None, None, None), (u'was++also++very++pity', u'was++also++veri++piti', 4, 1, None, None, None, None, None, None), (u'i++loved++it++.++but++it', u'i++love++it++.++but++it', 6, 1, None, None, None, None, None, None), (u'it++.++but', u'it++.++but', 3, 1, None, None, None, None, None, None), (u',++which', u',++which', 2, 2, None, None, None, None, None, None), (u'bad++news++,++which', u'bad++news++,++which', 4, 1, None, None, None, None, None, None), (u',++which++we++can++not', u',++which++we++can++not', 5, 1, None, None, None, None, None, None), (u'tiny++model++,++which++we', u'tini++model++,++which++we', 5, 1, None, None, None, None, None, None), (u'acept++.++-(', u'acept++.++-(', 3, 1, None, None, None, None, None, None), (u',++which++we++can', u',++which++we++can', 4, 2, None, None, None, None, None, None), (u'acept++.', u'acept++.', 2, 1, None, None, None, None, None, None), (u',++which++we', u',++which++we', 3, 2, None, None, None, None, None, None), (u'not++acept++.++-(++\U0001f62b', u'not++acept++.++-(++\U0001f62b', 5, 1, None, None, None, None, None, None), (u'tiny++model++,++which', u'tini++model++,++which', 4, 1, None, None, None, None, None, None), (u'a++bad++news++,', u'a++bad++news++,', 4, 1, None, None, None, None, None, None), (u'can++not++acept++.', u'can++not++acept++.', 4, 1, None, None, None, None, None, None), (u'-(++\U0001f62b', u'-(++\U0001f62b', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None), (u'which++we++can', u'which++we++can', 3, 2, None, None, None, None, None, None), 
(u'-(++\U0001f62b++:-(++#shetlife++http://www.noooo.com', u'-(++\U0001f62b++:-(++#shetlif++http://www.noooo.com', 5, 1, None, None, None, None, None, None), (u'explain++a++big', u'explain++a++big', 3, 1, None, None, None, None, None, None), (u'we++can', u'we++can', 2, 2, None, None, None, None, None, None), (u'can++use', u'can++use', 2, 1, None, None, None, None, None, None), (u'we++can++use++for++explain', u'we++can++use++for++explain', 5, 1, None, None, None, None, None, None), (u',++which++we++can++use++for', u',++which++we++can++use++for', 6, 1, None, None, None, None, None, None), (u'use++for++explain', u'use++for++explain', 3, 1, None, None, None, None, None, None), (u'explain++a++big++things++.', u'explain++a++big++thing++.', 5, 1, None, None, None, None, None, None), (u'a++bad++news', u'a++bad++news', 3, 1, None, None, None, None, None, None), (u'bad++news++,++which++we', u'bad++news++,++which++we', 5, 1, None, None, None, None, None, None), (u'for++explain', u'for++explain', 2, 1, None, None, None, None, None, None), (u'can++use++for++explain++a', u'can++use++for++explain++a', 5, 1, None, None, None, None, None, None), (u'we++can++not', u'we++can++not', 3, 1, None, None, None, None, None, None), (u'explain', u'explain', 1, 1, None, None, None, None, None, None), (u'-(', u'-(', 1, 1, u'1', u'1', None, None, u'1', None), (u'bad++news++,++which++we++can', u'bad++news++,++which++we++can', 6, 1, None, None, None, None, None, None), (u'bad++news', u'bad++news', 2, 1, None, None, None, None, None, None), (u'news++,++which++we++can', u'news++,++which++we++can', 5, 1, None, None, None, None, None, None), (u'news++,++which++we', u'news++,++which++we', 4, 1, None, None, None, None, None, None), (u'a++bad++news++,++which', u'a++bad++news++,++which', 5, 1, None, None, None, None, None, None), (u'big++things++.', u'big++thing++.', 3, 1, None, None, None, None, None, None), (u'things++.', u'thing++.', 2, 1, None, None, None, None, None, None), (u'things', u'thing', 1, 1, 
None, None, None, None, None, None), (u'-(++\U0001f62b++:-(', u'-(++\U0001f62b++:-(', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, u'1', None), (u'model++,++which++we', u'model++,++which++we', 4, 1, None, None, None, None, None, None), (u'#shetlife', u'#shetlif', 1, 3, None, None, u'1', u'2', None, u'1'), (u'can++not++acept++.++-(++\U0001f62b', u'can++not++acept++.++-(++\U0001f62b', 6, 1, None, None, None, None, None, None), (u'we++can++not++acept++.', u'we++can++not++acept++.', 5, 1, None, None, None, None, None, None), (u'big++things', u'big++thing', 2, 1, None, None, None, None, None, None), (u'use++for++explain++a', u'use++for++explain++a', 4, 1, None, None, None, None, None, None), (u'not++acept', u'not++acept', 2, 1, None, None, None, None, None, None), (u'acept++.++-(++\U0001f62b++:-(', u'acept++.++-(++\U0001f62b++:-(', 5, 1, None, None, None, None, None, None), (u'for++explain++a++big++things++.', u'for++explain++a++big++thing++.', 6, 1, None, None, None, None, None, None), (u'\U0001f62b++:-(++#shetlife++http://www.noooo.com', u'\U0001f62b++:-(++#shetlif++http://www.noooo.com', 4, 1, None, None, None, None, None, None), (u'we++can++use', u'we++can++use', 3, 1, None, None, None, None, None, None), (u'which++we++can++use++for++explain', u'which++we++can++use++for++explain', 6, 1, None, None, None, None, None, None), (u'not++acept++.++-(', u'not++acept++.++-(', 4, 1, None, None, None, None, None, None), (u':-(++#shetlife', u':-(++#shetlif', 2, 1, None, None, None, None, None, None), (u'which++we++can++use', u'which++we++can++use', 4, 1, None, None, None, None, None, None), (u'explain++a', u'explain++a', 2, 1, None, None, None, None, None, None), (u'.++-(++\U0001f62b++:-(++#shetlife++http://www.noooo.com', u'.++-(++\U0001f62b++:-(++#shetlif++http://www.noooo.com', 6, 1, None, None, None, None, None, None), (u'not++acept++.', u'not++acept++.', 3, 1, None, None, None, None, None, None), (u'a++big++things', u'a++big++thing', 3, 1, None, None, None, None, None, 
None), (u'.++-(', u'.++-(', 2, 1, None, None, None, None, None, None), (u'a++bad', u'a++bad', 2, 1, None, None, None, None, None, None), (u'use++for', u'use++for', 2, 1, None, None, None, None, None, None), (u'can++not++acept++.++-(', u'can++not++acept++.++-(', 5, 1, None, None, None, None, None, None), (u'a++big++things++.', u'a++big++thing++.', 4, 1, None, None, None, None, None, None), (u'news', u'news', 1, 1, None, None, None, None, None, None), (u'which++we++can++not', u'which++we++can++not', 4, 1, None, None, None, None, None, None), (u'http://www.noooo.com', u'http://www.noooo.com', 1, 1, None, None, None, None, None, None), (u'-(++\U0001f62b++:-(++#shetlife', u'-(++\U0001f62b++:-(++#shetlif', 4, 1, None, None, None, None, None, None), (u'acept++.++-(++\U0001f62b', u'acept++.++-(++\U0001f62b', 4, 1, None, None, None, None, None, None), (u'which++we++can++not++acept', u'which++we++can++not++acept', 5, 1, None, None, None, None, None, None), (u':-(', u':-(', 1, 2, u'2', u'2', None, None, u'2', None), (u'news++,++which++we++can++not', u'news++,++which++we++can++not', 6, 1, None, None, None, None, None, None), (u'can++use++for++explain', u'can++use++for++explain', 4, 1, None, None, None, None, None, None), (u':-(++#shetlife++http://www.noooo.com', u':-(++#shetlif++http://www.noooo.com', 3, 1, None, None, None, None, None, None), (u'not', u'not', 1, 1, None, None, None, None, None, None), (u',++which++we++can++not++acept', u',++which++we++can++not++acept', 6, 1, None, None, None, None, None, None), (u'which++we++can++use++for', u'which++we++can++use++for', 5, 1, None, None, None, None, None, None), (u'can++not++acept', u'can++not++acept', 3, 1, None, None, None, None, None, None), (u'explain++a++big++things', u'explain++a++big++thing', 4, 1, None, None, None, None, None, None), (u'can', u'can', 1, 2, None, None, None, None, None, None), (u'tiny++model++,++which++we++can', u'tini++model++,++which++we++can', 6, 1, None, None, None, None, None, None), 
(u'acept++.++-(++\U0001f62b++:-(++#shetlife', u'acept++.++-(++\U0001f62b++:-(++#shetlif', 6, 1, None, None, None, None, None, None), (u'use++for++explain++a++big++things', u'use++for++explain++a++big++thing', 6, 1, None, None, None, None, None, None), (u'we++can++use++for++explain++a', u'we++can++use++for++explain++a', 6, 1, None, None, None, None, None, None), (u'use++for++explain++a++big', u'use++for++explain++a++big', 5, 1, None, None, None, None, None, None), (u'model++,++which++we++can++use', u'model++,++which++we++can++use', 6, 1, None, None, None, None, None, None), (u'which++we', u'which++we', 2, 2, None, None, None, None, None, None), (u'not++acept++.++-(++\U0001f62b++:-(', u'not++acept++.++-(++\U0001f62b++:-(', 6, 1, None, None, None, None, None, None), (u'model++,++which++we++can', u'model++,++which++we++can', 5, 1, None, None, None, None, None, None), (u'we++can++not++acept', u'we++can++not++acept', 4, 1, None, None, None, None, None, None), (u'use', u'use', 1, 1, None, None, None, None, None, None), (u',++which++we++can++use', u',++which++we++can++use', 5, 1, None, None, None, None, None, None), (u'bad++news++,', u'bad++news++,', 3, 1, None, None, None, None, None, None), (u'can++use++for', u'can++use++for', 3, 1, None, None, None, None, None, None), (u'news++,', u'news++,', 2, 1, None, None, None, None, None, None), (u'can++not', u'can++not', 2, 1, None, None, None, None, None, None), (u'.++-(++\U0001f62b++:-(', u'.++-(++\U0001f62b++:-(', 4, 1, None, None, None, None, None, None), (u'we', u'we', 1, 2, None, None, None, None, None, None), (u'for++explain++a', u'for++explain++a', 3, 1, None, None, None, None, None, None), (u'acept', u'acept', 1, 1, None, None, None, None, None, None), (u'for++explain++a++big', u'for++explain++a++big', 4, 1, None, None, None, None, None, None), (u'a++bad++news++,++which++we', u'a++bad++news++,++which++we', 6, 1, None, None, None, None, None, None), (u'#shetlife++http://www.noooo.com', u'#shetlif++http://www.noooo.com', 
2, 1, None, None, None, None, None, None), (u'\U0001f62b++:-(', u'\U0001f62b++:-(', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None), (u'.++-(++\U0001f62b', u'.++-(++\U0001f62b', 3, 1, None, None, None, None, None, None), (u'we++can++not++acept++.++-(', u'we++can++not++acept++.++-(', 6, 1, None, None, None, None, None, None), (u'news++,++which', u'news++,++which', 3, 1, None, None, None, None, None, None), (u'which', u'which', 1, 2, None, None, None, None, None, None), (u'model++,++which', u'model++,++which', 3, 1, None, None, None, None, None, None), (u'we++can++use++for', u'we++can++use++for', 4, 1, None, None, None, None, None, None), (u'which++we++can++not++acept++.', u'which++we++can++not++acept++.', 6, 1, None, None, None, None, None, None), (u'.++-(++\U0001f62b++:-(++#shetlife', u'.++-(++\U0001f62b++:-(++#shetlif', 5, 1, None, None, None, None, None, None), (u'can++use++for++explain++a++big', u'can++use++for++explain++a++big', 6, 1, None, None, None, None, None, None), (u'\U0001f62b++:-(++#shetlife', u'\U0001f62b++:-(++#shetlif', 3, 1, None, None, None, None, None, None), (u'for++explain++a++big++things', u'for++explain++a++big++thing', 5, 1, None, None, None, None, None, None), (u'model', u'model', 1, 2, u'1', u'2', None, None, u'1', None), (u'but++a++big++explanation++.++right', u'but++a++big++explan++.++right', 6, 1, None, None, None, None, None, None), (u'what', u'what', 1, 1, None, None, None, None, None, None), (u'do++you++think++about', u'do++you++think++about', 4, 1, None, None, None, None, None, None), (u'you++\U0001f600++\U0001f308++\U0001f600', u'you++\U0001f600++\U0001f308++\U0001f600', 4, 1, u'[1, 2, 1, "IGNOR"]', u'[2, 2, 1, "IGNOR"]', None, None, u'1', None), (u'it++?++1++\U0001f62b', u'it++?++1++\U0001f62b', 4, 1, None, None, None, None, None, None), (u'a++big', u'a++big', 2, 2, None, None, None, None, None, None), (u'1++\U0001f62b++1++.', u'1++\U0001f62b++1++.', 4, 1, None, None, None, None, None, None), (u'you++think', u'you++think', 2, 
1, None, None, None, None, None, None), (u',++but++a++big++explanation++.', u',++but++a++big++explan++.', 6, 1, None, None, None, None, None, None), (u'what++do++you++think++about++it', u'what++do++you++think++about++it', 6, 1, None, None, None, None, None, None), (u'think++about++it++?++1++\U0001f62b', u'think++about++it++?++1++\U0001f62b', 6, 1, None, None, None, None, None, None), (u'but++you', u'but++you', 2, 4, u'[10, 6]', u'[15, 8]', u'[2, 2]', u'[4, 4]', u'4', u'2'), (u'but++you++\U0001f600++\U0001f308++\U0001f600', u'but++you++\U0001f600++\U0001f308++\U0001f600', 5, 1, u'[3, 1, 2, 1, "IGNOR"]', u'[3, 2, 2, 1, "IGNOR"]', None, None, u'1', None), (u'.++but', u'.++but', 2, 3, None, None, None, None, None, None), (u'big++explanation++.++right++?++what', u'big++explan++.++right++?++what', 6, 1, None, None, None, None, None, None), (u'tiny++surprise++.++but', u'tini++surpris++.++but', 4, 1, None, None, None, None, None, None), (u'about++it++?++1++\U0001f62b++1', u'about++it++?++1++\U0001f62b++1', 6, 1, None, None, None, None, None, None), (u'you++think++about++it++?', u'you++think++about++it++?', 5, 1, None, None, None, None, None, None), (u'?++what++do', u'?++what++do', 3, 1, None, None, None, None, None, None), (u'\U0001f600++\U0001f308++\U0001f600++\U0001f308', u'\U0001f600++\U0001f308++\U0001f600++\U0001f308', 4, 1, u'[2, 2, "IGNOR", "IGNOR"]', u'[2, 2, "IGNOR", "IGNOR"]', None, None, u'1', None), (u'what++do++you', u'what++do++you', 3, 1, None, None, None, None, None, None), (u'but++a++big++explanation++.', u'but++a++big++explan++.', 5, 1, None, None, None, None, None, None), (u',++but++a', u',++but++a', 3, 1, None, None, None, None, None, None), (u'1', u'1', 1, 2, u'2', u'2', None, None, u'2', None), (u'model++,', u'model++,', 2, 2, None, None, None, None, None, None), (u'?++what++do++you++think++about', u'?++what++do++you++think++about', 6, 1, None, None, None, None, None, None), (u'what++do++you++think', u'what++do++you++think', 4, 1, None, None, None, 
None, None, None), (u'right++?++what++do', u'right++?++what++do', 4, 1, None, None, None, None, None, None), (u'what++do', u'what++do', 2, 1, None, None, None, None, None, None), (u'.++right++?++what', u'.++right++?++what', 4, 1, None, None, None, None, None, None), (u'.++but++you', u'.++but++you', 3, 2, None, None, None, None, None, None), (u'about++it++?++1', u'about++it++?++1', 4, 1, None, None, None, None, None, None), (u'tiny', u'tini', 1, 10, u'1', u'1', u'2', u'9', u'1', u'2'), (u'tiny++model', u'tini++model', 2, 2, None, None, None, None, None, None), (u'think++about', u'think++about', 2, 1, None, None, None, None, None, None), (u'surprise++.++but++you', u'surpris++.++but++you', 4, 1, None, None, None, None, None, None), (u'explanation++.++right++?++what', u'explan++.++right++?++what', 5, 1, None, None, None, None, None, None), (u'1++.++but++you++but', u'1++.++but++you++but', 5, 1, None, None, None, None, None, None), (u'model++,++but++a++big++explanation', u'model++,++but++a++big++explan', 6, 1, None, None, None, None, None, None), (u'?++1++\U0001f62b++1++.++but', u'?++1++\U0001f62b++1++.++but', 6, 1, None, None, None, None, None, None), (u'a++big++explanation', u'a++big++explan', 3, 1, None, None, None, None, None, None), (u'explanation++.++right++?++what++do', u'explan++.++right++?++what++do', 6, 1, None, None, None, None, None, None), (u'?++what', u'?++what', 2, 1, None, None, None, None, None, None), (u'right', u'right', 1, 1, u'1', u'1', None, None, u'1', None), (u'you++but++you', u'you++but++you', 3, 2, u'[6, 6, "IGNOR"]', u'[8, 8, "IGNOR"]', None, None, u'2', None), (u'big++explanation++.++right++?', u'big++explan++.++right++?', 5, 1, None, None, None, None, None, None), (u'it++?', u'it++?', 2, 1, None, None, None, None, None, None), (u'what++do++you++think++about', u'what++do++you++think++about', 5, 1, None, None, None, None, None, None), (u'but++you++but++you++\U0001f600++\U0001f308', u'but++you++but++you++\U0001f600++\U0001f308', 6, 1, u'[5, 3, 
"IGNOR", "IGNOR", 1, 1]', u'[5, 4, "IGNOR", "IGNOR", 1, 1]', None, None, u'1', None), (u'\U0001f308++\U0001f600++\U0001f308', u'\U0001f308++\U0001f600++\U0001f308', 3, 1, u'[2, 1, "IGNOR"]', u'[2, 1, "IGNOR"]', None, None, u'1', None), (u'explanation++.++right', u'explan++.++right', 3, 1, None, None, None, None, None, None), (u'.', u'.', 1, 7, u'1', u'1', None, None, u'1', None), (u'you', u'you', 1, 8, u'7', u'9', u'2', u'4', u'7', u'2'), (u'surprise++.++but', u'surpris++.++but', 3, 1, None, None, None, None, None, None), (u'?', u'?', 1, 2, u'1', u'1', None, None, u'1', None), (u'explanation++.++right++?', u'explan++.++right++?', 4, 1, None, None, None, None, None, None), (u'it++?++1', u'it++?++1', 3, 1, None, None, None, None, None, None), (u'tiny++model++,++but', u'tini++model++,++but', 4, 1, None, None, None, None, None, None), (u'you++think++about++it++?++1', u'you++think++about++it++?++1', 6, 1, None, None, None, None, None, None), (u'but++you++\U0001f600++\U0001f308', u'but++you++\U0001f600++\U0001f308', 4, 1, u'[3, 1, 1, 1]', u'[3, 2, 1, 1]', None, None, u'1', None), (u'but++a++big', u'but++a++big', 3, 1, None, None, None, None, None, None), (u'tiny++surprise++.++but++you++but', u'tini++surpris++.++but++you++but', 6, 1, None, None, None, None, None, None), (u'do++you++think++about++it', u'do++you++think++about++it', 5, 1, None, None, None, None, None, None), (u'big++explanation++.', u'big++explan++.', 3, 1, None, None, None, None, None, None), (u'think++about++it++?++1', u'think++about++it++?++1', 5, 1, None, None, None, None, None, None), (u'.++right', u'.++right', 2, 1, None, None, None, None, None, None), (u'explanation++.', u'explan++.', 2, 1, None, None, None, None, None, None), (u'but++you++but', u'but++you++but', 3, 2, u'[10, 4, "IGNOR"]', u'[15, 4, "IGNOR"]', u'[4, 2, "IGNOR"]', u'[10, 4, "IGNOR"]', u'2', u'2'), (u'.++but++you++but++you++\U0001f600', u'.++but++you++but++you++\U0001f600', 6, 1, None, None, None, None, None, None), (u'tiny++surprise', 
u'tini++surpris', 2, 1, None, None, None, None, None, None), (u'\U0001f600++\U0001f308++\U0001f600++\U0001f308++\U0001f600', u'\U0001f600++\U0001f308++\U0001f600++\U0001f308++\U0001f600', 5, 1, u'[3, 2, "IGNOR", "IGNOR", "IGNOR"]', u'[3, 2, "IGNOR", "IGNOR", "IGNOR"]', None, None, u'1', None), (u'model++,++but++a', u'model++,++but++a', 4, 1, None, None, None, None, None, None), (u'you++think++about', u'you++think++about', 3, 1, None, None, None, None, None, None), (u'?++what++do++you', u'?++what++do++you', 4, 1, None, None, None, None, None, None), (u'explanation', u'explan', 1, 1, u'1', u'1', None, None, u'1', None), (u'you++but++you++\U0001f600++\U0001f308++\U0001f600', u'you++but++you++\U0001f600++\U0001f308++\U0001f600', 6, 1, u'[3, 3, "IGNOR", 2, 1, "IGNOR"]', u'[4, 3, "IGNOR", 2, 1, "IGNOR"]', None, None, u'1', None), (u'?++1', u'?++1', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None), (u'do++you++think++about++it++?', u'do++you++think++about++it++?', 6, 1, None, None, None, None, None, None), (u'do++you++think', u'do++you++think', 3, 1, None, None, None, None, None, None), (u'model++,++but', u'model++,++but', 3, 1, None, None, None, None, None, None), (u'tiny++model++,++but++a', u'tini++model++,++but++a', 5, 1, None, None, None, None, None, None), (u'.++but++you++but++you', u'.++but++you++but++you', 5, 2, None, None, None, None, None, None), (u'\U0001f62b++1++.++but', u'\U0001f62b++1++.++but', 4, 1, None, None, None, None, None, None), (u'right++?', u'right++?', 2, 1, None, None, None, None, None, None), (u'but++you++\U0001f600', u'but++you++\U0001f600', 3, 1, u'[3, 1, 1]', u'[3, 2, 1]', None, None, u'1', None), (u'model++,++but++a++big', u'model++,++but++a++big', 5, 1, None, None, None, None, None, None), (u'\U0001f62b++1++.++but++you++but', u'\U0001f62b++1++.++but++you++but', 6, 1, None, None, None, None, None, None), (u'tiny++surprise++.', u'tini++surpris++.', 3, 1, None, None, None, None, None, None), (u'?++what++do++you++think', 
u'?++what++do++you++think', 5, 1, None, None, None, None, None, None), (u'.++but++you++but', u'.++but++you++but', 4, 2, None, None, None, None, None, None), (u',++but++a++big++explanation', u',++but++a++big++explan', 5, 1, None, None, None, None, None, None), (u'\U0001f62b++1++.', u'\U0001f62b++1++.', 3, 1, None, None, None, None, None, None), (u'about', u'about', 1, 1, None, None, None, None, None, None), (u'it++?++1++\U0001f62b++1', u'it++?++1++\U0001f62b++1', 5, 1, None, None, None, None, None, None), (u'tiny++model++,++but++a++big', u'tini++model++,++but++a++big', 6, 1, None, None, None, None, None, None), (u'you++but', u'you++but', 2, 2, u'[4, 6]', u'[4, 8]', u'[2, 2]', u'[4, 6]', u'2', u'2'), (u'right++?++what', u'right++?++what', 3, 1, None, None, None, None, None, None), (u'\U0001f308++\U0001f600++\U0001f308++\U0001f600', u'\U0001f308++\U0001f600++\U0001f308++\U0001f600', 4, 1, u'[2, 2, "IGNOR", "IGNOR"]', u'[2, 2, "IGNOR", "IGNOR"]', None, None, u'1', None), (u'but++you++but++you++\U0001f600', u'but++you++but++you++\U0001f600', 5, 1, u'[5, 3, "IGNOR", "IGNOR", 1]', u'[5, 4, "IGNOR", "IGNOR", 1]', None, None, u'1', None), (u'\U0001f62b++1++.++but++you', u'\U0001f62b++1++.++but++you', 5, 1, None, None, None, None, None, None), (u'surprise++.++but++you++but++you', u'surpris++.++but++you++but++you', 6, 1, None, None, None, None, None, None), (u'a++big++explanation++.++right', u'a++big++explan++.++right', 5, 1, None, None, None, None, None, None), (u'1++.++but', u'1++.++but', 3, 1, None, None, None, None, None, None), (u'you++but++you++\U0001f600++\U0001f308', u'you++but++you++\U0001f600++\U0001f308', 5, 1, u'[3, 3, "IGNOR", 1, 1]', u'[4, 3, "IGNOR", 1, 1]', None, None, u'1', None), (u'\U0001f62b++1', u'\U0001f62b++1', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None), (u'surprise++.', u'surpris++.', 2, 1, None, None, None, None, None, None), (u'tiny++model++,', u'tini++model++,', 3, 2, None, None, None, None, None, None), (u'right++?++what++do++you++think', 
u'right++?++what++do++you++think', 6, 1, None, None, None, None, None, None), (u'?++1++\U0001f62b', u'?++1++\U0001f62b', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, u'1', None), (u'you++\U0001f600++\U0001f308++\U0001f600++\U0001f308++\U0001f600', u'you++\U0001f600++\U0001f308++\U0001f600++\U0001f308++\U0001f600', 6, 1, u'[1, 3, 2, "IGNOR", "IGNOR", "IGNOR"]', u'[2, 3, 2, "IGNOR", "IGNOR", "IGNOR"]', None, None, u'1', None), (u'you++but++you++\U0001f600', u'you++but++you++\U0001f600', 4, 1, u'[3, 3, "IGNOR", 1]', u'[4, 3, "IGNOR", 1]', None, None, u'1', None), (u'about++it++?', u'about++it++?', 3, 1, None, None, None, None, None, None), (u'think++about++it', u'think++about++it', 3, 1, None, None, None, None, None, None), (u'surprise++.++but++you++but', u'surpris++.++but++you++but', 5, 1, None, None, None, None, None, None), (u'about++it', u'about++it', 2, 1, None, None, None, None, None, None), (u'1++.++but++you', u'1++.++but++you', 4, 1, None, None, None, None, None, None), (u'but++you++but++you', u'but++you++but++you', 4, 2, u'[10, 6, "IGNOR", "IGNOR"]', u'[15, 8, "IGNOR", "IGNOR"]', None, None, u'2', None), (u'about++it++?++1++\U0001f62b', u'about++it++?++1++\U0001f62b', 5, 1, None, None, None, None, None, None), (u'.++right++?', u'.++right++?', 3, 1, None, None, None, None, None, None), (u'tiny++surprise++.++but++you', u'tini++surpris++.++but++you', 5, 1, None, None, None, None, None, None), (u'you++think++about++it', u'you++think++about++it', 4, 1, None, None, None, None, None, None), (u'do++you', u'do++you', 2, 1, None, None, None, None, None, None), (u'1++\U0001f62b', u'1++\U0001f62b', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None), (u'.++right++?++what++do', u'.++right++?++what++do', 5, 1, None, None, None, None, None, None), (u'but++you++\U0001f600++\U0001f308++\U0001f600++\U0001f308', u'but++you++\U0001f600++\U0001f308++\U0001f600++\U0001f308', 6, 1, u'[3, 1, 2, 2, "IGNOR", "IGNOR"]', u'[3, 2, 2, 2, "IGNOR", "IGNOR"]', None, None, u'1', None), 
(u'1++\U0001f62b++1', u'1++\U0001f62b++1', 3, 1, u'[2, 1, "IGNOR"]', u'[2, 1, "IGNOR"]', None, None, u'1', None), (u'big++explanation++.++right', u'big++explan++.++right', 4, 1, None, None, None, None, None, None), (u'it++?++1++\U0001f62b++1++.', u'it++?++1++\U0001f62b++1++.', 6, 1, None, None, None, None, None, None), (u'?++1++\U0001f62b++1++.', u'?++1++\U0001f62b++1++.', 5, 1, None, None, None, None, None, None), (u'you++\U0001f600', u'you++\U0001f600', 2, 1, u'[1, 1]', u'[2, 1]', None, None, u'1', None), (u'a', u'a', 1, 3, None, None, None, None, None, None), (u'1++\U0001f62b++1++.++but++you', u'1++\U0001f62b++1++.++but++you', 6, 1, None, None, None, None, None, None), (u'you++\U0001f600++\U0001f308', u'you++\U0001f600++\U0001f308', 3, 1, u'[1, 1, 1]', u'[2, 1, 1]', None, None, u'1', None), (u'.++right++?++what++do++you', u'.++right++?++what++do++you', 6, 1, None, None, None, None, None, None), (u'you++\U0001f600++\U0001f308++\U0001f600++\U0001f308', u'you++\U0001f600++\U0001f308++\U0001f600++\U0001f308', 5, 1, u'[1, 2, 2, "IGNOR", "IGNOR"]', u'[2, 2, 2, "IGNOR", "IGNOR"]', None, None, u'1', None), (u'think', u'think', 1, 1, None, None, None, None, None, None), (u'1++\U0001f62b++1++.++but', u'1++\U0001f62b++1++.++but', 5, 1, None, None, None, None, None, None), (u'think++about++it++?', u'think++about++it++?', 4, 1, None, None, None, None, None, None), (u'big', u'big', 1, 5, u'2', u'2', u'2', u'5', u'2', u'2'), (u'big++explanation', u'big++explan', 2, 1, None, None, None, None, None, None), (u'1++.++but++you++but++you', u'1++.++but++you++but++you', 6, 1, None, None, None, None, None, None), (u'right++?++what++do++you', u'right++?++what++do++you', 5, 1, None, None, None, None, None, None), (u'but++a++big++explanation', u'but++a++big++explan', 4, 1, None, None, None, None, None, None), (u'?++1++\U0001f62b++1', u'?++1++\U0001f62b++1', 4, 1, u'[1, 2, 1, "IGNOR"]', u'[1, 2, 1, "IGNOR"]', None, None, u'1', None), (u'do', u'do', 1, 1, None, None, None, None, None, 
None), (u'a++big++explanation++.', u'a++big++explan++.', 4, 1, None, None, None, None, None, None), (u'a++big++explanation++.++right++?', u'a++big++explan++.++right++?', 6, 1, None, None, None, None, None, None), (u'but++a', u'but++a', 2, 1, None, None, None, None, None, None), (u'1++.', u'1++.', 2, 1, None, None, None, None, None, None), (u',++but++a++big', u',++but++a++big', 4, 1, None, None, None, None, None, None), (u'but++i++realy++liked', u'but++i++reali++like', 4, 1, None, None, None, None, None, None), (u'liked++it++:p++=)++\U0001f600++\U0001f308', u'like++it++:p++=)++\U0001f600++\U0001f308', 6, 1, None, None, None, None, None, None), (u'was++realy', u'was++reali', 2, 1, None, None, None, None, None, None), (u',++but++i++realy', u',++but++i++reali', 4, 1, None, None, None, None, None, None), (u'bad++surprise++for++me++\U0001f62b++,', u'bad++surpris++for++me++\U0001f62b++,', 6, 1, None, None, None, None, None, None), (u'i++realy++liked++it++:p', u'i++reali++like++it++:p', 5, 1, None, None, None, None, None, None), (u'it', u'it', 1, 5, None, None, None, None, None, None), (u'but', u'but', 1, 13, u'11', u'16', u'4', u'10', u'11', u'4'), (u'realy++liked', u'reali++like', 2, 1, None, None, None, None, None, None), (u':p++=)++\U0001f600++\U0001f308++\U0001f600', u':p++=)++\U0001f600++\U0001f308++\U0001f600', 5, 1, None, None, None, None, None, None), (u'realy++bad++surprise++for', u'reali++bad++surpris++for', 4, 1, None, None, None, None, None, None), (u'me++\U0001f62b++,++but++i', u'me++\U0001f62b++,++but++i', 5, 1, None, None, None, None, None, None), (u'me', u'me', 1, 2, None, None, None, None, None, None), (u'was++realy++bad++surprise++for++me', u'was++reali++bad++surpris++for++me', 6, 1, None, None, None, None, None, None), (u'me++\U0001f62b++,', u'me++\U0001f62b++,', 3, 1, None, None, None, None, None, None), (u'liked++it++:p++=)++\U0001f600', u'like++it++:p++=)++\U0001f600', 5, 1, None, None, None, None, None, None), (u'\U0001f62b++,++but', 
u'\U0001f62b++,++but', 3, 1, None, None, None, None, None, None), (u'realy', u'reali', 1, 4, u'2', u'4', u'1', u'3', u'2', u'1'), (u'surprise++for++me++\U0001f62b', u'surpris++for++me++\U0001f62b', 4, 1, None, None, None, None, None, None), (u'i++realy++liked++it++:p++=)', u'i++reali++like++it++:p++=)', 6, 1, None, None, None, None, None, None), (u'\U0001f600', u'\U0001f600', 1, 5, u'4', u'4', None, None, u'4', None), (u'\U0001f308++\U0001f600', u'\U0001f308++\U0001f600', 2, 3, u'[2, 2]', u'[2, 2]', None, None, u'2', None), (u'=)', u'=)', 1, 1, u'1', u'1', None, None, u'1', None), (u':p', u':p', 1, 1, None, None, None, None, None, None), (u'i++realy++liked++it', u'i++reali++like++it', 4, 1, None, None, None, None, None, None), (u'me++\U0001f62b++,++but', u'me++\U0001f62b++,++but', 4, 1, None, None, None, None, None, None), (u'it++was', u'it++was', 2, 2, None, None, None, None, None, None), (u'surprise++for++me', u'surpris++for++me', 3, 1, None, None, None, None, None, None), (u'\U0001f62b++,++but++i++realy', u'\U0001f62b++,++but++i++reali', 5, 1, None, None, None, None, None, None), (u'=)++\U0001f600++\U0001f308', u'=)++\U0001f600++\U0001f308', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, u'1', None), (u',++but++i', u',++but++i', 3, 1, None, None, None, None, None, None), (u'it++:p++=)++\U0001f600', u'it++:p++=)++\U0001f600', 4, 1, None, None, None, None, None, None), (u'but++i++realy++liked++it++:p', u'but++i++reali++like++it++:p', 6, 1, None, None, None, None, None, None), (u'realy++liked++it++:p++=)', u'reali++like++it++:p++=)', 5, 1, None, None, None, None, None, None), (u'=)++\U0001f600++\U0001f308++\U0001f600', u'=)++\U0001f600++\U0001f308++\U0001f600', 4, 1, None, None, None, None, None, None), (u'liked++it', u'like++it', 2, 1, None, None, None, None, None, None), (u'it++:p++=)', u'it++:p++=)', 3, 1, None, None, None, None, None, None), (u'realy++bad++surprise++for++me', u'reali++bad++surpris++for++me', 5, 1, None, None, None, None, None, None), 
(u'\U0001f62b++,', u'\U0001f62b++,', 2, 1, None, None, None, None, None, None), (u'but++i++realy++liked++it', u'but++i++reali++like++it', 5, 1, None, None, None, None, None, None), (u'for++me', u'for++me', 2, 2, None, None, None, None, None, None), (u'\U0001f308', u'\U0001f308', 1, 3, u'3', u'3', None, None, u'3', None), (u'for++me++\U0001f62b', u'for++me++\U0001f62b', 3, 1, None, None, None, None, None, None), (u'but++i', u'but++i', 2, 1, None, None, None, None, None, None), (u'bad++surprise', u'bad++surpris', 2, 1, None, None, None, None, None, None), (u'i++realy++liked', u'i++reali++like', 3, 1, None, None, None, None, None, None), (u'bad++surprise++for++me', u'bad++surpris++for++me', 4, 1, None, None, None, None, None, None), (u'for++me++\U0001f62b++,++but', u'for++me++\U0001f62b++,++but', 5, 1, None, None, None, None, None, None), (u'realy++liked++it', u'reali++like++it', 3, 1, None, None, None, None, None, None), (u'\U0001f600++\U0001f308', u'\U0001f600++\U0001f308', 2, 3, u'[3, 3]', u'[3, 3]', None, None, u'3', None), (u'it++:p', u'it++:p', 2, 1, None, None, None, None, None, None), (u'liked++it++:p', u'like++it++:p', 3, 1, None, None, None, None, None, None), (u'for', u'for', 1, 3, None, None, None, None, None, None), (u'for++me++\U0001f62b++,', u'for++me++\U0001f62b++,', 4, 1, None, None, None, None, None, None), (u'\U0001f600++\U0001f308++\U0001f600', u'\U0001f600++\U0001f308++\U0001f600', 3, 3, u'[2, 1, "IGNOR"]', u'[3, 1, "IGNOR"]', None, None, u'1', None), (u'realy++bad++surprise', u'reali++bad++surpris', 3, 1, None, None, None, None, None, None), (u'\U0001f62b++,++but++i++realy++liked', u'\U0001f62b++,++but++i++reali++like', 6, 1, None, None, None, None, None, None), (u'was++realy++bad++surprise', u'was++reali++bad++surpris', 4, 1, None, None, None, None, None, None), (u'me++\U0001f62b++,++but++i++realy', u'me++\U0001f62b++,++but++i++reali', 6, 1, None, None, None, None, None, None), (u',', u',', 1, 4, None, None, None, None, None, None), (u',++but', 
u',++but', 2, 2, None, None, None, None, None, None), (u'it++:p++=)++\U0001f600++\U0001f308', u'it++:p++=)++\U0001f600++\U0001f308', 5, 1, None, None, None, None, None, None), (u'was++realy++bad++surprise++for', u'was++reali++bad++surpris++for', 5, 1, None, None, None, None, None, None), (u'=)++\U0001f600', u'=)++\U0001f600', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None), (u'bad++surprise++for++me++\U0001f62b', u'bad++surpris++for++me++\U0001f62b', 5, 1, None, None, None, None, None, None), (u':p++=)', u':p++=)', 2, 1, None, None, None, None, None, None), (u'\U0001f62b++,++but++i', u'\U0001f62b++,++but++i', 4, 1, None, None, None, None, None, None), (u'realy++bad++surprise++for++me++\U0001f62b', u'reali++bad++surpris++for++me++\U0001f62b', 6, 1, None, None, None, None, None, None), (u':p++=)++\U0001f600', u':p++=)++\U0001f600', 3, 1, None, None, None, None, None, None), (u'me++\U0001f62b', u'me++\U0001f62b', 2, 1, None, None, None, None, None, None), (u'realy++liked++it++:p', u'reali++like++it++:p', 4, 1, None, None, None, None, None, None), (u'surprise', u'surpris', 1, 2, None, None, None, None, None, None), (u'it++was++realy++bad++surprise', u'it++was++reali++bad++surpris', 5, 1, None, None, None, None, None, None), (u'it++:p++=)++\U0001f600++\U0001f308++\U0001f600', u'it++:p++=)++\U0001f600++\U0001f308++\U0001f600', 6, 1, None, None, None, None, None, None), (u'\U0001f62b', u'\U0001f62b', 1, 3, u'3', u'3', None, None, u'3', None), (u'but++i++realy', u'but++i++reali', 3, 1, None, None, None, None, None, None), (u'it++was++realy++bad++surprise++for', u'it++was++reali++bad++surpris++for', 6, 1, None, None, None, None, None, None), (u':p++=)++\U0001f600++\U0001f308', u':p++=)++\U0001f600++\U0001f308', 4, 1, None, None, None, None, None, None), (u'bad', u'bad', 1, 6, u'4', u'7', u'1', u'5', u'4', u'1'), (u'surprise++for++me++\U0001f62b++,', u'surpris++for++me++\U0001f62b++,', 5, 1, None, None, None, None, None, None), (u'surprise++for++me++\U0001f62b++,++but', 
u'surpris++for++me++\U0001f62b++,++but', 6, 1, None, None, None, None, None, None), (u'it++was++realy++bad', u'it++was++reali++bad', 4, 1, None, None, None, None, None, None), (u'it++was++realy', u'it++was++reali', 3, 1, None, None, None, None, None, None), (u'bad++surprise++for', u'bad++surpris++for', 3, 1, None, None, None, None, None, None), (u'liked++it++:p++=)', u'like++it++:p++=)', 4, 1, None, None, None, None, None, None), (u'i', u'i', 1, 2, None, None, None, None, None, None), (u'surprise++for', u'surpris++for', 2, 1, None, None, None, None, None, None), (u'realy++liked++it++:p++=)++\U0001f600', u'reali++like++it++:p++=)++\U0001f600', 6, 1, None, None, None, None, None, None), (u',++but++i++realy++liked', u',++but++i++reali++like', 5, 1, None, None, None, None, None, None), (u'realy++bad', u'reali++bad', 2, 1, None, None, None, None, None, None), (u'for++me++\U0001f62b++,++but++i', u'for++me++\U0001f62b++,++but++i', 6, 1, None, None, None, None, None, None), (u'was++realy++bad', u'was++reali++bad', 3, 1, None, None, None, None, None, None), (u'was', u'was', 1, 2, None, None, None, None, None, None), (u'liked', u'like', 1, 1, None, None, None, None, None, None), (u'i++realy', u'i++reali', 2, 1, None, None, None, None, None, None), (u',++but++i++realy++liked++it', u',++but++i++reali++like++it', 6, 1, None, None, None, None, None, None)]
# right_baseline_freezed_not_full_repetativ = [(u'also++very++pity++for++me', u'also++veri++piti++for++me', 5, 1, u'[0, 2, 2, 0, 0]', u'[0, 4, 4, 0, 0]', u'[0, 1, 1, 0, 0]', u'[0, 3, 4, 0, 0]', None, None), (u'it++was++also++very', u'it++was++also++veri', 4, 1, u'[0, 0, 0, 2]', u'[0, 0, 0, 4]', u'[0, 0, 0, 1]', u'[0, 0, 0, 3]', None, None), (u'.++:-(++@real_trump++#shetlife', u'.++:-(++@real_trump++#shetlif', 4, 1, u'[1, 1, 0, 0]', u'[1, 1, 0, 0]', None, None, None, None), (u':-(++@real_trump++#shetlife++#readytogo++http://www.absurd.com', u':-(++@real_trump++#shetlif++#readytogo++http://www.absurd.com', 5, 1, u'[1, 0, 0, 0, 0]', u'[1, 0, 0, 0, 0]', None, None, None, None), (u'glad++to++se++you++-)', u'glad++to++se++you++-)', 5, 1, u'[1, 0, 1, 0, 1]', u'[1, 0, 1, 0, 1]', None, None, None, None), (u'me++.', u'me++.', 2, 1, u'[0, 1]', u'[0, 1]', None, None, None, None), (u'.++:-(++@real_trump', u'.++:-(++@real_trump', 3, 1, u'[1, 1, 0]', u'[1, 1, 0]', None, None, None, None), (u'-)', u'-)', 1, 1, u'1', u'1', None, None, u'1', None), (u'you++-)', u'you++-)', 2, 1, u'[0, 1]', u'[0, 1]', None, None, None, None), (u'me++.++:-(', u'me++.++:-(', 3, 1, u'[0, 1, 1]', u'[0, 1, 1]', None, None, None, None), (u'.++:-(++@real_trump++#shetlife++#readytogo', u'.++:-(++@real_trump++#shetlif++#readytogo', 5, 1, u'[1, 1, 0, 0, 0]', u'[1, 1, 0, 0, 0]', None, None, None, None), (u'pity++for++me++.', u'piti++for++me++.', 4, 1, u'[2, 0, 0, 1]', u'[4, 0, 0, 1]', u'[1, 0, 0, 0]', u'[4, 0, 0, 0]', None, None), (u'for++me++.++:-(', u'for++me++.++:-(', 4, 1, u'[0, 0, 1, 1]', u'[0, 0, 1, 1]', None, None, None, None), (u'me++.++:-(++@real_trump++#shetlife++#readytogo', u'me++.++:-(++@real_trump++#shetlif++#readytogo', 6, 1, u'[0, 1, 1, 0, 0, 0]', u'[0, 1, 1, 0, 0, 0]', None, None, None, None), (u'it++was++also++very++pity', u'it++was++also++veri++piti', 5, 1, u'[0, 0, 0, 2, 2]', u'[0, 0, 0, 4, 4]', u'[0, 0, 0, 1, 1]', u'[0, 0, 0, 3, 4]', None, None), (u'very++pity++for++me++.++:-(', 
u'veri++piti++for++me++.++:-(', 6, 1, u'[2, 2, 0, 0, 1, 1]', u'[4, 4, 0, 0, 1, 1]', u'[1, 1, 0, 0, 0, 0]', u'[3, 4, 0, 0, 0, 0]', None, None), (u'to++se++you++-)', u'to++se++you++-)', 4, 1, u'[0, 1, 0, 1]', u'[0, 1, 0, 1]', None, None, None, None), (u'it++was++also++very++pity++for', u'it++was++also++veri++piti++for', 6, 1, u'[0, 0, 0, 2, 2, 0]', u'[0, 0, 0, 4, 4, 0]', u'[0, 0, 0, 1, 1, 0]', u'[0, 0, 0, 3, 4, 0]', None, None), (u'very++pity++for', u'veri++piti++for', 3, 1, u'[2, 2, 0]', u'[4, 4, 0]', u'[1, 1, 0]', u'[3, 4, 0]', None, None), (u'se++you++-)', u'se++you++-)', 3, 1, u'[1, 0, 1]', u'[1, 0, 1]', None, None, None, None), (u'glad++to', u'glad++to', 2, 1, u'[1, 0]', u'[1, 0]', None, None, None, None), (u'but++it++was++also++very', u'but++it++was++also++veri', 5, 1, u'[0, 0, 0, 0, 2]', u'[0, 0, 0, 0, 4]', u'[0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 3]', None, None), (u'for++me++.', u'for++me++.', 3, 1, u'[0, 0, 1]', u'[0, 0, 1]', None, None, None, None), (u'pity++for++me++.++:-(', u'piti++for++me++.++:-(', 5, 1, u'[2, 0, 0, 1, 1]', u'[4, 0, 0, 1, 1]', u'[1, 0, 0, 0, 0]', u'[4, 0, 0, 0, 0]', None, None), (u'.++:-(++@real_trump++#shetlife++#readytogo++http://www.absurd.com', u'.++:-(++@real_trump++#shetlif++#readytogo++http://www.absurd.com', 6, 1, u'[1, 1, 0, 0, 0, 0]', u'[1, 1, 0, 0, 0, 0]', None, None, None, None), (u'pity', u'piti', 1, 4, u'2', u'4', u'1', u'4', u'2', u'1'), (u'me++.++:-(++@real_trump', u'me++.++:-(++@real_trump', 4, 1, u'[0, 1, 1, 0]', u'[0, 1, 1, 0]', None, None, None, None), (u'but++it++was++also++very++pity', u'but++it++was++also++veri++piti', 6, 1, u'[0, 0, 0, 0, 2, 2]', u'[0, 0, 0, 0, 4, 4]', u'[0, 0, 0, 0, 1, 1]', u'[0, 0, 0, 0, 3, 4]', None, None), (u'very++pity++for++me++.', u'veri++piti++for++me++.', 5, 1, u'[2, 2, 0, 0, 1]', u'[4, 4, 0, 0, 1]', u'[1, 1, 0, 0, 0]', u'[3, 4, 0, 0, 0]', None, None), (u'also++very++pity++for++me++.', u'also++veri++piti++for++me++.', 6, 1, u'[0, 2, 2, 0, 0, 1]', u'[0, 4, 4, 0, 0, 1]', u'[0, 1, 1, 0, 0, 0]', 
u'[0, 3, 4, 0, 0, 0]', None, None), (u'se++you', u'se++you', 2, 1, u'[1, 0]', u'[1, 0]', None, None, None, None), (u'se', u'se', 1, 1, u'1', u'1', None, None, u'1', None), (u'for++me++.++:-(++@real_trump++#shetlife', u'for++me++.++:-(++@real_trump++#shetlif', 6, 1, u'[0, 0, 1, 1, 0, 0]', u'[0, 0, 1, 1, 0, 0]', None, None, None, None), (u'glad++to++se++you', u'glad++to++se++you', 4, 1, u'[1, 0, 1, 0]', u'[1, 0, 1, 0]', None, None, None, None), (u'very++pity++for++me', u'veri++piti++for++me', 4, 1, u'[2, 2, 0, 0]', u'[4, 4, 0, 0]', u'[1, 1, 0, 0]', u'[3, 4, 0, 0]', None, None), (u':-(++@real_trump', u':-(++@real_trump', 2, 1, u'[1, 0]', u'[1, 0]', None, None, None, None), (u'pity++for++me++.++:-(++@real_trump', u'piti++for++me++.++:-(++@real_trump', 6, 1, u'[2, 0, 0, 1, 1, 0]', u'[4, 0, 0, 1, 1, 0]', u'[1, 0, 0, 0, 0, 0]', u'[4, 0, 0, 0, 0, 0]', None, None), (u'was++also++very++pity++for', u'was++also++veri++piti++for', 5, 1, u'[0, 0, 2, 2, 0]', u'[0, 0, 4, 4, 0]', u'[0, 0, 1, 1, 0]', u'[0, 0, 3, 4, 0]', None, None), (u'also++very', u'also++veri', 2, 1, u'[0, 2]', u'[0, 4]', u'[0, 1]', u'[0, 3]', None, None), (u'to++se', u'to++se', 2, 1, u'[0, 1]', u'[0, 1]', None, None, None, None), (u'pity++for', u'piti++for', 2, 1, u'[2, 0]', u'[4, 0]', u'[1, 0]', u'[4, 0]', None, None), (u'to++se++you', u'to++se++you', 3, 1, u'[0, 1, 0]', u'[0, 1, 0]', None, None, None, None), (u'for++me++.++:-(++@real_trump', u'for++me++.++:-(++@real_trump', 5, 1, u'[0, 0, 1, 1, 0]', u'[0, 0, 1, 1, 0]', None, None, None, None), (u'also++very++pity', u'also++veri++piti', 3, 1, u'[0, 2, 2]', u'[0, 4, 4]', u'[0, 1, 1]', u'[0, 3, 4]', None, None), (u'very', u'veri', 1, 3, u'2', u'4', u'1', u'3', u'2', u'1'), (u'was++also++very', u'was++also++veri', 3, 1, u'[0, 0, 2]', u'[0, 0, 4]', u'[0, 0, 1]', u'[0, 0, 3]', None, None), (u'pity++for++me', u'piti++for++me', 3, 1, u'[2, 0, 0]', u'[4, 0, 0]', u'[1, 0, 0]', u'[4, 0, 0]', None, None), (u'me++.++:-(++@real_trump++#shetlife', 
u'me++.++:-(++@real_trump++#shetlif', 5, 1, u'[0, 1, 1, 0, 0]', u'[0, 1, 1, 0, 0]', None, None, None, None), (u'very++pity', u'veri++piti', 2, 1, u'[2, 2]', u'[4, 4]', u'[1, 1]', u'[3, 4]', None, None), (u'was++also++very++pity++for++me', u'was++also++veri++piti++for++me', 6, 1, u'[0, 0, 2, 2, 0, 0]', u'[0, 0, 4, 4, 0, 0]', u'[0, 0, 1, 1, 0, 0]', u'[0, 0, 3, 4, 0, 0]', None, None), (u'also++very++pity++for', u'also++veri++piti++for', 4, 1, u'[0, 2, 2, 0]', u'[0, 4, 4, 0]', u'[0, 1, 1, 0]', u'[0, 3, 4, 0]', None, None), (u':-(++@real_trump++#shetlife', u':-(++@real_trump++#shetlif', 3, 1, u'[1, 0, 0]', u'[1, 0, 0]', None, None, None, None), (u'glad++to++se', u'glad++to++se', 3, 1, u'[1, 0, 1]', u'[1, 0, 1]', None, None, None, None), (u':-(++@real_trump++#shetlife++#readytogo', u':-(++@real_trump++#shetlif++#readytogo', 4, 1, u'[1, 0, 0, 0]', u'[1, 0, 0, 0]', None, None, None, None), (u'.++:-(', u'.++:-(', 2, 1, u'[1, 1]', u'[1, 1]', None, None, None, None), (u'glad', u'glad', 1, 1, u'1', u'1', None, None, u'1', None), (u'.++but++it++was++also++very', u'.++but++it++was++also++veri', 6, 1, u'[0, 0, 0, 0, 0, 2]', u'[0, 0, 0, 0, 0, 4]', u'[0, 0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 0, 3]', None, None), (u'was++also++very++pity', u'was++also++veri++piti', 4, 1, u'[0, 0, 2, 2]', u'[0, 0, 4, 4]', u'[0, 0, 1, 1]', u'[0, 0, 3, 4]', None, None), (u'bad++news++,++which', u'bad++news++,++which', 4, 1, u'[4, 0, 0, 0]', u'[7, 0, 0, 0]', u'[1, 0, 0, 0]', u'[5, 0, 0, 0]', None, None), (u'tiny++model++,++which++we', u'tini++model++,++which++we', 5, 1, u'[0, 1, 0, 0, 0]', u'[0, 2, 0, 0, 0]', u'[1, 0, 0, 0, 0]', u'[6, 0, 0, 0, 0]', None, None), (u'acept++.++-(', u'acept++.++-(', 3, 1, u'[0, 0, 1]', u'[0, 0, 1]', None, None, None, None), (u'not++acept++.++-(++\U0001f62b', u'not++acept++.++-(++\U0001f62b', 5, 1, u'[0, 0, 0, 1, 1]', u'[0, 0, 0, 1, 1]', None, None, None, None), (u'tiny++model++,++which', u'tini++model++,++which', 4, 1, u'[0, 1, 0, 0]', u'[0, 2, 0, 0]', u'[1, 0, 0, 0]', u'[6, 0, 
0, 0]', None, None), (u'a++bad++news++,', u'a++bad++news++,', 4, 1, u'[0, 4, 0, 0]', u'[0, 7, 0, 0]', u'[0, 1, 0, 0]', u'[0, 5, 0, 0]', None, None), (u'-(++\U0001f62b', u'-(++\U0001f62b', 2, 1, u'[1, 1]', u'[1, 1]', None, None, None, None), (u'-(++\U0001f62b++:-(++#shetlife++http://www.noooo.com', u'-(++\U0001f62b++:-(++#shetlif++http://www.noooo.com', 5, 1, u'[1, 1, 1, 0, 0]', u'[1, 1, 1, 0, 0]', u'[0, 0, 0, 1, 0]', u'[0, 0, 0, 2, 0]', None, None), (u'explain++a++big', u'explain++a++big', 3, 1, u'[0, 0, 2]', u'[0, 0, 2]', u'[0, 0, 1]', u'[0, 0, 2]', None, None), (u'explain++a++big++things++.', u'explain++a++big++thing++.', 5, 1, u'[0, 0, 2, 0, 0]', u'[0, 0, 2, 0, 0]', u'[0, 0, 1, 0, 0]', u'[0, 0, 2, 0, 0]', None, None), (u'a++bad++news', u'a++bad++news', 3, 1, u'[0, 4, 0]', u'[0, 7, 0]', u'[0, 1, 0]', u'[0, 5, 0]', None, None), (u'bad++news++,++which++we', u'bad++news++,++which++we', 5, 1, u'[4, 0, 0, 0, 0]', u'[7, 0, 0, 0, 0]', u'[1, 0, 0, 0, 0]', u'[5, 0, 0, 0, 0]', None, None), (u'-(', u'-(', 1, 1, u'1', u'1', None, None, u'1', None), (u'bad++news++,++which++we++can', u'bad++news++,++which++we++can', 6, 1, u'[4, 0, 0, 0, 0, 0]', u'[7, 0, 0, 0, 0, 0]', u'[1, 0, 0, 0, 0, 0]', u'[5, 0, 0, 0, 0, 0]', None, None), (u'bad++news', u'bad++news', 2, 1, u'[4, 0]', u'[7, 0]', u'[1, 0]', u'[5, 0]', None, None), (u'a++bad++news++,++which', u'a++bad++news++,++which', 5, 1, u'[0, 4, 0, 0, 0]', u'[0, 7, 0, 0, 0]', u'[0, 1, 0, 0, 0]', u'[0, 5, 0, 0, 0]', None, None), (u'big++things++.', u'big++thing++.', 3, 1, u'[2, 0, 0]', u'[2, 0, 0]', u'[1, 0, 0]', u'[2, 0, 0]', None, None), (u'-(++\U0001f62b++:-(', u'-(++\U0001f62b++:-(', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, None, None), (u'model++,++which++we', u'model++,++which++we', 4, 1, u'[1, 0, 0, 0]', u'[2, 0, 0, 0]', None, None, None, None), (u'#shetlife', u'#shetlif', 1, 3, None, None, u'1', u'2', None, u'1'), (u'can++not++acept++.++-(++\U0001f62b', u'can++not++acept++.++-(++\U0001f62b', 6, 1, u'[0, 0, 0, 0, 1, 1]', u'[0, 
0, 0, 0, 1, 1]', None, None, None, None), (u'big++things', u'big++thing', 2, 1, u'[2, 0]', u'[2, 0]', u'[1, 0]', u'[2, 0]', None, None), (u'acept++.++-(++\U0001f62b++:-(', u'acept++.++-(++\U0001f62b++:-(', 5, 1, u'[0, 0, 1, 1, 1]', u'[0, 0, 1, 1, 1]', None, None, None, None), (u'for++explain++a++big++things++.', u'for++explain++a++big++thing++.', 6, 1, u'[0, 0, 0, 2, 0, 0]', u'[0, 0, 0, 2, 0, 0]', u'[0, 0, 0, 1, 0, 0]', u'[0, 0, 0, 2, 0, 0]', None, None), (u'\U0001f62b++:-(++#shetlife++http://www.noooo.com', u'\U0001f62b++:-(++#shetlif++http://www.noooo.com', 4, 1, u'[1, 1, 0, 0]', u'[1, 1, 0, 0]', u'[0, 0, 1, 0]', u'[0, 0, 2, 0]', None, None), (u'not++acept++.++-(', u'not++acept++.++-(', 4, 1, u'[0, 0, 0, 1]', u'[0, 0, 0, 1]', None, None, None, None), (u':-(++#shetlife', u':-(++#shetlif', 2, 1, u'[1, 0]', u'[1, 0]', u'[0, 1]', u'[0, 2]', None, None), (u'.++-(++\U0001f62b++:-(++#shetlife++http://www.noooo.com', u'.++-(++\U0001f62b++:-(++#shetlif++http://www.noooo.com', 6, 1, u'[0, 1, 1, 1, 0, 0]', u'[0, 1, 1, 1, 0, 0]', u'[0, 0, 0, 0, 1, 0]', u'[0, 0, 0, 0, 2, 0]', None, None), (u'a++big++things', u'a++big++thing', 3, 1, u'[0, 2, 0]', u'[0, 2, 0]', u'[0, 1, 0]', u'[0, 2, 0]', None, None), (u'.++-(', u'.++-(', 2, 1, u'[0, 1]', u'[0, 1]', None, None, None, None), (u'a++bad', u'a++bad', 2, 1, u'[0, 4]', u'[0, 7]', u'[0, 1]', u'[0, 5]', None, None), (u'can++not++acept++.++-(', u'can++not++acept++.++-(', 5, 1, u'[0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 1]', None, None, None, None), (u'a++big++things++.', u'a++big++thing++.', 4, 1, u'[0, 2, 0, 0]', u'[0, 2, 0, 0]', u'[0, 1, 0, 0]', u'[0, 2, 0, 0]', None, None), (u'-(++\U0001f62b++:-(++#shetlife', u'-(++\U0001f62b++:-(++#shetlif', 4, 1, u'[1, 1, 1, 0]', u'[1, 1, 1, 0]', u'[0, 0, 0, 1]', u'[0, 0, 0, 2]', None, None), (u'acept++.++-(++\U0001f62b', u'acept++.++-(++\U0001f62b', 4, 1, u'[0, 0, 1, 1]', u'[0, 0, 1, 1]', None, None, None, None), (u':-(', u':-(', 1, 2, u'2', u'2', None, None, u'2', None), 
(u':-(++#shetlife++http://www.noooo.com', u':-(++#shetlif++http://www.noooo.com', 3, 1, u'[1, 0, 0]', u'[1, 0, 0]', u'[0, 1, 0]', u'[0, 2, 0]', None, None), (u'explain++a++big++things', u'explain++a++big++thing', 4, 1, u'[0, 0, 2, 0]', u'[0, 0, 2, 0]', u'[0, 0, 1, 0]', u'[0, 0, 2, 0]', None, None), (u'tiny++model++,++which++we++can', u'tini++model++,++which++we++can', 6, 1, u'[0, 1, 0, 0, 0, 0]', u'[0, 2, 0, 0, 0, 0]', u'[1, 0, 0, 0, 0, 0]', u'[6, 0, 0, 0, 0, 0]', None, None), (u'acept++.++-(++\U0001f62b++:-(++#shetlife', u'acept++.++-(++\U0001f62b++:-(++#shetlif', 6, 1, u'[0, 0, 1, 1, 1, 0]', u'[0, 0, 1, 1, 1, 0]', u'[0, 0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 0, 2]', None, None), (u'use++for++explain++a++big++things', u'use++for++explain++a++big++thing', 6, 1, u'[0, 0, 0, 0, 2, 0]', u'[0, 0, 0, 0, 2, 0]', u'[0, 0, 0, 0, 1, 0]', u'[0, 0, 0, 0, 2, 0]', None, None), (u'use++for++explain++a++big', u'use++for++explain++a++big', 5, 1, u'[0, 0, 0, 0, 2]', u'[0, 0, 0, 0, 2]', u'[0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 2]', None, None), (u'model++,++which++we++can++use', u'model++,++which++we++can++use', 6, 1, u'[1, 0, 0, 0, 0, 0]', u'[2, 0, 0, 0, 0, 0]', None, None, None, None), (u'not++acept++.++-(++\U0001f62b++:-(', u'not++acept++.++-(++\U0001f62b++:-(', 6, 1, u'[0, 0, 0, 1, 1, 1]', u'[0, 0, 0, 1, 1, 1]', None, None, None, None), (u'model++,++which++we++can', u'model++,++which++we++can', 5, 1, u'[1, 0, 0, 0, 0]', u'[2, 0, 0, 0, 0]', None, None, None, None), (u'bad++news++,', u'bad++news++,', 3, 1, u'[4, 0, 0]', u'[7, 0, 0]', u'[1, 0, 0]', u'[5, 0, 0]', None, None), (u'.++-(++\U0001f62b++:-(', u'.++-(++\U0001f62b++:-(', 4, 1, u'[0, 1, 1, 1]', u'[0, 1, 1, 1]', None, None, None, None), (u'for++explain++a++big', u'for++explain++a++big', 4, 1, u'[0, 0, 0, 2]', u'[0, 0, 0, 2]', u'[0, 0, 0, 1]', u'[0, 0, 0, 2]', None, None), (u'a++bad++news++,++which++we', u'a++bad++news++,++which++we', 6, 1, u'[0, 4, 0, 0, 0, 0]', u'[0, 7, 0, 0, 0, 0]', u'[0, 1, 0, 0, 0, 0]', u'[0, 5, 0, 0, 0, 0]', None, 
None), (u'#shetlife++http://www.noooo.com', u'#shetlif++http://www.noooo.com', 2, 1, None, None, u'[1, 0]', u'[2, 0]', None, None), (u'\U0001f62b++:-(', u'\U0001f62b++:-(', 2, 1, u'[1, 1]', u'[1, 1]', None, None, None, None), (u'.++-(++\U0001f62b', u'.++-(++\U0001f62b', 3, 1, u'[0, 1, 1]', u'[0, 1, 1]', None, None, None, None), (u'we++can++not++acept++.++-(', u'we++can++not++acept++.++-(', 6, 1, u'[0, 0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 0, 1]', None, None, None, None), (u'model++,++which', u'model++,++which', 3, 1, u'[1, 0, 0]', u'[2, 0, 0]', None, None, None, None), (u'.++-(++\U0001f62b++:-(++#shetlife', u'.++-(++\U0001f62b++:-(++#shetlif', 5, 1, u'[0, 1, 1, 1, 0]', u'[0, 1, 1, 1, 0]', u'[0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 2]', None, None), (u'can++use++for++explain++a++big', u'can++use++for++explain++a++big', 6, 1, u'[0, 0, 0, 0, 0, 2]', u'[0, 0, 0, 0, 0, 2]', u'[0, 0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 0, 2]', None, None), (u'\U0001f62b++:-(++#shetlife', u'\U0001f62b++:-(++#shetlif', 3, 1, u'[1, 1, 0]', u'[1, 1, 0]', u'[0, 0, 1]', u'[0, 0, 2]', None, None), (u'for++explain++a++big++things', u'for++explain++a++big++thing', 5, 1, u'[0, 0, 0, 2, 0]', u'[0, 0, 0, 2, 0]', u'[0, 0, 0, 1, 0]', u'[0, 0, 0, 2, 0]', None, None), (u'model', u'model', 1, 2, u'1', u'2', None, None, u'1', None), (u'but++a++big++explanation++.++right', u'but++a++big++explan++.++right', 6, 1, u'[0, 0, 0, 1, 0, 1]', u'[0, 0, 0, 1, 0, 1]', u'[0, 0, 1, 0, 0, 0]', u'[0, 0, 3, 0, 0, 0]', None, None), (u'do++you++think++about', u'do++you++think++about', 4, 1, u'[0, 1, 0, 0]', u'[0, 1, 0, 0]', None, None, None, None), (u'you++\U0001f600++\U0001f308++\U0001f600', u'you++\U0001f600++\U0001f308++\U0001f600', 4, 1, u'[1, 2, 1, "IGNOR"]', u'[2, 2, 1, "IGNOR"]', None, None, None, None), (u'it++?++1++\U0001f62b', u'it++?++1++\U0001f62b', 4, 1, u'[0, 1, 1, 1]', u'[0, 1, 1, 1]', None, None, None, None), (u'a++big', u'a++big', 2, 2, u'[0, 2]', u'[0, 2]', u'[0, 2]', u'[0, 5]', None, None), (u'1++\U0001f62b++1++.', 
u'1++\U0001f62b++1++.', 4, 1, u'[2, 1, "IGNOR", 0]', u'[2, 1, "IGNOR", 0]', None, None, None, None), (u'you++think', u'you++think', 2, 1, u'[1, 0]', u'[1, 0]', None, None, None, None), (u',++but++a++big++explanation++.', u',++but++a++big++explan++.', 6, 1, u'[0, 0, 0, 0, 1, 0]', u'[0, 0, 0, 0, 1, 0]', u'[0, 0, 0, 1, 0, 0]', u'[0, 0, 0, 3, 0, 0]', None, None), (u'what++do++you++think++about++it', u'what++do++you++think++about++it', 6, 1, u'[0, 0, 1, 0, 0, 0]', u'[0, 0, 1, 0, 0, 0]', None, None, None, None), (u'think++about++it++?++1++\U0001f62b', u'think++about++it++?++1++\U0001f62b', 6, 1, u'[0, 0, 0, 1, 1, 1]', u'[0, 0, 0, 1, 1, 1]', None, None, None, None), (u'but++you', u'but++you', 2, 4, u'[10, 6]', u'[15, 8]', u'[4, 2]', u'[10, 4]', None, None), (u'but++you++\U0001f600++\U0001f308++\U0001f600', u'but++you++\U0001f600++\U0001f308++\U0001f600', 5, 1, u'[3, 1, 2, 1, "IGNOR"]', u'[3, 2, 2, 1, "IGNOR"]', u'[1, 0, 0, 0, "IGNOR"]', u'[3, 0, 0, 0, "IGNOR"]', None, None), (u'.++but', u'.++but', 2, 3, u'[0, 4]', u'[0, 7]', u'[0, 2]', u'[0, 4]', None, None), (u'big++explanation++.++right++?++what', u'big++explan++.++right++?++what', 6, 1, u'[0, 1, 0, 1, 0, 0]', u'[0, 1, 0, 1, 0, 0]', u'[1, 0, 0, 0, 0, 0]', u'[3, 0, 0, 0, 0, 0]', None, None), (u'tiny++surprise++.++but', u'tini++surpris++.++but', 4, 1, u'[1, 0, 0, 2]', u'[1, 0, 0, 2]', u'[1, 0, 0, 1]', u'[3, 0, 0, 2]', None, None), (u'about++it++?++1++\U0001f62b++1', u'about++it++?++1++\U0001f62b++1', 6, 1, u'[0, 0, 1, 2, 1, "IGNOR"]', u'[0, 0, 1, 2, 1, "IGNOR"]', None, None, None, None), (u'you++think++about++it++?', u'you++think++about++it++?', 5, 1, u'[1, 0, 0, 0, 1]', u'[1, 0, 0, 0, 1]', None, None, None, None), (u'\U0001f600++\U0001f308++\U0001f600++\U0001f308', u'\U0001f600++\U0001f308++\U0001f600++\U0001f308', 4, 1, u'[2, 2, "IGNOR", "IGNOR"]', u'[2, 2, "IGNOR", "IGNOR"]', None, None, None, None), (u'what++do++you', u'what++do++you', 3, 1, u'[0, 0, 1]', u'[0, 0, 1]', None, None, None, None), 
(u'but++a++big++explanation++.', u'but++a++big++explan++.', 5, 1, u'[0, 0, 0, 1, 0]', u'[0, 0, 0, 1, 0]', u'[0, 0, 1, 0, 0]', u'[0, 0, 3, 0, 0]', None, None), (u'1', u'1', 1, 2, u'2', u'2', None, None, u'2', None), (u'model++,', u'model++,', 2, 2, u'[1, 0]', u'[2, 0]', None, None, None, None), (u'?++what++do++you++think++about', u'?++what++do++you++think++about', 6, 1, u'[0, 0, 0, 1, 0, 0]', u'[0, 0, 0, 1, 0, 0]', None, None, None, None), (u'what++do++you++think', u'what++do++you++think', 4, 1, u'[0, 0, 1, 0]', u'[0, 0, 1, 0]', None, None, None, None), (u'right++?++what++do', u'right++?++what++do', 4, 1, u'[1, 0, 0, 0]', u'[1, 0, 0, 0]', None, None, None, None), (u'.++right++?++what', u'.++right++?++what', 4, 1, u'[0, 1, 0, 0]', u'[0, 1, 0, 0]', None, None, None, None), (u'.++but++you', u'.++but++you', 3, 2, u'[0, 4, 4]', u'[0, 7, 4]', u'[0, 2, 2]', u'[0, 4, 4]', None, None), (u'about++it++?++1', u'about++it++?++1', 4, 1, u'[0, 0, 1, 1]', u'[0, 0, 1, 1]', None, None, None, None), (u'tiny', u'tini', 1, 10, u'1', u'1', u'2', u'9', u'1', u'2'), (u'tiny++model', u'tini++model', 2, 2, u'[0, 1]', u'[0, 2]', u'[1, 0]', u'[6, 0]', None, None), (u'surprise++.++but++you', u'surpris++.++but++you', 4, 1, u'[0, 0, 2, 2]', u'[0, 0, 2, 2]', u'[0, 0, 1, 1]', u'[0, 0, 2, 2]', None, None), (u'explanation++.++right++?++what', u'explan++.++right++?++what', 5, 1, u'[1, 0, 1, 0, 0]', u'[1, 0, 1, 0, 0]', None, None, None, None), (u'1++.++but++you++but', u'1++.++but++you++but', 5, 1, u'[1, 0, 5, 2, "IGNOR"]', u'[1, 0, 10, 2, "IGNOR"]', u'[0, 0, 2, 1, "IGNOR"]', u'[0, 0, 5, 2, "IGNOR"]', None, None), (u'model++,++but++a++big++explanation', u'model++,++but++a++big++explan', 6, 1, u'[0, 0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 1, 0]', u'[0, 0, 0, 0, 3, 0]', None, None), (u'?++1++\U0001f62b++1++.++but', u'?++1++\U0001f62b++1++.++but', 6, 1, u'[1, 2, 1, "IGNOR", 0, 2]', u'[1, 2, 1, "IGNOR", 0, 5]', u'[0, 0, 0, "IGNOR", 0, 1]', u'[0, 0, 0, "IGNOR", 0, 2]', None, None), 
(u'a++big++explanation', u'a++big++explan', 3, 1, u'[0, 0, 1]', u'[0, 0, 1]', u'[0, 1, 0]', u'[0, 3, 0]', None, None), (u'explanation++.++right++?++what++do', u'explan++.++right++?++what++do', 6, 1, u'[1, 0, 1, 0, 0, 0]', u'[1, 0, 1, 0, 0, 0]', None, None, None, None), (u'right', u'right', 1, 1, u'1', u'1', None, None, u'1', None), (u'you++but++you', u'you++but++you', 3, 2, u'[6, 6, "IGNOR"]', u'[8, 8, "IGNOR"]', u'[2, 2, "IGNOR"]', u'[4, 6, "IGNOR"]', None, None), (u'big++explanation++.++right++?', u'big++explan++.++right++?', 5, 1, u'[0, 1, 0, 1, 0]', u'[0, 1, 0, 1, 0]', u'[1, 0, 0, 0, 0]', u'[3, 0, 0, 0, 0]', None, None), (u'it++?', u'it++?', 2, 1, u'[0, 1]', u'[0, 1]', None, None, None, None), (u'what++do++you++think++about', u'what++do++you++think++about', 5, 1, u'[0, 0, 1, 0, 0]', u'[0, 0, 1, 0, 0]', None, None, None, None), (u'but++you++but++you++\U0001f600++\U0001f308', u'but++you++but++you++\U0001f600++\U0001f308', 6, 1, u'[5, 3, "IGNOR", "IGNOR", 1, 1]', u'[5, 4, "IGNOR", "IGNOR", 1, 1]', u'[2, 1, "IGNOR", "IGNOR", 0, 0]', u'[5, 2, "IGNOR", "IGNOR", 0, 0]', None, None), (u'\U0001f308++\U0001f600++\U0001f308', u'\U0001f308++\U0001f600++\U0001f308', 3, 1, u'[2, 1, "IGNOR"]', u'[2, 1, "IGNOR"]', None, None, None, None), (u'explanation++.++right', u'explan++.++right', 3, 1, u'[1, 0, 1]', u'[1, 0, 1]', None, None, None, None), (u'.', u'.', 1, 7, u'1', u'1', None, None, u'1', None), (u'you', u'you', 1, 8, u'7', u'9', u'2', u'4', u'7', u'2'), (u'surprise++.++but', u'surpris++.++but', 3, 1, u'[0, 0, 2]', u'[0, 0, 2]', u'[0, 0, 1]', u'[0, 0, 2]', None, None), (u'?', u'?', 1, 2, u'1', u'1', None, None, u'1', None), (u'explanation++.++right++?', u'explan++.++right++?', 4, 1, u'[1, 0, 1, 0]', u'[1, 0, 1, 0]', None, None, None, None), (u'it++?++1', u'it++?++1', 3, 1, u'[0, 1, 1]', u'[0, 1, 1]', None, None, None, None), (u'you++think++about++it++?++1', u'you++think++about++it++?++1', 6, 1, u'[1, 0, 0, 0, 1, 1]', u'[1, 0, 0, 0, 1, 1]', None, None, None, None), 
(u'but++you++\U0001f600++\U0001f308', u'but++you++\U0001f600++\U0001f308', 4, 1, u'[3, 1, 1, 1]', u'[3, 2, 1, 1]', u'[1, 0, 0, 0]', u'[3, 0, 0, 0]', None, None), (u'but++a++big', u'but++a++big', 3, 1, None, None, u'[0, 0, 1]', u'[0, 0, 3]', None, None), (u'tiny++surprise++.++but++you++but', u'tini++surpris++.++but++you++but', 6, 1, u'[1, 0, 0, 5, 2, "IGNOR"]', u'[1, 0, 0, 5, 2, "IGNOR"]', u'[1, 0, 0, 2, 1, "IGNOR"]', u'[3, 0, 0, 5, 2, "IGNOR"]', None, None), (u'do++you++think++about++it', u'do++you++think++about++it', 5, 1, u'[0, 1, 0, 0, 0]', u'[0, 1, 0, 0, 0]', None, None, None, None), (u'big++explanation++.', u'big++explan++.', 3, 1, u'[0, 1, 0]', u'[0, 1, 0]', u'[1, 0, 0]', u'[3, 0, 0]', None, None), (u'think++about++it++?++1', u'think++about++it++?++1', 5, 1, u'[0, 0, 0, 1, 1]', u'[0, 0, 0, 1, 1]', None, None, None, None), (u'.++right', u'.++right', 2, 1, u'[0, 1]', u'[0, 1]', None, None, None, None), (u'explanation++.', u'explan++.', 2, 1, u'[1, 0]', u'[1, 0]', None, None, None, None), (u'but++you++but', u'but++you++but', 3, 2, u'[10, 4, "IGNOR"]', u'[15, 4, "IGNOR"]', u'[4, 2, "IGNOR"]', u'[10, 4, "IGNOR"]', None, None), (u'.++but++you++but++you++\U0001f600', u'.++but++you++but++you++\U0001f600', 6, 1, u'[0, 5, 3, "IGNOR", "IGNOR", 1]', u'[0, 5, 4, "IGNOR", "IGNOR", 1]', u'[0, 2, 1, "IGNOR", "IGNOR", 0]', u'[0, 5, 2, "IGNOR", "IGNOR", 0]', None, None), (u'tiny++surprise', u'tini++surpris', 2, 1, u'[1, 0]', u'[1, 0]', u'[1, 0]', u'[3, 0]', None, None), (u'\U0001f600++\U0001f308++\U0001f600++\U0001f308++\U0001f600', u'\U0001f600++\U0001f308++\U0001f600++\U0001f308++\U0001f600', 5, 1, u'[3, 2, "IGNOR", "IGNOR", "IGNOR"]', u'[3, 2, "IGNOR", "IGNOR", "IGNOR"]', None, None, None, None), (u'you++think++about', u'you++think++about', 3, 1, u'[1, 0, 0]', u'[1, 0, 0]', None, None, None, None), (u'?++what++do++you', u'?++what++do++you', 4, 1, u'[0, 0, 0, 1]', u'[0, 0, 0, 1]', None, None, None, None), (u'explanation', u'explan', 1, 1, u'1', u'1', None, None, u'1', None), 
(u'you++but++you++\U0001f600++\U0001f308++\U0001f600', u'you++but++you++\U0001f600++\U0001f308++\U0001f600', 6, 1, u'[3, 3, "IGNOR", 2, 1, "IGNOR"]', u'[4, 3, "IGNOR", 2, 1, "IGNOR"]', u'[1, 1, "IGNOR", 0, 0, "IGNOR"]', u'[2, 3, "IGNOR", 0, 0, "IGNOR"]', None, None), (u'?++1', u'?++1', 2, 1, u'[1, 1]', u'[1, 1]', None, None, None, None), (u'do++you++think++about++it++?', u'do++you++think++about++it++?', 6, 1, u'[0, 1, 0, 0, 0, 1]', u'[0, 1, 0, 0, 0, 1]', None, None, None, None), (u'do++you++think', u'do++you++think', 3, 1, u'[0, 1, 0]', u'[0, 1, 0]', None, None, None, None), (u'.++but++you++but++you', u'.++but++you++but++you', 5, 2, u'[0, 10, 6, "IGNOR", "IGNOR"]', u'[0, 15, 8, "IGNOR", "IGNOR"]', u'[0, 4, 2, "IGNOR", "IGNOR"]', u'[0, 10, 4, "IGNOR", "IGNOR"]', None, None), (u'\U0001f62b++1++.++but', u'\U0001f62b++1++.++but', 4, 1, u'[1, 1, 0, 2]', u'[1, 1, 0, 5]', u'[0, 0, 0, 1]', u'[0, 0, 0, 2]', None, None), (u'right++?', u'right++?', 2, 1, u'[1, 0]', u'[1, 0]', None, None, None, None), (u'but++you++\U0001f600', u'but++you++\U0001f600', 3, 1, u'[3, 1, 1]', u'[3, 2, 1]', u'[1, 0, 0]', u'[3, 0, 0]', None, None), (u'model++,++but++a++big', u'model++,++but++a++big', 5, 1, None, None, u'[0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 3]', None, None), (u'\U0001f62b++1++.++but++you++but', u'\U0001f62b++1++.++but++you++but', 6, 1, u'[1, 1, 0, 5, 2, "IGNOR"]', u'[1, 1, 0, 10, 2, "IGNOR"]', u'[0, 0, 0, 2, 1, "IGNOR"]', u'[0, 0, 0, 5, 2, "IGNOR"]', None, None), (u'tiny++surprise++.', u'tini++surpris++.', 3, 1, u'[1, 0, 0]', u'[1, 0, 0]', u'[1, 0, 0]', u'[3, 0, 0]', None, None), (u'?++what++do++you++think', u'?++what++do++you++think', 5, 1, u'[0, 0, 0, 1, 0]', u'[0, 0, 0, 1, 0]', None, None, None, None), (u'.++but++you++but', u'.++but++you++but', 4, 2, u'[0, 10, 4, "IGNOR"]', u'[0, 15, 4, "IGNOR"]', u'[0, 4, 2, "IGNOR"]', u'[0, 10, 4, "IGNOR"]', None, None), (u',++but++a++big++explanation', u',++but++a++big++explan', 5, 1, u'[0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 1]', u'[0, 0, 0, 1, 0]', u'[0, 
0, 0, 3, 0]', None, None), (u'\U0001f62b++1++.', u'\U0001f62b++1++.', 3, 1, u'[1, 1, 0]', u'[1, 1, 0]', None, None, None, None), (u'it++?++1++\U0001f62b++1', u'it++?++1++\U0001f62b++1', 5, 1, u'[0, 1, 2, 1, "IGNOR"]', u'[0, 1, 2, 1, "IGNOR"]', None, None, None, None), (u'tiny++model++,++but++a++big', u'tini++model++,++but++a++big', 6, 1, None, None, u'[0, 0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 0, 3]', None, None), (u'you++but', u'you++but', 2, 2, u'[4, 6]', u'[4, 8]', u'[2, 2]', u'[4, 6]', None, None), (u'right++?++what', u'right++?++what', 3, 1, u'[1, 0, 0]', u'[1, 0, 0]', None, None, None, None), (u'\U0001f308++\U0001f600++\U0001f308++\U0001f600', u'\U0001f308++\U0001f600++\U0001f308++\U0001f600', 4, 1, u'[2, 2, "IGNOR", "IGNOR"]', u'[2, 2, "IGNOR", "IGNOR"]', None, None, None, None), (u'but++you++but++you++\U0001f600', u'but++you++but++you++\U0001f600', 5, 1, u'[5, 3, "IGNOR", "IGNOR", 1]', u'[5, 4, "IGNOR", "IGNOR", 1]', u'[2, 1, "IGNOR", "IGNOR", 0]', u'[5, 2, "IGNOR", "IGNOR", 0]', None, None), (u'\U0001f62b++1++.++but++you', u'\U0001f62b++1++.++but++you', 5, 1, u'[1, 1, 0, 2, 2]', u'[1, 1, 0, 5, 2]', u'[0, 0, 0, 1, 1]', u'[0, 0, 0, 2, 2]', None, None), (u'surprise++.++but++you++but++you', u'surpris++.++but++you++but++you', 6, 1, u'[0, 0, 5, 3, "IGNOR", "IGNOR"]', u'[0, 0, 5, 4, "IGNOR", "IGNOR"]', u'[0, 0, 2, 1, "IGNOR", "IGNOR"]', u'[0, 0, 5, 2, "IGNOR", "IGNOR"]', None, None), (u'a++big++explanation++.++right', u'a++big++explan++.++right', 5, 1, u'[0, 0, 1, 0, 1]', u'[0, 0, 1, 0, 1]', u'[0, 1, 0, 0, 0]', u'[0, 3, 0, 0, 0]', None, None), (u'1++.++but', u'1++.++but', 3, 1, u'[1, 0, 2]', u'[1, 0, 5]', u'[0, 0, 1]', u'[0, 0, 2]', None, None), (u'you++but++you++\U0001f600++\U0001f308', u'you++but++you++\U0001f600++\U0001f308', 5, 1, u'[3, 3, "IGNOR", 1, 1]', u'[4, 3, "IGNOR", 1, 1]', u'[1, 1, "IGNOR", 0, 0]', u'[2, 3, "IGNOR", 0, 0]', None, None), (u'\U0001f62b++1', u'\U0001f62b++1', 2, 1, u'[1, 1]', u'[1, 1]', None, None, None, None), (u'tiny++model++,', 
u'tini++model++,', 3, 2, u'[0, 1, 0]', u'[0, 2, 0]', u'[1, 0, 0]', u'[6, 0, 0]', None, None), (u'right++?++what++do++you++think', u'right++?++what++do++you++think', 6, 1, u'[1, 0, 0, 0, 1, 0]', u'[1, 0, 0, 0, 1, 0]', None, None, None, None), (u'?++1++\U0001f62b', u'?++1++\U0001f62b', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, None, None), (u'you++\U0001f600++\U0001f308++\U0001f600++\U0001f308++\U0001f600', u'you++\U0001f600++\U0001f308++\U0001f600++\U0001f308++\U0001f600', 6, 1, u'[1, 3, 2, "IGNOR", "IGNOR", "IGNOR"]', u'[2, 3, 2, "IGNOR", "IGNOR", "IGNOR"]', None, None, None, None), (u'you++but++you++\U0001f600', u'you++but++you++\U0001f600', 4, 1, u'[3, 3, "IGNOR", 1]', u'[4, 3, "IGNOR", 1]', u'[1, 1, "IGNOR", 0]', u'[2, 3, "IGNOR", 0]', None, None), (u'about++it++?', u'about++it++?', 3, 1, u'[0, 0, 1]', u'[0, 0, 1]', None, None, None, None), (u'surprise++.++but++you++but', u'surpris++.++but++you++but', 5, 1, u'[0, 0, 5, 2, "IGNOR"]', u'[0, 0, 5, 2, "IGNOR"]', u'[0, 0, 2, 1, "IGNOR"]', u'[0, 0, 5, 2, "IGNOR"]', None, None), (u'1++.++but++you', u'1++.++but++you', 4, 1, u'[1, 0, 2, 2]', u'[1, 0, 5, 2]', u'[0, 0, 1, 1]', u'[0, 0, 2, 2]', None, None), (u'but++you++but++you', u'but++you++but++you', 4, 2, u'[10, 6, "IGNOR", "IGNOR"]', u'[15, 8, "IGNOR", "IGNOR"]', u'[4, 2, "IGNOR", "IGNOR"]', u'[10, 4, "IGNOR", "IGNOR"]', None, None), (u'about++it++?++1++\U0001f62b', u'about++it++?++1++\U0001f62b', 5, 1, u'[0, 0, 1, 1, 1]', u'[0, 0, 1, 1, 1]', None, None, None, None), (u'.++right++?', u'.++right++?', 3, 1, u'[0, 1, 0]', u'[0, 1, 0]', None, None, None, None), (u'tiny++surprise++.++but++you', u'tini++surpris++.++but++you', 5, 1, u'[1, 0, 0, 2, 2]', u'[1, 0, 0, 2, 2]', u'[1, 0, 0, 1, 1]', u'[3, 0, 0, 2, 2]', None, None), (u'you++think++about++it', u'you++think++about++it', 4, 1, u'[1, 0, 0, 0]', u'[1, 0, 0, 0]', None, None, None, None), (u'do++you', u'do++you', 2, 1, u'[0, 1]', u'[0, 1]', None, None, None, None), (u'1++\U0001f62b', u'1++\U0001f62b', 2, 1, u'[1, 1]', 
u'[1, 1]', None, None, None, None), (u'.++right++?++what++do', u'.++right++?++what++do', 5, 1, u'[0, 1, 0, 0, 0]', u'[0, 1, 0, 0, 0]', None, None, None, None), (u'but++you++\U0001f600++\U0001f308++\U0001f600++\U0001f308', u'but++you++\U0001f600++\U0001f308++\U0001f600++\U0001f308', 6, 1, u'[3, 1, 2, 2, "IGNOR", "IGNOR"]', u'[3, 2, 2, 2, "IGNOR", "IGNOR"]', u'[1, 0, 0, 0, "IGNOR", "IGNOR"]', u'[3, 0, 0, 0, "IGNOR", "IGNOR"]', None, None), (u'1++\U0001f62b++1', u'1++\U0001f62b++1', 3, 1, u'[2, 1, "IGNOR"]', u'[2, 1, "IGNOR"]', None, None, None, None), (u'big++explanation++.++right', u'big++explan++.++right', 4, 1, u'[0, 1, 0, 1]', u'[0, 1, 0, 1]', u'[1, 0, 0, 0]', u'[3, 0, 0, 0]', None, None), (u'it++?++1++\U0001f62b++1++.', u'it++?++1++\U0001f62b++1++.', 6, 1, u'[0, 1, 2, 1, "IGNOR", 0]', u'[0, 1, 2, 1, "IGNOR", 0]', None, None, None, None), (u'?++1++\U0001f62b++1++.', u'?++1++\U0001f62b++1++.', 5, 1, u'[1, 2, 1, "IGNOR", 0]', u'[1, 2, 1, "IGNOR", 0]', None, None, None, None), (u'you++\U0001f600', u'you++\U0001f600', 2, 1, u'[1, 1]', u'[2, 1]', None, None, None, None), (u'1++\U0001f62b++1++.++but++you', u'1++\U0001f62b++1++.++but++you', 6, 1, u'[2, 1, "IGNOR", 0, 2, 2]', u'[2, 1, "IGNOR", 0, 5, 2]', u'[0, 0, "IGNOR", 0, 1, 1]', u'[0, 0, "IGNOR", 0, 2, 2]', None, None), (u'you++\U0001f600++\U0001f308', u'you++\U0001f600++\U0001f308', 3, 1, u'[1, 1, 1]', u'[2, 1, 1]', None, None, None, None), (u'.++right++?++what++do++you', u'.++right++?++what++do++you', 6, 1, u'[0, 1, 0, 0, 0, 1]', u'[0, 1, 0, 0, 0, 1]', None, None, None, None), (u'you++\U0001f600++\U0001f308++\U0001f600++\U0001f308', u'you++\U0001f600++\U0001f308++\U0001f600++\U0001f308', 5, 1, u'[1, 2, 2, "IGNOR", "IGNOR"]', u'[2, 2, 2, "IGNOR", "IGNOR"]', None, None, None, None), (u'1++\U0001f62b++1++.++but', u'1++\U0001f62b++1++.++but', 5, 1, u'[2, 1, "IGNOR", 0, 2]', u'[2, 1, "IGNOR", 0, 5]', u'[0, 0, "IGNOR", 0, 1]', u'[0, 0, "IGNOR", 0, 2]', None, None), (u'think++about++it++?', u'think++about++it++?', 4, 1, 
u'[0, 0, 0, 1]', u'[0, 0, 0, 1]', None, None, None, None), (u'big', u'big', 1, 5, u'2', u'2', u'2', u'5', u'2', u'2'), (u'big++explanation', u'big++explan', 2, 1, u'[0, 1]', u'[0, 1]', u'[1, 0]', u'[3, 0]', None, None), (u'1++.++but++you++but++you', u'1++.++but++you++but++you', 6, 1, u'[1, 0, 5, 3, "IGNOR", "IGNOR"]', u'[1, 0, 10, 4, "IGNOR", "IGNOR"]', u'[0, 0, 2, 1, "IGNOR", "IGNOR"]', u'[0, 0, 5, 2, "IGNOR", "IGNOR"]', None, None), (u'right++?++what++do++you', u'right++?++what++do++you', 5, 1, u'[1, 0, 0, 0, 1]', u'[1, 0, 0, 0, 1]', None, None, None, None), (u'but++a++big++explanation', u'but++a++big++explan', 4, 1, u'[0, 0, 0, 1]', u'[0, 0, 0, 1]', u'[0, 0, 1, 0]', u'[0, 0, 3, 0]', None, None), (u'?++1++\U0001f62b++1', u'?++1++\U0001f62b++1', 4, 1, u'[1, 2, 1, "IGNOR"]', u'[1, 2, 1, "IGNOR"]', None, None, None, None), (u'a++big++explanation++.', u'a++big++explan++.', 4, 1, u'[0, 0, 1, 0]', u'[0, 0, 1, 0]', u'[0, 1, 0, 0]', u'[0, 3, 0, 0]', None, None), (u'a++big++explanation++.++right++?', u'a++big++explan++.++right++?', 6, 1, u'[0, 0, 1, 0, 1, 0]', u'[0, 0, 1, 0, 1, 0]', u'[0, 1, 0, 0, 0, 0]', u'[0, 3, 0, 0, 0, 0]', None, None), (u'1++.', u'1++.', 2, 1, u'[1, 0]', u'[1, 0]', None, None, None, None), (u',++but++a++big', u',++but++a++big', 4, 1, None, None, u'[0, 0, 0, 1]', u'[0, 0, 0, 3]', None, None), (u'but++i++realy++liked', u'but++i++reali++like', 4, 1, u'[1, 0, 2, 0]', u'[1, 0, 4, 0]', u'[0, 0, 1, 0]', u'[0, 0, 3, 0]', None, None), (u'liked++it++:p++=)++\U0001f600++\U0001f308', u'like++it++:p++=)++\U0001f600++\U0001f308', 6, 1, u'[0, 0, 0, 1, 1, 1]', u'[0, 0, 0, 1, 1, 1]', None, None, None, None), (u',++but++i++realy', u',++but++i++reali', 4, 1, u'[0, 1, 0, 2]', u'[0, 1, 0, 4]', u'[0, 0, 0, 1]', u'[0, 0, 0, 3]', None, None), (u'bad++surprise++for++me++\U0001f62b++,', u'bad++surpris++for++me++\U0001f62b++,', 6, 1, u'[0, 0, 0, 0, 1, 0]', u'[0, 0, 0, 0, 1, 0]', None, None, None, None), (u'i++realy++liked++it++:p', u'i++reali++like++it++:p', 5, 1, u'[0, 2, 0, 
0, 0]', u'[0, 4, 0, 0, 0]', u'[0, 1, 0, 0, 0]', u'[0, 3, 0, 0, 0]', None, None), (u'but', u'but', 1, 13, u'11', u'16', u'4', u'10', u'11', u'4'), (u'realy++liked', u'reali++like', 2, 1, u'[2, 0]', u'[4, 0]', u'[1, 0]', u'[3, 0]', None, None), (u':p++=)++\U0001f600++\U0001f308++\U0001f600', u':p++=)++\U0001f600++\U0001f308++\U0001f600', 5, 1, u'[0, 1, 1, 1, "IGNOR"]', u'[0, 1, 1, 1, "IGNOR"]', None, None, None, None), (u'me++\U0001f62b++,++but++i', u'me++\U0001f62b++,++but++i', 5, 1, u'[0, 1, 0, 1, 0]', u'[0, 1, 0, 1, 0]', None, None, None, None), (u'me++\U0001f62b++,', u'me++\U0001f62b++,', 3, 1, u'[0, 1, 0]', u'[0, 1, 0]', None, None, None, None), (u'liked++it++:p++=)++\U0001f600', u'like++it++:p++=)++\U0001f600', 5, 1, u'[0, 0, 0, 1, 1]', u'[0, 0, 0, 1, 1]', None, None, None, None), (u'\U0001f62b++,++but', u'\U0001f62b++,++but', 3, 1, u'[1, 0, 1]', u'[1, 0, 1]', None, None, None, None), (u'realy', u'reali', 1, 4, u'2', u'4', u'1', u'3', u'2', u'1'), (u'surprise++for++me++\U0001f62b', u'surpris++for++me++\U0001f62b', 4, 1, u'[0, 0, 0, 1]', u'[0, 0, 0, 1]', None, None, None, None), (u'i++realy++liked++it++:p++=)', u'i++reali++like++it++:p++=)', 6, 1, u'[0, 2, 0, 0, 0, 1]', u'[0, 4, 0, 0, 0, 1]', u'[0, 1, 0, 0, 0, 0]', u'[0, 3, 0, 0, 0, 0]', None, None), (u'\U0001f600', u'\U0001f600', 1, 5, u'4', u'4', None, None, u'4', None), (u'\U0001f308++\U0001f600', u'\U0001f308++\U0001f600', 2, 3, u'[3, 2]', u'[3, 2]', None, None, None, None), (u'=)', u'=)', 1, 1, u'1', u'1', None, None, u'1', None), (u'i++realy++liked++it', u'i++reali++like++it', 4, 1, u'[0, 2, 0, 0]', u'[0, 4, 0, 0]', u'[0, 1, 0, 0]', u'[0, 3, 0, 0]', None, None), (u'me++\U0001f62b++,++but', u'me++\U0001f62b++,++but', 4, 1, u'[0, 1, 0, 1]', u'[0, 1, 0, 1]', None, None, None, None), (u'\U0001f62b++,++but++i++realy', u'\U0001f62b++,++but++i++reali', 5, 1, u'[1, 0, 1, 0, 2]', u'[1, 0, 1, 0, 4]', u'[0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 3]', None, None), (u'=)++\U0001f600++\U0001f308', u'=)++\U0001f600++\U0001f308', 
3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, None, None), (u',++but++i', u',++but++i', 3, 1, u'[0, 1, 0]', u'[0, 1, 0]', None, None, None, None), (u'it++:p++=)++\U0001f600', u'it++:p++=)++\U0001f600', 4, 1, u'[0, 0, 1, 1]', u'[0, 0, 1, 1]', None, None, None, None), (u'but++i++realy++liked++it++:p', u'but++i++reali++like++it++:p', 6, 1, u'[1, 0, 2, 0, 0, 0]', u'[1, 0, 4, 0, 0, 0]', u'[0, 0, 1, 0, 0, 0]', u'[0, 0, 3, 0, 0, 0]', None, None), (u'realy++liked++it++:p++=)', u'reali++like++it++:p++=)', 5, 1, u'[2, 0, 0, 0, 1]', u'[4, 0, 0, 0, 1]', u'[1, 0, 0, 0, 0]', u'[3, 0, 0, 0, 0]', None, None), (u'=)++\U0001f600++\U0001f308++\U0001f600', u'=)++\U0001f600++\U0001f308++\U0001f600', 4, 1, u'[1, 1, 1, "IGNOR"]', u'[1, 1, 1, "IGNOR"]', None, None, None, None), (u'it++:p++=)', u'it++:p++=)', 3, 1, u'[0, 0, 1]', u'[0, 0, 1]', None, None, None, None), (u'\U0001f62b++,', u'\U0001f62b++,', 2, 1, u'[1, 0]', u'[1, 0]', None, None, None, None), (u'but++i++realy++liked++it', u'but++i++reali++like++it', 5, 1, u'[1, 0, 2, 0, 0]', u'[1, 0, 4, 0, 0]', u'[0, 0, 1, 0, 0]', u'[0, 0, 3, 0, 0]', None, None), (u'\U0001f308', u'\U0001f308', 1, 3, u'3', u'3', None, None, u'3', None), (u'for++me++\U0001f62b', u'for++me++\U0001f62b', 3, 1, u'[0, 0, 1]', u'[0, 0, 1]', None, None, None, None), (u'but++i', u'but++i', 2, 1, u'[1, 0]', u'[1, 0]', None, None, None, None), (u'i++realy++liked', u'i++reali++like', 3, 1, u'[0, 2, 0]', u'[0, 4, 0]', u'[0, 1, 0]', u'[0, 3, 0]', None, None), (u'for++me++\U0001f62b++,++but', u'for++me++\U0001f62b++,++but', 5, 1, u'[0, 0, 1, 0, 1]', u'[0, 0, 1, 0, 1]', None, None, None, None), (u'realy++liked++it', u'reali++like++it', 3, 1, u'[2, 0, 0]', u'[4, 0, 0]', u'[1, 0, 0]', u'[3, 0, 0]', None, None), (u'\U0001f600++\U0001f308', u'\U0001f600++\U0001f308', 2, 3, u'[3, 3]', u'[3, 3]', None, None, None, None), (u'for++me++\U0001f62b++,', u'for++me++\U0001f62b++,', 4, 1, u'[0, 0, 1, 0]', u'[0, 0, 1, 0]', None, None, None, None), (u'\U0001f600++\U0001f308++\U0001f600', 
u'\U0001f600++\U0001f308++\U0001f600', 3, 3, u'[4, 3, "IGNOR"]', u'[5, 3, "IGNOR"]', None, None, None, None), (u'\U0001f62b++,++but++i++realy++liked', u'\U0001f62b++,++but++i++reali++like', 6, 1, u'[1, 0, 1, 0, 2, 0]', u'[1, 0, 1, 0, 4, 0]', u'[0, 0, 0, 0, 1, 0]', u'[0, 0, 0, 0, 3, 0]', None, None), (u'me++\U0001f62b++,++but++i++realy', u'me++\U0001f62b++,++but++i++reali', 6, 1, u'[0, 1, 0, 1, 0, 2]', u'[0, 1, 0, 1, 0, 4]', u'[0, 0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 0, 3]', None, None), (u',++but', u',++but', 2, 2, u'[0, 1]', u'[0, 1]', None, None, None, None), (u'it++:p++=)++\U0001f600++\U0001f308', u'it++:p++=)++\U0001f600++\U0001f308', 5, 1, u'[0, 0, 1, 1, 1]', u'[0, 0, 1, 1, 1]', None, None, None, None), (u'=)++\U0001f600', u'=)++\U0001f600', 2, 1, u'[1, 1]', u'[1, 1]', None, None, None, None), (u'bad++surprise++for++me++\U0001f62b', u'bad++surpris++for++me++\U0001f62b', 5, 1, u'[0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 1]', None, None, None, None), (u':p++=)', u':p++=)', 2, 1, u'[0, 1]', u'[0, 1]', None, None, None, None), (u'\U0001f62b++,++but++i', u'\U0001f62b++,++but++i', 4, 1, u'[1, 0, 1, 0]', u'[1, 0, 1, 0]', None, None, None, None), (u'realy++bad++surprise++for++me++\U0001f62b', u'reali++bad++surpris++for++me++\U0001f62b', 6, 1, u'[0, 0, 0, 0, 0, 1]', u'[0, 0, 0, 0, 0, 1]', None, None, None, None), (u':p++=)++\U0001f600', u':p++=)++\U0001f600', 3, 1, u'[0, 1, 1]', u'[0, 1, 1]', None, None, None, None), (u'me++\U0001f62b', u'me++\U0001f62b', 2, 1, u'[0, 1]', u'[0, 1]', None, None, None, None), (u'realy++liked++it++:p', u'reali++like++it++:p', 4, 1, u'[2, 0, 0, 0]', u'[4, 0, 0, 0]', u'[1, 0, 0, 0]', u'[3, 0, 0, 0]', None, None), (u'it++:p++=)++\U0001f600++\U0001f308++\U0001f600', u'it++:p++=)++\U0001f600++\U0001f308++\U0001f600', 6, 1, u'[0, 0, 1, 1, 1, "IGNOR"]', u'[0, 0, 1, 1, 1, "IGNOR"]', None, None, None, None), (u'\U0001f62b', u'\U0001f62b', 1, 3, u'3', u'3', None, None, u'3', None), (u'but++i++realy', u'but++i++reali', 3, 1, u'[1, 0, 2]', u'[1, 0, 4]', u'[0, 0, 
1]', u'[0, 0, 3]', None, None), (u':p++=)++\U0001f600++\U0001f308', u':p++=)++\U0001f600++\U0001f308', 4, 1, u'[0, 1, 1, 1]', u'[0, 1, 1, 1]', None, None, None, None), (u'bad', u'bad', 1, 6, u'4', u'7', u'1', u'5', u'4', u'1'), (u'surprise++for++me++\U0001f62b++,', u'surpris++for++me++\U0001f62b++,', 5, 1, u'[0, 0, 0, 1, 0]', u'[0, 0, 0, 1, 0]', None, None, None, None), (u'surprise++for++me++\U0001f62b++,++but', u'surpris++for++me++\U0001f62b++,++but', 6, 1, u'[0, 0, 0, 1, 0, 1]', u'[0, 0, 0, 1, 0, 1]', None, None, None, None), (u'liked++it++:p++=)', u'like++it++:p++=)', 4, 1, u'[0, 0, 0, 1]', u'[0, 0, 0, 1]', None, None, None, None), (u'realy++liked++it++:p++=)++\U0001f600', u'reali++like++it++:p++=)++\U0001f600', 6, 1, u'[2, 0, 0, 0, 1, 1]', u'[4, 0, 0, 0, 1, 1]', u'[1, 0, 0, 0, 0, 0]', u'[3, 0, 0, 0, 0, 0]', None, None), (u',++but++i++realy++liked', u',++but++i++reali++like', 5, 1, u'[0, 1, 0, 2, 0]', u'[0, 1, 0, 4, 0]', u'[0, 0, 0, 1, 0]', u'[0, 0, 0, 3, 0]', None, None), (u'for++me++\U0001f62b++,++but++i', u'for++me++\U0001f62b++,++but++i', 6, 1, u'[0, 0, 1, 0, 1, 0]', u'[0, 0, 1, 0, 1, 0]', None, None, None, None), (u'i++realy', u'i++reali', 2, 1, u'[0, 2]', u'[0, 4]', u'[0, 1]', u'[0, 3]', None, None), (u',++but++i++realy++liked++it', u',++but++i++reali++like++it', 6, 1, u'[0, 1, 0, 2, 0, 0]', u'[0, 1, 0, 4, 0, 0]', u'[0, 0, 0, 1, 0, 0]', u'[0, 0, 0, 3, 0, 0]', None, None)]
# right_baseline_freezed_full_repetativ = [(u'also++very++pity++for++me', u'also++veri++piti++for++me', 5, 1, None, None, None, None, None, None), (u'it++was++also++very', u'it++was++also++veri', 4, 1, None, None, None, None, None, None), (u'.++:-(++@real_trump++#shetlife', u'.++:-(++@real_trump++#shetlif', 4, 1, None, None, None, None, None, None), (u':-(++@real_trump++#shetlife++#readytogo++http://www.absurd.com', u':-(++@real_trump++#shetlif++#readytogo++http://www.absurd.com', 5, 1, None, None, None, None, None, None), (u'glad++to++se++you++-)', u'glad++to++se++you++-)', 5, 1, None, None, None, None, None, None), (u'me++.', u'me++.', 2, 1, None, None, None, None, None, None), (u'.++:-(++@real_trump', u'.++:-(++@real_trump', 3, 1, None, None, None, None, None, None), (u'-)', u'-)', 1, 1, u'1', u'1', None, None, u'1', None), (u'you++-)', u'you++-)', 2, 1, None, None, None, None, None, None), (u'me++.++:-(', u'me++.++:-(', 3, 1, None, None, None, None, None, None), (u'.++:-(++@real_trump++#shetlife++#readytogo', u'.++:-(++@real_trump++#shetlif++#readytogo', 5, 1, None, None, None, None, None, None), (u'pity++for++me++.', u'piti++for++me++.', 4, 1, None, None, None, None, None, None), (u'for++me++.++:-(', u'for++me++.++:-(', 4, 1, None, None, None, None, None, None), (u'me++.++:-(++@real_trump++#shetlife++#readytogo', u'me++.++:-(++@real_trump++#shetlif++#readytogo', 6, 1, None, None, None, None, None, None), (u'it++was++also++very++pity', u'it++was++also++veri++piti', 5, 1, None, None, None, None, None, None), (u'very++pity++for++me++.++:-(', u'veri++piti++for++me++.++:-(', 6, 1, None, None, None, None, None, None), (u'to++se++you++-)', u'to++se++you++-)', 4, 1, None, None, None, None, None, None), (u'it++was++also++very++pity++for', u'it++was++also++veri++piti++for', 6, 1, None, None, None, None, None, None), (u'very++pity++for', u'veri++piti++for', 3, 1, None, None, None, None, None, None), (u'se++you++-)', u'se++you++-)', 3, 1, None, None, None, None, None, 
None), (u'glad++to', u'glad++to', 2, 1, None, None, None, None, None, None), (u'but++it++was++also++very', u'but++it++was++also++veri', 5, 1, None, None, None, None, None, None), (u'for++me++.', u'for++me++.', 3, 1, None, None, None, None, None, None), (u'pity++for++me++.++:-(', u'piti++for++me++.++:-(', 5, 1, None, None, None, None, None, None), (u'.++:-(++@real_trump++#shetlife++#readytogo++http://www.absurd.com', u'.++:-(++@real_trump++#shetlif++#readytogo++http://www.absurd.com', 6, 1, None, None, None, None, None, None), (u'pity', u'piti', 1, 4, u'2', u'4', u'1', u'4', u'2', u'1'), (u'me++.++:-(++@real_trump', u'me++.++:-(++@real_trump', 4, 1, None, None, None, None, None, None), (u'but++it++was++also++very++pity', u'but++it++was++also++veri++piti', 6, 1, None, None, None, None, None, None), (u'very++pity++for++me++.', u'veri++piti++for++me++.', 5, 1, None, None, None, None, None, None), (u'also++very++pity++for++me++.', u'also++veri++piti++for++me++.', 6, 1, None, None, None, None, None, None), (u'se++you', u'se++you', 2, 1, None, None, None, None, None, None), (u'se', u'se', 1, 1, u'1', u'1', None, None, u'1', None), (u'for++me++.++:-(++@real_trump++#shetlife', u'for++me++.++:-(++@real_trump++#shetlif', 6, 1, None, None, None, None, None, None), (u'glad++to++se++you', u'glad++to++se++you', 4, 1, None, None, None, None, None, None), (u'very++pity++for++me', u'veri++piti++for++me', 4, 1, None, None, None, None, None, None), (u':-(++@real_trump', u':-(++@real_trump', 2, 1, None, None, None, None, None, None), (u'pity++for++me++.++:-(++@real_trump', u'piti++for++me++.++:-(++@real_trump', 6, 1, None, None, None, None, None, None), (u'was++also++very++pity++for', u'was++also++veri++piti++for', 5, 1, None, None, None, None, None, None), (u'also++very', u'also++veri', 2, 1, None, None, None, None, None, None), (u'to++se', u'to++se', 2, 1, None, None, None, None, None, None), (u'pity++for', u'piti++for', 2, 1, None, None, None, None, None, None), (u'to++se++you', 
u'to++se++you', 3, 1, None, None, None, None, None, None), (u'for++me++.++:-(++@real_trump', u'for++me++.++:-(++@real_trump', 5, 1, None, None, None, None, None, None), (u'also++very++pity', u'also++veri++piti', 3, 1, None, None, None, None, None, None), (u'very', u'veri', 1, 3, u'2', u'4', u'1', u'3', u'2', u'1'), (u'was++also++very', u'was++also++veri', 3, 1, None, None, None, None, None, None), (u'pity++for++me', u'piti++for++me', 3, 1, None, None, None, None, None, None), (u'me++.++:-(++@real_trump++#shetlife', u'me++.++:-(++@real_trump++#shetlif', 5, 1, None, None, None, None, None, None), (u'very++pity', u'veri++piti', 2, 1, u'[2, 2]', u'[4, 4]', u'[1, 1]', u'[3, 4]', u'1', u'1'), (u'was++also++very++pity++for++me', u'was++also++veri++piti++for++me', 6, 1, None, None, None, None, None, None), (u'also++very++pity++for', u'also++veri++piti++for', 4, 1, None, None, None, None, None, None), (u':-(++@real_trump++#shetlife', u':-(++@real_trump++#shetlif', 3, 1, None, None, None, None, None, None), (u'glad++to++se', u'glad++to++se', 3, 1, None, None, None, None, None, None), (u':-(++@real_trump++#shetlife++#readytogo', u':-(++@real_trump++#shetlif++#readytogo', 4, 1, None, None, None, None, None, None), (u'.++:-(', u'.++:-(', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None), (u'glad', u'glad', 1, 1, u'1', u'1', None, None, u'1', None), (u'.++but++it++was++also++very', u'.++but++it++was++also++veri', 6, 1, None, None, None, None, None, None), (u'was++also++very++pity', u'was++also++veri++piti', 4, 1, None, None, None, None, None, None), (u'bad++news++,++which', u'bad++news++,++which', 4, 1, None, None, None, None, None, None), (u'tiny++model++,++which++we', u'tini++model++,++which++we', 5, 1, None, None, None, None, None, None), (u'acept++.++-(', u'acept++.++-(', 3, 1, None, None, None, None, None, None), (u'not++acept++.++-(++\U0001f62b', u'not++acept++.++-(++\U0001f62b', 5, 1, None, None, None, None, None, None), (u'tiny++model++,++which', 
u'tini++model++,++which', 4, 1, None, None, None, None, None, None), (u'a++bad++news++,', u'a++bad++news++,', 4, 1, None, None, None, None, None, None), (u'-(++\U0001f62b', u'-(++\U0001f62b', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None), (u'-(++\U0001f62b++:-(++#shetlife++http://www.noooo.com', u'-(++\U0001f62b++:-(++#shetlif++http://www.noooo.com', 5, 1, None, None, None, None, None, None), (u'explain++a++big', u'explain++a++big', 3, 1, None, None, None, None, None, None), (u'explain++a++big++things++.', u'explain++a++big++thing++.', 5, 1, None, None, None, None, None, None), (u'a++bad++news', u'a++bad++news', 3, 1, None, None, None, None, None, None), (u'bad++news++,++which++we', u'bad++news++,++which++we', 5, 1, None, None, None, None, None, None), (u'-(', u'-(', 1, 1, u'1', u'1', None, None, u'1', None), (u'bad++news++,++which++we++can', u'bad++news++,++which++we++can', 6, 1, None, None, None, None, None, None), (u'bad++news', u'bad++news', 2, 1, None, None, None, None, None, None), (u'a++bad++news++,++which', u'a++bad++news++,++which', 5, 1, None, None, None, None, None, None), (u'big++things++.', u'big++thing++.', 3, 1, None, None, None, None, None, None), (u'-(++\U0001f62b++:-(', u'-(++\U0001f62b++:-(', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, u'1', None), (u'model++,++which++we', u'model++,++which++we', 4, 1, None, None, None, None, None, None), (u'#shetlife', u'#shetlif', 1, 3, None, None, u'1', u'2', None, u'1'), (u'can++not++acept++.++-(++\U0001f62b', u'can++not++acept++.++-(++\U0001f62b', 6, 1, None, None, None, None, None, None), (u'big++things', u'big++thing', 2, 1, None, None, None, None, None, None), (u'acept++.++-(++\U0001f62b++:-(', u'acept++.++-(++\U0001f62b++:-(', 5, 1, None, None, None, None, None, None), (u'for++explain++a++big++things++.', u'for++explain++a++big++thing++.', 6, 1, None, None, None, None, None, None), (u'\U0001f62b++:-(++#shetlife++http://www.noooo.com', u'\U0001f62b++:-(++#shetlif++http://www.noooo.com', 4, 1, 
None, None, None, None, None, None), (u'not++acept++.++-(', u'not++acept++.++-(', 4, 1, None, None, None, None, None, None), (u':-(++#shetlife', u':-(++#shetlif', 2, 1, None, None, None, None, None, None), (u'.++-(++\U0001f62b++:-(++#shetlife++http://www.noooo.com', u'.++-(++\U0001f62b++:-(++#shetlif++http://www.noooo.com', 6, 1, None, None, None, None, None, None), (u'a++big++things', u'a++big++thing', 3, 1, None, None, None, None, None, None), (u'.++-(', u'.++-(', 2, 1, None, None, None, None, None, None), (u'a++bad', u'a++bad', 2, 1, None, None, None, None, None, None), (u'can++not++acept++.++-(', u'can++not++acept++.++-(', 5, 1, None, None, None, None, None, None), (u'a++big++things++.', u'a++big++thing++.', 4, 1, None, None, None, None, None, None), (u'-(++\U0001f62b++:-(++#shetlife', u'-(++\U0001f62b++:-(++#shetlif', 4, 1, None, None, None, None, None, None), (u'acept++.++-(++\U0001f62b', u'acept++.++-(++\U0001f62b', 4, 1, None, None, None, None, None, None), (u':-(', u':-(', 1, 2, u'2', u'2', None, None, u'2', None), (u':-(++#shetlife++http://www.noooo.com', u':-(++#shetlif++http://www.noooo.com', 3, 1, None, None, None, None, None, None), (u'explain++a++big++things', u'explain++a++big++thing', 4, 1, None, None, None, None, None, None), (u'tiny++model++,++which++we++can', u'tini++model++,++which++we++can', 6, 1, None, None, None, None, None, None), (u'acept++.++-(++\U0001f62b++:-(++#shetlife', u'acept++.++-(++\U0001f62b++:-(++#shetlif', 6, 1, None, None, None, None, None, None), (u'use++for++explain++a++big++things', u'use++for++explain++a++big++thing', 6, 1, None, None, None, None, None, None), (u'use++for++explain++a++big', u'use++for++explain++a++big', 5, 1, None, None, None, None, None, None), (u'model++,++which++we++can++use', u'model++,++which++we++can++use', 6, 1, None, None, None, None, None, None), (u'not++acept++.++-(++\U0001f62b++:-(', u'not++acept++.++-(++\U0001f62b++:-(', 6, 1, None, None, None, None, None, None), (u'model++,++which++we++can', 
u'model++,++which++we++can', 5, 1, None, None, None, None, None, None), (u'bad++news++,', u'bad++news++,', 3, 1, None, None, None, None, None, None), (u'.++-(++\U0001f62b++:-(', u'.++-(++\U0001f62b++:-(', 4, 1, None, None, None, None, None, None), (u'for++explain++a++big', u'for++explain++a++big', 4, 1, None, None, None, None, None, None), (u'a++bad++news++,++which++we', u'a++bad++news++,++which++we', 6, 1, None, None, None, None, None, None), (u'#shetlife++http://www.noooo.com', u'#shetlif++http://www.noooo.com', 2, 1, None, None, None, None, None, None), (u'\U0001f62b++:-(', u'\U0001f62b++:-(', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None), (u'.++-(++\U0001f62b', u'.++-(++\U0001f62b', 3, 1, None, None, None, None, None, None), (u'we++can++not++acept++.++-(', u'we++can++not++acept++.++-(', 6, 1, None, None, None, None, None, None), (u'model++,++which', u'model++,++which', 3, 1, None, None, None, None, None, None), (u'.++-(++\U0001f62b++:-(++#shetlife', u'.++-(++\U0001f62b++:-(++#shetlif', 5, 1, None, None, None, None, None, None), (u'can++use++for++explain++a++big', u'can++use++for++explain++a++big', 6, 1, None, None, None, None, None, None), (u'\U0001f62b++:-(++#shetlife', u'\U0001f62b++:-(++#shetlif', 3, 1, None, None, None, None, None, None), (u'for++explain++a++big++things', u'for++explain++a++big++thing', 5, 1, None, None, None, None, None, None), (u'model', u'model', 1, 2, u'1', u'2', None, None, u'1', None), (u'but++a++big++explanation++.++right', u'but++a++big++explan++.++right', 6, 1, None, None, None, None, None, None), (u'do++you++think++about', u'do++you++think++about', 4, 1, None, None, None, None, None, None), (u'you++\U0001f600++\U0001f308++\U0001f600', u'you++\U0001f600++\U0001f308++\U0001f600', 4, 1, u'[1, 2, 1, "IGNOR"]', u'[2, 2, 1, "IGNOR"]', None, None, u'1', None), (u'it++?++1++\U0001f62b', u'it++?++1++\U0001f62b', 4, 1, None, None, None, None, None, None), (u'a++big', u'a++big', 2, 2, None, None, None, None, None, None), 
(u'1++\U0001f62b++1++.', u'1++\U0001f62b++1++.', 4, 1, None, None, None, None, None, None), (u'you++think', u'you++think', 2, 1, None, None, None, None, None, None), (u',++but++a++big++explanation++.', u',++but++a++big++explan++.', 6, 1, None, None, None, None, None, None), (u'what++do++you++think++about++it', u'what++do++you++think++about++it', 6, 1, None, None, None, None, None, None), (u'think++about++it++?++1++\U0001f62b', u'think++about++it++?++1++\U0001f62b', 6, 1, None, None, None, None, None, None), (u'but++you', u'but++you', 2, 4, u'[10, 6]', u'[15, 8]', u'[2, 2]', u'[4, 4]', u'4', u'2'), (u'but++you++\U0001f600++\U0001f308++\U0001f600', u'but++you++\U0001f600++\U0001f308++\U0001f600', 5, 1, u'[3, 1, 2, 1, "IGNOR"]', u'[3, 2, 2, 1, "IGNOR"]', None, None, u'1', None), (u'.++but', u'.++but', 2, 3, None, None, None, None, None, None), (u'big++explanation++.++right++?++what', u'big++explan++.++right++?++what', 6, 1, None, None, None, None, None, None), (u'tiny++surprise++.++but', u'tini++surpris++.++but', 4, 1, None, None, None, None, None, None), (u'about++it++?++1++\U0001f62b++1', u'about++it++?++1++\U0001f62b++1', 6, 1, None, None, None, None, None, None), (u'you++think++about++it++?', u'you++think++about++it++?', 5, 1, None, None, None, None, None, None), (u'\U0001f600++\U0001f308++\U0001f600++\U0001f308', u'\U0001f600++\U0001f308++\U0001f600++\U0001f308', 4, 1, u'[2, 2, "IGNOR", "IGNOR"]', u'[2, 2, "IGNOR", "IGNOR"]', None, None, u'1', None), (u'what++do++you', u'what++do++you', 3, 1, None, None, None, None, None, None), (u'but++a++big++explanation++.', u'but++a++big++explan++.', 5, 1, None, None, None, None, None, None), (u'1', u'1', 1, 2, u'2', u'2', None, None, u'2', None), (u'model++,', u'model++,', 2, 2, None, None, None, None, None, None), (u'?++what++do++you++think++about', u'?++what++do++you++think++about', 6, 1, None, None, None, None, None, None), (u'what++do++you++think', u'what++do++you++think', 4, 1, None, None, None, None, None, None), 
(u'right++?++what++do', u'right++?++what++do', 4, 1, None, None, None, None, None, None), (u'.++right++?++what', u'.++right++?++what', 4, 1, None, None, None, None, None, None), (u'.++but++you', u'.++but++you', 3, 2, None, None, None, None, None, None), (u'about++it++?++1', u'about++it++?++1', 4, 1, None, None, None, None, None, None), (u'tiny', u'tini', 1, 10, u'1', u'1', u'2', u'9', u'1', u'2'), (u'tiny++model', u'tini++model', 2, 2, None, None, None, None, None, None), (u'surprise++.++but++you', u'surpris++.++but++you', 4, 1, None, None, None, None, None, None), (u'explanation++.++right++?++what', u'explan++.++right++?++what', 5, 1, None, None, None, None, None, None), (u'1++.++but++you++but', u'1++.++but++you++but', 5, 1, None, None, None, None, None, None), (u'model++,++but++a++big++explanation', u'model++,++but++a++big++explan', 6, 1, None, None, None, None, None, None), (u'?++1++\U0001f62b++1++.++but', u'?++1++\U0001f62b++1++.++but', 6, 1, None, None, None, None, None, None), (u'a++big++explanation', u'a++big++explan', 3, 1, None, None, None, None, None, None), (u'explanation++.++right++?++what++do', u'explan++.++right++?++what++do', 6, 1, None, None, None, None, None, None), (u'right', u'right', 1, 1, u'1', u'1', None, None, u'1', None), (u'you++but++you', u'you++but++you', 3, 2, u'[6, 6, "IGNOR"]', u'[8, 8, "IGNOR"]', None, None, u'2', None), (u'big++explanation++.++right++?', u'big++explan++.++right++?', 5, 1, None, None, None, None, None, None), (u'it++?', u'it++?', 2, 1, None, None, None, None, None, None), (u'what++do++you++think++about', u'what++do++you++think++about', 5, 1, None, None, None, None, None, None), (u'but++you++but++you++\U0001f600++\U0001f308', u'but++you++but++you++\U0001f600++\U0001f308', 6, 1, u'[5, 3, "IGNOR", "IGNOR", 1, 1]', u'[5, 4, "IGNOR", "IGNOR", 1, 1]', None, None, u'1', None), (u'\U0001f308++\U0001f600++\U0001f308', u'\U0001f308++\U0001f600++\U0001f308', 3, 1, u'[2, 1, "IGNOR"]', u'[2, 1, "IGNOR"]', None, None, u'1', None), 
(u'explanation++.++right', u'explan++.++right', 3, 1, None, None, None, None, None, None), (u'.', u'.', 1, 7, u'1', u'1', None, None, u'1', None), (u'you', u'you', 1, 8, u'7', u'9', u'2', u'4', u'7', u'2'), (u'surprise++.++but', u'surpris++.++but', 3, 1, None, None, None, None, None, None), (u'?', u'?', 1, 2, u'1', u'1', None, None, u'1', None), (u'explanation++.++right++?', u'explan++.++right++?', 4, 1, None, None, None, None, None, None), (u'it++?++1', u'it++?++1', 3, 1, None, None, None, None, None, None), (u'you++think++about++it++?++1', u'you++think++about++it++?++1', 6, 1, None, None, None, None, None, None), (u'but++you++\U0001f600++\U0001f308', u'but++you++\U0001f600++\U0001f308', 4, 1, u'[3, 1, 1, 1]', u'[3, 2, 1, 1]', None, None, u'1', None), (u'but++a++big', u'but++a++big', 3, 1, None, None, None, None, None, None), (u'tiny++surprise++.++but++you++but', u'tini++surpris++.++but++you++but', 6, 1, None, None, None, None, None, None), (u'do++you++think++about++it', u'do++you++think++about++it', 5, 1, None, None, None, None, None, None), (u'big++explanation++.', u'big++explan++.', 3, 1, None, None, None, None, None, None), (u'think++about++it++?++1', u'think++about++it++?++1', 5, 1, None, None, None, None, None, None), (u'.++right', u'.++right', 2, 1, None, None, None, None, None, None), (u'explanation++.', u'explan++.', 2, 1, None, None, None, None, None, None), (u'but++you++but', u'but++you++but', 3, 2, u'[10, 4, "IGNOR"]', u'[15, 4, "IGNOR"]', u'[4, 2, "IGNOR"]', u'[10, 4, "IGNOR"]', u'2', u'2'), (u'.++but++you++but++you++\U0001f600', u'.++but++you++but++you++\U0001f600', 6, 1, None, None, None, None, None, None), (u'tiny++surprise', u'tini++surpris', 2, 1, None, None, None, None, None, None), (u'\U0001f600++\U0001f308++\U0001f600++\U0001f308++\U0001f600', u'\U0001f600++\U0001f308++\U0001f600++\U0001f308++\U0001f600', 5, 1, u'[3, 2, "IGNOR", "IGNOR", "IGNOR"]', u'[3, 2, "IGNOR", "IGNOR", "IGNOR"]', None, None, u'1', None), (u'you++think++about', 
u'you++think++about', 3, 1, None, None, None, None, None, None), (u'?++what++do++you', u'?++what++do++you', 4, 1, None, None, None, None, None, None), (u'explanation', u'explan', 1, 1, u'1', u'1', None, None, u'1', None), (u'you++but++you++\U0001f600++\U0001f308++\U0001f600', u'you++but++you++\U0001f600++\U0001f308++\U0001f600', 6, 1, u'[3, 3, "IGNOR", 2, 1, "IGNOR"]', u'[4, 3, "IGNOR", 2, 1, "IGNOR"]', None, None, u'1', None), (u'?++1', u'?++1', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None), (u'do++you++think++about++it++?', u'do++you++think++about++it++?', 6, 1, None, None, None, None, None, None), (u'do++you++think', u'do++you++think', 3, 1, None, None, None, None, None, None), (u'.++but++you++but++you', u'.++but++you++but++you', 5, 2, None, None, None, None, None, None), (u'\U0001f62b++1++.++but', u'\U0001f62b++1++.++but', 4, 1, None, None, None, None, None, None), (u'right++?', u'right++?', 2, 1, None, None, None, None, None, None), (u'but++you++\U0001f600', u'but++you++\U0001f600', 3, 1, u'[3, 1, 1]', u'[3, 2, 1]', None, None, u'1', None), (u'model++,++but++a++big', u'model++,++but++a++big', 5, 1, None, None, None, None, None, None), (u'\U0001f62b++1++.++but++you++but', u'\U0001f62b++1++.++but++you++but', 6, 1, None, None, None, None, None, None), (u'tiny++surprise++.', u'tini++surpris++.', 3, 1, None, None, None, None, None, None), (u'?++what++do++you++think', u'?++what++do++you++think', 5, 1, None, None, None, None, None, None), (u'.++but++you++but', u'.++but++you++but', 4, 2, None, None, None, None, None, None), (u',++but++a++big++explanation', u',++but++a++big++explan', 5, 1, None, None, None, None, None, None), (u'\U0001f62b++1++.', u'\U0001f62b++1++.', 3, 1, None, None, None, None, None, None), (u'it++?++1++\U0001f62b++1', u'it++?++1++\U0001f62b++1', 5, 1, None, None, None, None, None, None), (u'tiny++model++,++but++a++big', u'tini++model++,++but++a++big', 6, 1, None, None, None, None, None, None), (u'you++but', u'you++but', 2, 2, u'[4, 6]', u'[4, 
8]', u'[2, 2]', u'[4, 6]', u'2', u'2'), (u'right++?++what', u'right++?++what', 3, 1, None, None, None, None, None, None), (u'\U0001f308++\U0001f600++\U0001f308++\U0001f600', u'\U0001f308++\U0001f600++\U0001f308++\U0001f600', 4, 1, u'[2, 2, "IGNOR", "IGNOR"]', u'[2, 2, "IGNOR", "IGNOR"]', None, None, u'1', None), (u'but++you++but++you++\U0001f600', u'but++you++but++you++\U0001f600', 5, 1, u'[5, 3, "IGNOR", "IGNOR", 1]', u'[5, 4, "IGNOR", "IGNOR", 1]', None, None, u'1', None), (u'\U0001f62b++1++.++but++you', u'\U0001f62b++1++.++but++you', 5, 1, None, None, None, None, None, None), (u'surprise++.++but++you++but++you', u'surpris++.++but++you++but++you', 6, 1, None, None, None, None, None, None), (u'a++big++explanation++.++right', u'a++big++explan++.++right', 5, 1, None, None, None, None, None, None), (u'1++.++but', u'1++.++but', 3, 1, None, None, None, None, None, None), (u'you++but++you++\U0001f600++\U0001f308', u'you++but++you++\U0001f600++\U0001f308', 5, 1, u'[3, 3, "IGNOR", 1, 1]', u'[4, 3, "IGNOR", 1, 1]', None, None, u'1', None), (u'\U0001f62b++1', u'\U0001f62b++1', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None), (u'tiny++model++,', u'tini++model++,', 3, 2, None, None, None, None, None, None), (u'right++?++what++do++you++think', u'right++?++what++do++you++think', 6, 1, None, None, None, None, None, None), (u'?++1++\U0001f62b', u'?++1++\U0001f62b', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, u'1', None), (u'you++\U0001f600++\U0001f308++\U0001f600++\U0001f308++\U0001f600', u'you++\U0001f600++\U0001f308++\U0001f600++\U0001f308++\U0001f600', 6, 1, u'[1, 3, 2, "IGNOR", "IGNOR", "IGNOR"]', u'[2, 3, 2, "IGNOR", "IGNOR", "IGNOR"]', None, None, u'1', None), (u'you++but++you++\U0001f600', u'you++but++you++\U0001f600', 4, 1, u'[3, 3, "IGNOR", 1]', u'[4, 3, "IGNOR", 1]', None, None, u'1', None), (u'about++it++?', u'about++it++?', 3, 1, None, None, None, None, None, None), (u'surprise++.++but++you++but', u'surpris++.++but++you++but', 5, 1, None, None, None, None, None, 
None), (u'1++.++but++you', u'1++.++but++you', 4, 1, None, None, None, None, None, None), (u'but++you++but++you', u'but++you++but++you', 4, 2, u'[10, 6, "IGNOR", "IGNOR"]', u'[15, 8, "IGNOR", "IGNOR"]', None, None, u'2', None), (u'about++it++?++1++\U0001f62b', u'about++it++?++1++\U0001f62b', 5, 1, None, None, None, None, None, None), (u'.++right++?', u'.++right++?', 3, 1, None, None, None, None, None, None), (u'tiny++surprise++.++but++you', u'tini++surpris++.++but++you', 5, 1, None, None, None, None, None, None), (u'you++think++about++it', u'you++think++about++it', 4, 1, None, None, None, None, None, None), (u'do++you', u'do++you', 2, 1, None, None, None, None, None, None), (u'1++\U0001f62b', u'1++\U0001f62b', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None), (u'.++right++?++what++do', u'.++right++?++what++do', 5, 1, None, None, None, None, None, None), (u'but++you++\U0001f600++\U0001f308++\U0001f600++\U0001f308', u'but++you++\U0001f600++\U0001f308++\U0001f600++\U0001f308', 6, 1, u'[3, 1, 2, 2, "IGNOR", "IGNOR"]', u'[3, 2, 2, 2, "IGNOR", "IGNOR"]', None, None, u'1', None), (u'1++\U0001f62b++1', u'1++\U0001f62b++1', 3, 1, u'[2, 1, "IGNOR"]', u'[2, 1, "IGNOR"]', None, None, u'1', None), (u'big++explanation++.++right', u'big++explan++.++right', 4, 1, None, None, None, None, None, None), (u'it++?++1++\U0001f62b++1++.', u'it++?++1++\U0001f62b++1++.', 6, 1, None, None, None, None, None, None), (u'?++1++\U0001f62b++1++.', u'?++1++\U0001f62b++1++.', 5, 1, None, None, None, None, None, None), (u'you++\U0001f600', u'you++\U0001f600', 2, 1, u'[1, 1]', u'[2, 1]', None, None, u'1', None), (u'1++\U0001f62b++1++.++but++you', u'1++\U0001f62b++1++.++but++you', 6, 1, None, None, None, None, None, None), (u'you++\U0001f600++\U0001f308', u'you++\U0001f600++\U0001f308', 3, 1, u'[1, 1, 1]', u'[2, 1, 1]', None, None, u'1', None), (u'.++right++?++what++do++you', u'.++right++?++what++do++you', 6, 1, None, None, None, None, None, None), 
(u'you++\U0001f600++\U0001f308++\U0001f600++\U0001f308', u'you++\U0001f600++\U0001f308++\U0001f600++\U0001f308', 5, 1, u'[1, 2, 2, "IGNOR", "IGNOR"]', u'[2, 2, 2, "IGNOR", "IGNOR"]', None, None, u'1', None), (u'1++\U0001f62b++1++.++but', u'1++\U0001f62b++1++.++but', 5, 1, None, None, None, None, None, None), (u'think++about++it++?', u'think++about++it++?', 4, 1, None, None, None, None, None, None), (u'big', u'big', 1, 5, u'2', u'2', u'2', u'5', u'2', u'2'), (u'big++explanation', u'big++explan', 2, 1, None, None, None, None, None, None), (u'1++.++but++you++but++you', u'1++.++but++you++but++you', 6, 1, None, None, None, None, None, None), (u'right++?++what++do++you', u'right++?++what++do++you', 5, 1, None, None, None, None, None, None), (u'but++a++big++explanation', u'but++a++big++explan', 4, 1, None, None, None, None, None, None), (u'?++1++\U0001f62b++1', u'?++1++\U0001f62b++1', 4, 1, u'[1, 2, 1, "IGNOR"]', u'[1, 2, 1, "IGNOR"]', None, None, u'1', None), (u'a++big++explanation++.', u'a++big++explan++.', 4, 1, None, None, None, None, None, None), (u'a++big++explanation++.++right++?', u'a++big++explan++.++right++?', 6, 1, None, None, None, None, None, None), (u'1++.', u'1++.', 2, 1, None, None, None, None, None, None), (u',++but++a++big', u',++but++a++big', 4, 1, None, None, None, None, None, None), (u'but++i++realy++liked', u'but++i++reali++like', 4, 1, None, None, None, None, None, None), (u'liked++it++:p++=)++\U0001f600++\U0001f308', u'like++it++:p++=)++\U0001f600++\U0001f308', 6, 1, None, None, None, None, None, None), (u',++but++i++realy', u',++but++i++reali', 4, 1, None, None, None, None, None, None), (u'bad++surprise++for++me++\U0001f62b++,', u'bad++surpris++for++me++\U0001f62b++,', 6, 1, None, None, None, None, None, None), (u'i++realy++liked++it++:p', u'i++reali++like++it++:p', 5, 1, None, None, None, None, None, None), (u'but', u'but', 1, 13, u'11', u'16', u'4', u'10', u'11', u'4'), (u'realy++liked', u'reali++like', 2, 1, None, None, None, None, None, None), 
(u':p++=)++\U0001f600++\U0001f308++\U0001f600', u':p++=)++\U0001f600++\U0001f308++\U0001f600', 5, 1, None, None, None, None, None, None), (u'me++\U0001f62b++,++but++i', u'me++\U0001f62b++,++but++i', 5, 1, None, None, None, None, None, None), (u'me++\U0001f62b++,', u'me++\U0001f62b++,', 3, 1, None, None, None, None, None, None), (u'liked++it++:p++=)++\U0001f600', u'like++it++:p++=)++\U0001f600', 5, 1, None, None, None, None, None, None), (u'\U0001f62b++,++but', u'\U0001f62b++,++but', 3, 1, None, None, None, None, None, None), (u'realy', u'reali', 1, 4, u'2', u'4', u'1', u'3', u'2', u'1'), (u'surprise++for++me++\U0001f62b', u'surpris++for++me++\U0001f62b', 4, 1, None, None, None, None, None, None), (u'i++realy++liked++it++:p++=)', u'i++reali++like++it++:p++=)', 6, 1, None, None, None, None, None, None), (u'\U0001f600', u'\U0001f600', 1, 5, u'4', u'4', None, None, u'4', None), (u'\U0001f308++\U0001f600', u'\U0001f308++\U0001f600', 2, 3, u'[2, 2]', u'[2, 2]', None, None, u'2', None), (u'=)', u'=)', 1, 1, u'1', u'1', None, None, u'1', None), (u'i++realy++liked++it', u'i++reali++like++it', 4, 1, None, None, None, None, None, None), (u'me++\U0001f62b++,++but', u'me++\U0001f62b++,++but', 4, 1, None, None, None, None, None, None), (u'\U0001f62b++,++but++i++realy', u'\U0001f62b++,++but++i++reali', 5, 1, None, None, None, None, None, None), (u'=)++\U0001f600++\U0001f308', u'=)++\U0001f600++\U0001f308', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, u'1', None), (u',++but++i', u',++but++i', 3, 1, None, None, None, None, None, None), (u'it++:p++=)++\U0001f600', u'it++:p++=)++\U0001f600', 4, 1, None, None, None, None, None, None), (u'but++i++realy++liked++it++:p', u'but++i++reali++like++it++:p', 6, 1, None, None, None, None, None, None), (u'realy++liked++it++:p++=)', u'reali++like++it++:p++=)', 5, 1, None, None, None, None, None, None), (u'=)++\U0001f600++\U0001f308++\U0001f600', u'=)++\U0001f600++\U0001f308++\U0001f600', 4, 1, None, None, None, None, None, None), (u'it++:p++=)', 
u'it++:p++=)', 3, 1, None, None, None, None, None, None), (u'\U0001f62b++,', u'\U0001f62b++,', 2, 1, None, None, None, None, None, None), (u'but++i++realy++liked++it', u'but++i++reali++like++it', 5, 1, None, None, None, None, None, None), (u'\U0001f308', u'\U0001f308', 1, 3, u'3', u'3', None, None, u'3', None), (u'for++me++\U0001f62b', u'for++me++\U0001f62b', 3, 1, None, None, None, None, None, None), (u'but++i', u'but++i', 2, 1, None, None, None, None, None, None), (u'i++realy++liked', u'i++reali++like', 3, 1, None, None, None, None, None, None), (u'for++me++\U0001f62b++,++but', u'for++me++\U0001f62b++,++but', 5, 1, None, None, None, None, None, None), (u'realy++liked++it', u'reali++like++it', 3, 1, None, None, None, None, None, None), (u'\U0001f600++\U0001f308', u'\U0001f600++\U0001f308', 2, 3, u'[3, 3]', u'[3, 3]', None, None, u'3', None), (u'for++me++\U0001f62b++,', u'for++me++\U0001f62b++,', 4, 1, None, None, None, None, None, None), (u'\U0001f600++\U0001f308++\U0001f600', u'\U0001f600++\U0001f308++\U0001f600', 3, 3, u'[2, 1, "IGNOR"]', u'[3, 1, "IGNOR"]', None, None, u'1', None), (u'\U0001f62b++,++but++i++realy++liked', u'\U0001f62b++,++but++i++reali++like', 6, 1, None, None, None, None, None, None), (u'me++\U0001f62b++,++but++i++realy', u'me++\U0001f62b++,++but++i++reali', 6, 1, None, None, None, None, None, None), (u',++but', u',++but', 2, 2, None, None, None, None, None, None), (u'it++:p++=)++\U0001f600++\U0001f308', u'it++:p++=)++\U0001f600++\U0001f308', 5, 1, None, None, None, None, None, None), (u'=)++\U0001f600', u'=)++\U0001f600', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None), (u'bad++surprise++for++me++\U0001f62b', u'bad++surpris++for++me++\U0001f62b', 5, 1, None, None, None, None, None, None), (u':p++=)', u':p++=)', 2, 1, None, None, None, None, None, None), (u'\U0001f62b++,++but++i', u'\U0001f62b++,++but++i', 4, 1, None, None, None, None, None, None), (u'realy++bad++surprise++for++me++\U0001f62b', u'reali++bad++surpris++for++me++\U0001f62b', 
6, 1, None, None, None, None, None, None), (u':p++=)++\U0001f600', u':p++=)++\U0001f600', 3, 1, None, None, None, None, None, None), (u'me++\U0001f62b', u'me++\U0001f62b', 2, 1, None, None, None, None, None, None), (u'realy++liked++it++:p', u'reali++like++it++:p', 4, 1, None, None, None, None, None, None), (u'it++:p++=)++\U0001f600++\U0001f308++\U0001f600', u'it++:p++=)++\U0001f600++\U0001f308++\U0001f600', 6, 1, None, None, None, None, None, None), (u'\U0001f62b', u'\U0001f62b', 1, 3, u'3', u'3', None, None, u'3', None), (u'but++i++realy', u'but++i++reali', 3, 1, None, None, None, None, None, None), (u':p++=)++\U0001f600++\U0001f308', u':p++=)++\U0001f600++\U0001f308', 4, 1, None, None, None, None, None, None), (u'bad', u'bad', 1, 6, u'4', u'7', u'1', u'5', u'4', u'1'), (u'surprise++for++me++\U0001f62b++,', u'surpris++for++me++\U0001f62b++,', 5, 1, None, None, None, None, None, None), (u'surprise++for++me++\U0001f62b++,++but', u'surpris++for++me++\U0001f62b++,++but', 6, 1, None, None, None, None, None, None), (u'liked++it++:p++=)', u'like++it++:p++=)', 4, 1, None, None, None, None, None, None), (u'realy++liked++it++:p++=)++\U0001f600', u'reali++like++it++:p++=)++\U0001f600', 6, 1, None, None, None, None, None, None), (u',++but++i++realy++liked', u',++but++i++reali++like', 5, 1, None, None, None, None, None, None), (u'for++me++\U0001f62b++,++but++i', u'for++me++\U0001f62b++,++but++i', 6, 1, None, None, None, None, None, None), (u'i++realy', u'i++reali', 2, 1, None, None, None, None, None, None), (u',++but++i++realy++liked++it', u',++but++i++reali++like++it', 6, 1, None, None, None, None, None, None)]
# assert right_baseline_not_freezed_not_full_repetativ != right_baseline_not_freezed_full_repetativ
# assert right_baseline_freezed_not_full_repetativ != right_baseline_freezed_full_repetativ
# assert right_baseline_not_freezed_not_full_repetativ != right_baseline_freezed_not_full_repetativ
# assert right_baseline_not_freezed_not_full_repetativ != right_baseline_freezed_full_repetativ
# assert right_baseline_not_freezed_full_repetativ != right_baseline_freezed_not_full_repetativ
name = self.configer.init_info_data["blogger"]["name"]
language = self.configer.init_info_data["blogger"]["language"]
visibility = self.configer.init_info_data["blogger"]["visibility"]
platform_name = self.configer.init_info_data["blogger"]["platform_name"]
license = self.configer.init_info_data["blogger"]["license"]
template_name = self.configer.init_info_data["blogger"]["template_name"]
version = self.configer.init_info_data["blogger"]["version"]
source = self.configer.init_info_data["blogger"]["source"]
encryption_key = self.configer.init_info_data["blogger"]["encryption_key"]["stats"]
corpus_id = self.configer.init_info_data["blogger"]["id"]["corpus"]
stats_id = self.configer.init_info_data["blogger"]["id"]["stats"]
typ= "stats"
precomputed_data = self.configer._counted_reps["en"]
corp = Corpus(mode=self.mode)
corp.open(os.path.join(self.tempdir_testdbs,self.db_blogger_plaintext_corp_en))
########################################################################################
##################################################################
######## full_repetativ_syntagma=False
########################################################################################
########################################################################################
import sys
#self.mode = "prod+"
#####NOT FREEZED #####
#### baseline_insertion_border=10 ####
stats = Stats(mode=self.mode,use_cash=True,status_bar=True)#, )
stats.init(self.tempdir_project_folder, name, language, visibility, corpus_id=corpus_id, version= version, baseline_delimiter="++",
encryption_key=encryption_key, full_repetativ_syntagma=False,
ignore_hashtag=False, force_cleaning=False,
ignore_url=False, ignore_mention=False, ignore_punkt=False, ignore_num=False)
stats.compute(corp,stream_number=1,adjust_to_cpu=False, freeze_db=False, baseline_insertion_border=10)
baseline = stats.statsdb.getall("baseline")
repls = stats.statsdb.getall("replications")
redus = stats.statsdb.getall("reduplications")
#p(baseline,"baseline")
#p(repls, "repls")
#p(redus, "redus")
#p(baseline,"baseline")
#sys.exit()
repls.should.be.equal(right_repls)
redus.should.be.equal(right_redus)
#sorted(baseline).should.be.equal(sorted(right_baseline_not_freezed_not_full_repetativ))
#baseline.should.be.equal(right_baseline_not_freezed_not_full_repetativ)
#
self.configer.right_rep_num["en"]["repls"].should.be.equal(len(repls))
self.configer.right_rep_num["en"]["redus"].should.be.equal(len(redus))
self._check_correctnes(stats.col_index_orig,precomputed_data,repls=repls, redus=redus, baseline=baseline)
bas_synts = [bs[0] for bs in baseline]
for r in redus:
if r[5] not in bas_synts:
p(r[5],"ERROR", c="r")
assert False
for r in repls:
if r[5] not in bas_synts:
p(r[5],"ERROR", c="r")
assert False
# #####baseline_insertion_border=10000000
stats = Stats(mode=self.mode,use_cash=True,status_bar=True)#, )
stats.init(self.tempdir_project_folder, name, language, visibility, corpus_id=corpus_id, version= version, baseline_delimiter="++",
encryption_key=encryption_key,
ignore_hashtag=False, force_cleaning=False,
ignore_url=False, ignore_mention=False, ignore_punkt=False, ignore_num=False)
stats.compute(corp,stream_number=1,adjust_to_cpu=False, freeze_db=False, baseline_insertion_border=100000000)
baseline = stats.statsdb.getall("baseline")
repls = stats.statsdb.getall("replications")
redus = stats.statsdb.getall("reduplications")
repls.should.be.equal(right_repls)
redus.should.be.equal(right_redus)
#sorted(baseline).should.be.equal(sorted(right_baseline_not_freezed_not_full_repetativ))
#p(baseline,"baseline")
self.configer.right_rep_num["en"]["repls"].should.be.equal(len(repls))
self.configer.right_rep_num["en"]["redus"].should.be.equal(len(redus))
self._check_correctnes(stats.col_index_orig,precomputed_data,repls=repls, redus=redus, baseline=baseline)
bas_synts = [bs[0] for bs in baseline]
for r in redus:
if r[5] not in bas_synts:
p(r[5],"ERROR", c="r")
assert False
for r in repls:
if r[5] not in bas_synts:
p(r[5],"ERROR", c="r")
assert False
#–––––––––––––––––––––––––––––––
#–––––––––––––––––––––––––––––––
#–––––––––––––––––––––––––––––––
##### FREEZED #####
stats = Stats(mode=self.mode,use_cash=True,status_bar=True)#, )
stats.init(self.tempdir_project_folder, name, language, visibility, corpus_id=corpus_id, version= version, baseline_delimiter="++",
encryption_key=encryption_key,
ignore_hashtag=False, force_cleaning=False,
ignore_url=False, ignore_mention=False, ignore_punkt=False, ignore_num=False)
stats.compute(corp,stream_number=1,adjust_to_cpu=False, freeze_db=True, baseline_insertion_border=10)
baseline = stats.statsdb.getall("baseline")
repls = stats.statsdb.getall("replications")
redus = stats.statsdb.getall("reduplications")
#p(list(stats.get_data([u':hashtag:'], repl=True, redu=True, baseline=True)))
#p(baseline,"right_baseline_freezed_not_full_repetativ")
#sys.exit()
repls.should.be.equal(right_repls)
redus.should.be.equal(right_redus)
#sorted(baseline).should.be.equal(sorted(right_baseline_freezed_not_full_repetativ))
#
self.configer.right_rep_num["en"]["repls"].should.be.equal(len(repls))
self.configer.right_rep_num["en"]["redus"].should.be.equal(len(redus))
self._check_correctnes(stats.col_index_orig,precomputed_data,repls=repls, redus=redus, baseline=baseline)
bas_synts = [bs[0] for bs in baseline]
for r in redus:
if r[5] not in bas_synts:
p(r[5],"ERROR", c="r")
assert False
for r in repls:
if r[5] not in bas_synts:
p(r[5],"ERROR", c="r")
assert False
# ########################################################################################
# ##################################################################
# ######## full_repetativ_syntagma=True
# ########################################################################################
# ########################################################################################
#self.mode = "prod+"
#####NOT FREEZED #####
#### baseline_insertion_border=10 ####
stats = Stats(mode=self.mode,use_cash=True,status_bar=True)#, )
stats.init(self.tempdir_project_folder, name, language, visibility, corpus_id=corpus_id, version= version, baseline_delimiter="++",
encryption_key=encryption_key, full_repetativ_syntagma=True,
ignore_hashtag=False, force_cleaning=False,
ignore_url=False, ignore_mention=False, ignore_punkt=False, ignore_num=False)
stats.compute(corp,stream_number=1,adjust_to_cpu=False, freeze_db=False, baseline_insertion_border=10)
baseline = stats.statsdb.getall("baseline")
repls = stats.statsdb.getall("replications")
redus = stats.statsdb.getall("reduplications")
#p(repls,"repls")
#p(redus, "redus")
repls.should.be.equal(right_repls)
redus.should.be.equal(right_redus)
#sorted(baseline).should.be.equal(sorted(right_baseline_not_freezed_full_repetativ))
#p(baseline,"right_baseline_not_freezed_full_repetativ")
#sys.exit()
self.configer.right_rep_num["en"]["repls"].should.be.equal(len(repls))
self.configer.right_rep_num["en"]["redus"].should.be.equal(len(redus))
self._check_correctnes(stats.col_index_orig,precomputed_data,repls=repls, redus=redus, baseline=baseline)
bas_synts = [bs[0] for bs in baseline]
for r in redus:
if r[5] not in bas_synts:
p(r[5],"ERROR", c="r")
assert False
for r in repls:
if r[5] not in bas_synts:
p(r[5],"ERROR", c="r")
assert False
# #–––––––––––––––––––––––––––––––
##### FREEZED #####
stats = Stats(mode=self.mode,use_cash=True,status_bar=True)#, )
stats.init(self.tempdir_project_folder, name, language, visibility, corpus_id=corpus_id, version= version, baseline_delimiter="++",
encryption_key=encryption_key,full_repetativ_syntagma=True,
ignore_hashtag=False, force_cleaning=False,
ignore_url=False, ignore_mention=False, ignore_punkt=False, ignore_num=False)
stats.compute(corp,stream_number=1,adjust_to_cpu=False, freeze_db=True, baseline_insertion_border=10)
baseline = stats.statsdb.getall("baseline")
repls = stats.statsdb.getall("replications")
redus = stats.statsdb.getall("reduplications")
#p(list(stats.get_data([u':hashtag:'], repl=True, redu=True, baseline=True)))
repls.should.be.equal(right_repls)
redus.should.be.equal(right_redus)
#sorted(baseline).should.be.equal(sorted(right_baseline_freezed_full_repetativ))
#p(baseline,"right_baseline_freezed_full_repetativ")
self.configer.right_rep_num["en"]["repls"].should.be.equal(len(repls))
self.configer.right_rep_num["en"]["redus"].should.be.equal(len(redus))
self._check_correctnes(stats.col_index_orig,precomputed_data,repls=repls, redus=redus, baseline=baseline)
bas_synts = [bs[0] for bs in baseline]
for r in redus:
if r[5] not in bas_synts:
p(r[5],"ERROR", c="r")
assert False
for r in repls:
if r[5] not in bas_synts:
p(r[5],"ERROR", c="r")
assert False
def _check_correctnes(self,indexes, precomputed_data,repls=False, redus=False, baseline=False):
import copy
### Step 1: Summerizing
dict_repls = defaultdict(lambda:defaultdict(lambda:defaultdict(lambda: 0)))
dict_redus = defaultdict(lambda:defaultdict(lambda:defaultdict(lambda: 0)))
dict_baseline = defaultdict()
if repls:
ix_repl = indexes["repl"]
for r in repls:
doc_id = r[ix_repl["doc_id"]]
index_in_corpus = r[ix_repl["index_in_corpus"]]
word = r[ix_repl["normalized_word"]]
dict_repls[word][doc_id][index_in_corpus] += 1
if redus:
ix_redu = indexes["redu"]
for r in redus:
doc_id = r[ix_redu["doc_id"]]
index_in_corpus = r[ix_redu["index_in_corpus"]]
word = r[ix_redu["normalized_word"]]
redu_length = r[ix_redu["redu_length"]]
dict_redus[word][doc_id][index_in_corpus] += redu_length
#p(baseline)
if baseline:
ix_b = indexes["baseline"]
for b in baseline:
syntagma = b[ix_b["syntagma"]]
scope = b[ix_b["scope"]]
occur_syntagma_all = b[ix_b["occur_syntagma_all"]]
if int(scope) == 1:
dict_baseline[syntagma] = occur_syntagma_all
##### Step 2: Counts
computed_counts = defaultdict(lambda:defaultdict(lambda:[0,0]))
if repls:
for word, word_data in dict_repls.items():
for doc_id, doc_data in word_data.items():
#current_doc_id = doc_id
for index_in_corpus, counter in doc_data.items():
#if current_doc_id == doc_id:
# if
computed_counts[word]["repl"][0] += 1
computed_counts[word]["repl"][1] += counter
if redus:
for word, word_data in dict_redus.items():
for doc_id, doc_data in word_data.items():
for index_in_corpus, counter in doc_data.items():
computed_counts[word]["redu"][0] += 1
computed_counts[word]["redu"][1] += counter
if baseline:
for syntagma, counter in dict_baseline.items():
computed_counts[syntagma]["baseline"] = counter
### Step 3: Comparation
precounted_reps = precomputed_data
computed_counts = { word:{ phanomen: tuple(counter) if isinstance(counter, (list, tuple)) else counter for phanomen, counter in data.items()} for word, data in computed_counts.items() }
precounted_reps = { word:{ phanomen: tuple(counter) if isinstance(counter, (list, tuple)) else counter for phanomen, counter in data.items()} for word, data in precounted_reps.items() }
copy_precounted_reps = copy.deepcopy(precounted_reps)
copy_computed_counts = copy.deepcopy(computed_counts)
if (repls and baseline) or (redus and baseline):
computed_counts = { word:data for word, data in computed_counts.items() if ("repl" in data and "baseline" in data) or ("redu" in data and "baseline" in data) }
#p((computed_counts))
for word, data in precounted_reps.items():
for phanomen, counts in data.items():
if phanomen == "baseline":
if baseline:
if counts != computed_counts[word][phanomen]:
precomputed = counts
extracted = computed_counts[word][phanomen]
#p((word, precomputed,extracted), "ERROR",c="c")
#assert False
else:
del copy_computed_counts[word][phanomen]
del copy_precounted_reps[word][phanomen]
else:
if not (phanomen == "repl" and repls):
continue
elif not (phanomen == "redu" and redus):
continue
if tuple(counts) != tuple(computed_counts[word][phanomen]):
precomputed = tuple(counts)
extracted = tuple(computed_counts[word][phanomen])
#p((word, precomputed,extracted), "ERROR",c="c")
##assert False
else:
del copy_computed_counts[word][phanomen]
del copy_precounted_reps[word][phanomen]
# for item in sorted(computed_counts.items()):
# print " " + str(item)
# p("fghjk\n", c="r")
# for item in sorted(precounted_reps.items()):
# print " " + str(item)
for word, data in precounted_reps.items():
if computed_counts[word] == data:
del copy_computed_counts[word]
del copy_precounted_reps[word]
else:
msg = u"Not Equal Data for word: '{}' >>>> '{}' != '{}' <<<<".format(word, data, computed_counts[word])
#p(msg)
copy_computed_counts = {word:data for word, data in copy_computed_counts.items() if len(data)>1}
if copy_computed_counts:
#p(copy_computed_counts, "copy_computed_counts")
assert False
copy_computed_counts = {word:data for word, data in copy_precounted_reps.items() if len(data)>1}
if copy_precounted_reps:
#p(copy_precounted_reps, "copy_precounted_reps")
assert False
assert True
def pretty_print_uniq(self,item, syn_order=False, baseline_small=True ):
    """Dump an extracted-data dict as copy-pastable ``right_<key> = ...`` literals.

    Debug/helper printer used to regenerate the hard-coded gold-standard
    blocks in this test module. Brackets are chosen to mirror the container
    type (``()`` for tuples, ``[]`` for lists) so the printed text is a valid
    Python literal with the same structure as the data.

    :param item: dict of extracted stats (assumed keys include e.g.
        "syntagma", "baseline", "stem_syn" — TODO confirm against callers)
    :param syn_order: if True, use the nested (word, reps)-pair layout;
        otherwise use the flat row-per-line layout
    :param baseline_small: if True (flat layout only), print the "baseline"
        entry on a single line instead of expanding it
    """
    if syn_order:
        #print "fghjk"
        print "\n\n\n"
        for k,v in item.iteritems():
            #print "fghjk111"
            #print v
            print "\n"
            #print "--------------- {} -------------------".format(k)
            #p((k,v))
            if v and k not in ["syntagma", "baseline", "stem_syn"]:
                main_open_tag = "(" if isinstance(v, tuple) else "["
                print "\t\tright_{} = {}".format(k,main_open_tag)
                # NOTE(review): the (len == 3 and v[1] is a bool) shape is
                # presumably a (rows, flag, extra) triple — verify against the
                # producers of this data.
                if len(v) == 3 and v[1] in [True, False]:
                    main_open_tag1 ="(" if isinstance( v[0], tuple) else "["
                    print "\t\t\t\t\t\t {}".format(main_open_tag1)
                    for data_for_syntagmas_part in v[0]:
                        # each part is a (word, reps) pair
                        word = data_for_syntagmas_part[0]
                        reps = data_for_syntagmas_part[1]
                        #p((word, reps))
                        #p(data_for_syntagmas_part, "data_for_syntagmas_part")
                        #print "%%"
                        main_open_tag_2 = "(" if isinstance(data_for_syntagmas_part, tuple) else "["
                        open_tag = "(" if isinstance(reps, tuple) else "["
                        #print "\t\t\t\t\t {}, {}".format(repr(word),open_tag)
                        print "\t\t\t\t\t\t\t{}{}, {}".format( main_open_tag_2,repr(word),open_tag)
                        # #print open_tag
                        # open_tag = "(" if isinstance(reps[0], tuple) else "["
                        # print "\t\t\t\t\t\t\t\t\t {}".format(open_tag)
                        for row in reps:
                            print "\t\t\t\t\t\t\t\t\t\t\t {},".format(row)
                        #for row in rows:
                        #    print "\t\t\t\t\t\t\t\t {},".format(row)
                        # close_tag = ")" if isinstance(reps[0], tuple) else "]"
                        # print "\t\t\t\t\t\t\t\t\t {},".format(close_tag)
                        # print "\t\t\t\t\t\t\t\t\t {},".format(reps[1])
                        # print "\t\t\t\t\t\t\t\t\t {},".format(reps[2])
                        main_close_tag_2 = ")" if isinstance(data_for_syntagmas_part, tuple) else "]"
                        close_tag = ")" if isinstance(reps, tuple) else "]"
                        # #print "\t\t\t\t\t\t {}".format(close_tag)
                        print "\t\t\t\t\t\t\t\t\t\t {}\n\t\t\t\t\t\t\t\t {},".format(close_tag,main_close_tag_2)
                    # main_close_tag = ")" if isinstance(v, tuple) else "]"
                    # print "\t\t\t {}".format(main_close_tag)
                    main_close_tag1 =")" if isinstance( v[0], tuple) else "]"
                    print "\t\t\t\t\t\t {},".format(main_close_tag1)
                    print "\t\t\t\t\t\t {},".format(v[1])
                    print "\t\t\t\t\t\t {},".format(v[2])
                else:
                    #p("fghjkl")
                    # plain sequence of (word, reps) pairs
                    for data in v:
                        word = data[0]
                        reps = data[1]
                        #print "ff§"
                        main_open_tag_2 = "(" if isinstance(data, tuple) else "["
                        open_tag = "(" if isinstance(reps, tuple) else "["
                        #print "\t\t\t\t\tright_{} = {}".format(k,open_tag)
                        print "\t\t\t\t\t{}{}, {}".format( main_open_tag_2,repr(word),open_tag)
                        #print open_tag
                        #p(reps,"reps")
                        for row in reps:
                            print "\t\t\t\t\t\t\t\t {},".format(row)
                        close_tag = ")" if isinstance(reps, tuple) else "]"
                        main_close_tag_2 = ")" if isinstance(data, tuple) else "]"
                        print "\t\t\t\t\t\t\t {}\n\t\t\t\t\t {},".format(close_tag,main_close_tag_2)
                        #print "\t\t\t\t {}".format(main_close_tag_2)
                    main_close_tag = ")" if isinstance(v, tuple) else "]"
                    print "\t\t\t\t {}".format(main_close_tag)
            else:
                # empty value or an excluded key: print as a one-liner
                #print "fghjk333"
                #open_tag = "(" if isinstance(reps, tuple) else "["
                #close_tag = ")" if isinstance(reps, tuple) else "]"
                #print "\t\tright_{} = {} {} {}".format(k,open_tag, reps, close_tag)
                print "\t\tright_{} = {}".format(k, v)
    else:
        # flat layout: one row per line, no (word, reps) nesting
        print "\n\n\n"
        for k,v in item.iteritems():
            #print v
            print "\n"
            #print "--------------- {} -------------------".format(k)
            l = len(v)
            #p((l,v))
            if len(v) >= 2 and k not in ["syntagma", "stem_syn"]:
                if k == "baseline" and baseline_small:
                    # compact single-line form for the (typically huge) baseline
                    print "\t\tright_{} = {}".format(k, v,)
                    continue
                # NOTE(review): v[1] in [True, False] also matches 0/1 via
                # bool equality — confirm that is not an issue for this data.
                if len(v) == 3 and v[1] in [True, False]:
                    open_tag = "(" if isinstance(v, tuple) else "["
                    print "\t\tright_{} = {}".format(k,open_tag)
                    #print open_tag
                    open_tag = "(" if isinstance(v[0], tuple) else "["
                    print "\t\t\t\t\t\t {}".format(open_tag)
                    for row in v[0]:
                        print "\t\t\t\t\t\t\t {},".format(row)
                    #for row in rows:
                    #    print "\t\t\t\t\t {},".format(row)
                    close_tag = ")" if isinstance(v[0], tuple) else "]"
                    print "\t\t\t\t\t\t {},".format(close_tag)
                    print "\t\t\t\t\t\t {},".format(v[1])
                    print "\t\t\t\t\t\t {},".format(v[2])
                    close_tag = ")" if isinstance(v, tuple) else "]"
                    print "\t\t\t {}".format(close_tag)
                else:
                    open_tag = "(" if isinstance(v, tuple) else "["
                    print "\t\tright_{} = {}".format(k,open_tag)
                    #print open_tag
                    for row in v:
                        print "\t\t\t\t\t {},".format(row)
                    close_tag = ")" if isinstance(v, tuple) else "]"
                    print "\t\t\t\t {}".format(close_tag)
            else:
                # short value or excluded key: print as a one-liner
                #open_tag = "(" if isinstance(v, tuple) else "["
                #close_tag = ")" if isinstance(v, tuple) else "]"
                #print "\t\tright_{} = {} {} {}".format(k,open_tag, v, close_tag)
                print "\t\tright_{} = {}".format(k, v,)
def _summerize_reps(self,indexes,repls, redus, baseline):
    """Summarize repetition rows into counts for a SINGLE word/syntagma.

    Builds the same word -> doc -> position aggregation as
    ``_check_correctnes``, then returns a dict with up to three keys:
    ``{"repl": (n_pos, n_occur), "redu": (n_pos, summed_length), "baseline": n}``.

    NOTE(review): the output section below reads the leftover loop variables
    ``word`` and ``syntagma``, i.e. only the LAST processed word/syntagma is
    reported. This is presumably only correct when the inputs concern exactly
    one word — verify against the callers before reusing it for multi-word data.

    :param indexes: mapping with "repl"/"redu"/"baseline" column-index dicts
    :param repls: iterable of replication rows, or a falsy value to skip
    :param redus: iterable of reduplication rows, or a falsy value to skip
    :param baseline: iterable of baseline rows, or a falsy value to skip
    """
    import copy
    ### Step 1: Summerizing
    dict_repls = defaultdict(lambda:defaultdict(lambda:defaultdict(lambda: 0)))
    dict_redus = defaultdict(lambda:defaultdict(lambda:defaultdict(lambda: 0)))
    dict_baseline = defaultdict()
    if repls:
        ix_repl = indexes["repl"]
        for r in repls:
            doc_id = r[ix_repl["doc_id"]]
            index_in_corpus = r[ix_repl["index_in_corpus"]]
            word = r[ix_repl["normalized_word"]]
            dict_repls[word][doc_id][index_in_corpus] += 1
    if redus:
        ix_redu = indexes["redu"]
        for r in redus:
            doc_id = r[ix_redu["doc_id"]]
            index_in_corpus = r[ix_redu["index_in_corpus"]]
            word = r[ix_redu["normalized_word"]]
            redu_length = r[ix_redu["redu_length"]]
            dict_redus[word][doc_id][index_in_corpus] += redu_length
    #p(baseline)
    if baseline:
        ix_b = indexes["baseline"]
        for b in baseline:
            # first element of the syntagma only; scope==1 keeps single tokens
            syntagma = b[ix_b["syntagma"]][0]
            scope = b[ix_b["scope"]]
            occur_syntagma_all = b[ix_b["occur_syntagma_all"]]
            if int(scope) == 1:
                dict_baseline[syntagma] = occur_syntagma_all
    ##### Step 2: Counts
    computed_counts = defaultdict(lambda:defaultdict(lambda:[0,0]))
    if repls:
        for word, word_data in dict_repls.items():
            for doc_id, doc_data in word_data.items():
                #current_doc_id = doc_id
                for index_in_corpus, counter in doc_data.items():
                    #if current_doc_id == doc_id:
                    # if
                    computed_counts[word]["repl"][0] += 1
                    computed_counts[word]["repl"][1] += counter
    if redus:
        for word, word_data in dict_redus.items():
            for doc_id, doc_data in word_data.items():
                for index_in_corpus, counter in doc_data.items():
                    computed_counts[word]["redu"][0] += 1
                    computed_counts[word]["redu"][1] += counter
    if baseline:
        for syntagma, counter in dict_baseline.items():
            computed_counts[syntagma]["baseline"] = counter
    # 'word'/'syntagma' are the last values of the loops above (see NOTE in docstring)
    out_repls = computed_counts[word]["repl"] if repls else None
    out_redus = computed_counts[word]["redu"] if redus else None
    out_baseline = computed_counts[syntagma]["baseline"] if baseline else None
    output = {}
    # only emit phenomena that were actually observed (non-zero counts)
    if out_repls and out_repls[0] > 0:
        output["repl"] = tuple(out_repls)
    if out_redus and out_redus[0] > 0:
        output["redu"] = tuple(out_redus)
    if out_baseline and out_baseline > 0:
        output["baseline"] = out_baseline
    return output
def _summerize_reps2(self, indexes, repls, redus, baseline):
    """Aggregate raw repl/redu/baseline rows into per-phenomenon count dicts.

    Args:
        indexes: mapping of phenomenon name ("repl"/"redu"/"baseline") to a
            {column_name: tuple_index} mapping used to read the raw rows.
        repls: iterable of raw replication rows, or falsy to skip.
        redus: iterable of raw reduplication rows, or falsy to skip.
        baseline: iterable of raw baseline rows, or falsy to skip.

    Returns:
        A dict of the form::

            {"repl":     {word: [num_occurrences, summed_counter]},
             "redu":     {word: [num_occurrences, summed_redu_length]},
             "baseline": {syntagma_tuple: occur_syntagma_all}}

        containing only the phenomena whose input was non-empty.

    Unlike ``_summerize_reps`` the result is keyed phenomenon-first, and the
    baseline rows are NOT filtered on ``scope == 1`` (the filter is kept
    disabled on purpose, mirroring the commented-out line in the original).
    """
    ### Step 1: group raw rows by word -> doc_id -> index_in_corpus
    dict_repls = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
    dict_redus = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
    dict_baseline = {}
    if repls:
        ix_repl = indexes["repl"]
        for row in repls:
            word = row[ix_repl["normalized_word"]]
            doc_id = row[ix_repl["doc_id"]]
            pos = row[ix_repl["index_in_corpus"]]
            dict_repls[word][doc_id][pos] += 1
    if redus:
        ix_redu = indexes["redu"]
        for row in redus:
            word = row[ix_redu["normalized_word"]]
            doc_id = row[ix_redu["doc_id"]]
            pos = row[ix_redu["index_in_corpus"]]
            # a reduplication contributes its full length, not just 1
            dict_redus[word][doc_id][pos] += row[ix_redu["redu_length"]]
    if baseline:
        ix_b = indexes["baseline"]
        for row in baseline:
            # NOTE: no scope filter here (see docstring); the syntagma is
            # frozen to a tuple so it can be used as a dict key.
            dict_baseline[tuple(row[ix_b["syntagma"]])] = row[ix_b["occur_syntagma_all"]]
    ##### Step 2: fold the grouped rows into [occurrences, total] pairs
    computed_counts = defaultdict(lambda: defaultdict(lambda: [0, 0]))
    if repls:
        for word, word_data in dict_repls.items():
            for doc_data in word_data.values():
                for counter in doc_data.values():
                    computed_counts["repl"][word][0] += 1
                    computed_counts["repl"][word][1] += counter
    if redus:
        for word, word_data in dict_redus.items():
            for doc_data in word_data.values():
                for counter in doc_data.values():
                    computed_counts["redu"][word][0] += 1
                    computed_counts["redu"][word][1] += counter
    if baseline:
        for syntagma, counter in dict_baseline.items():
            computed_counts["baseline"][syntagma] = counter
    # Convert the nested defaultdicts into plain dicts for clean equality
    # comparisons in the tests.
    return {phanomen: dict(data) for phanomen, data in computed_counts.items()}
@attr(status='stable')
# @wipd
def test_get_data_for_one_syntagma_compared_with_gold_stabdard_611_0(self):
    """Compare Stats._get_data_for_one_syntagma() against gold-standard counts.

    For each case, raw repl/redu/baseline rows are fetched for one syntagma,
    folded into per-word counts via _summerize_reps / _summerize_reps2 and
    compared with the expected gold standard.  The first section runs with
    full-repetativness scope enabled (True), the section after
    recompute_syntagma_repetativity_scope(False) re-checks multi-word
    syntagmas without it.
    """
    self.prj_folder()
    #self.blogger_corpus()
    self.test_dbs()
    #### DE ######
    # NOTE(review): banner says DE but the EN stats DB is opened below —
    # presumably a copy-paste leftover; confirm against the DE twin test.
    #stats = Stats(mode=self.mode)
    stats = Stats(mode=self.mode,use_cash=True)#, )
    stats.open(os.path.join(self.tempdir_testdbs,self.db_blogger_plaintext_stats_en))
    gold_standard_data = self.configer._counted_reps["en"]
    ################################################################################################################################################
    ################################################ I. FullRepetativnes= True #################################################################################
    ########################################################################################################################################
    stats.recompute_syntagma_repetativity_scope(True)
    ## Case 1###
    syntagma = ["bad"]
    item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
    right_data = gold_standard_data[syntagma[0]]
    #p(right_data)
    answer = self._summerize_reps(stats.col_index_orig, item["repl"], item["redu"],item["baseline"])
    #p(answer, "answer")
    repl_num = right_data["repl"][1] #= sum([counts[1] for word, counts in right_data["repl"] ])
    redu_num = right_data["redu"][0] #sum([counts[0] for word, counts in right_data["redu"].items() ])
    right_data.should.be.equal(answer)
    len(item["repl"]).should.be.equal(repl_num)
    len(item["redu"]).should.be.equal(redu_num)
    ## Case 2###
    syntagma = ["-("]
    item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
    right_data = gold_standard_data[syntagma[0]]
    answer = self._summerize_reps(stats.col_index_orig, item["repl"], item["redu"],item["baseline"])
    #p(answer, "answer")
    right_data.should.be.equal(answer)
    ## Case 3###
    syntagma = ["-)"]
    item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
    right_data = gold_standard_data[syntagma[0]]
    answer = self._summerize_reps(stats.col_index_orig, item["repl"], item["redu"],item["baseline"])
    #p(answer, "answer")
    right_data.should.be.equal(answer)
    ## Case 4###
    syntagma = ["=)"]
    item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
    right_data = gold_standard_data[syntagma[0]]
    answer = self._summerize_reps(stats.col_index_orig, item["repl"], item["redu"],item["baseline"])
    #p(answer, "answer")
    right_data.should.be.equal(answer)
    ## Case 5###
    syntagma = ["."]
    item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
    right_data = gold_standard_data[syntagma[0]]
    answer = self._summerize_reps(stats.col_index_orig, item["repl"], item["redu"],item["baseline"])
    #p(answer, "answer")
    right_data.should.be.equal(answer)
    ## Case 6###
    syntagma = [u'\U0001f600']
    item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
    right_data = gold_standard_data[syntagma[0]]
    answer = self._summerize_reps(stats.col_index_orig, item["repl"], item["redu"],item["baseline"])
    #p(answer, "answer")
    right_data.should.be.equal(answer)
    ## Case 7###
    syntagma = [u'\U0001f62b']
    item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
    right_data = gold_standard_data[syntagma[0]]
    answer = self._summerize_reps(stats.col_index_orig, item["repl"], item["redu"],item["baseline"])
    #p(answer, "answer")
    right_data.should.be.equal(answer)
    ## Case 8###
    syntagma = [ u'but']
    item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
    right_data = gold_standard_data[syntagma[0]]
    answer = self._summerize_reps(stats.col_index_orig, item["repl"], item["redu"],item["baseline"])
    #p(answer, "answer")
    right_data.should.be.equal(answer)
    ## Case 9###
    syntagma = [u'se']
    item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
    right_data = gold_standard_data[syntagma[0]]
    answer = self._summerize_reps(stats.col_index_orig, item["repl"], item["redu"],item["baseline"])
    #p(answer, "answer")
    right_data.should.be.equal(answer)
    ## Case 10###
    syntagma = [u'big']
    item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
    right_data = gold_standard_data[syntagma[0]]
    answer = self._summerize_reps(stats.col_index_orig, item["repl"], item["redu"],item["baseline"])
    #p(answer, "answer")
    right_data.should.be.equal(answer)
    ## Case 11###
    # NOTE(review): identical to Case 9 (same syntagma [u'se']) — probably a
    # copy-paste duplicate; confirm whether a different token was intended.
    syntagma = [u'se']
    item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
    right_data = gold_standard_data[syntagma[0]]
    answer = self._summerize_reps(stats.col_index_orig, item["repl"], item["redu"],item["baseline"])
    #p(answer, "answer")
    right_data.should.be.equal(answer)
    ## Case 12###
    syntagma = [u'right']
    item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
    right_data = gold_standard_data[syntagma[0]]
    answer = self._summerize_reps(stats.col_index_orig, item["repl"], item["redu"],item["baseline"])
    #p(answer, "answer")
    right_data.should.be.equal(answer)
    ## Case 12###
    # NOTE(review): the "Case 12" label is reused three times below; the case
    # numbering in this test is unreliable.
    # From here on the expected data is written inline (pos-tag syntagmas are
    # not part of the gold standard) and compared via _summerize_reps2.
    syntagma = [u'EMOASC']
    item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="pos")
    #self.pretty_print_uniq(item)
    right_data = {
        'repl': {
            u'=)': [1, 1],
            u':-(': [2, 2],
            u'-)': [1, 1],
            u'-(': [1, 1]},
        'baseline': {
            (u':-(', u'@real_trump', u'#shetlife'): 1,
            (u'-(', u'\U0001f62b', u':-(', u'#shetlife', u'http://www.noooo.com'): 1,
            (u'=)',): 1, (u':-(',): 2,
            (u':-(', u'@real_trump', u'#shetlife', u'#readytogo'): 1,
            (u':-(', u'#shetlife', u'http://www.noooo.com'): 1,
            (u'=)', u'\U0001f600', u'\U0001f308'): 1,
            (u'=)', u'\U0001f600', u'\U0001f308', u'\U0001f600'): 1,
            (u'=)', u'\U0001f600'): 1, (u':-(', u'@real_trump'): 1,
            (u'-(', u'\U0001f62b'): 1,
            (u'-)',): 1,
            (u'-(',): 1,
            (u'-(', u'\U0001f62b', u':-('): 1,
            (u':-(', u'@real_trump', u'#shetlife', u'#readytogo', u'http://www.absurd.com'): 1,
            (u'-(', u'\U0001f62b', u':-(', u'#shetlife'): 1,
            (u':-(', u'#shetlife'): 1}}
    repl_num = sum([counts[1] for word, counts in right_data["repl"].items() ])
    answer = self._summerize_reps2(stats.col_index_orig, item["repl"], item["redu"],item["baseline"])
    #p(answer, "answer")
    right_data.should.be.equal(answer)
    len(item["repl"]).should.be.equal(repl_num)
    ## Case 12###
    syntagma = [u'number']
    item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="pos")
    #self.pretty_print_uniq(item)
    right_data = {
        'repl': {u'1': [2, 2]},
        'baseline': {(u'1', u'.', u'but', u'you', u'but'): 1, (u'1', u'\U0001f62b', u'1', u'.'): 1, (u'1', u'\U0001f62b', u'1', u'.', u'but', u'you'): 1, (u'1', u'\U0001f62b'): 1, (u'1', u'.', u'but'): 1, (u'1', u'\U0001f62b', u'1', u'.', u'but'): 1, (u'1', u'.', u'but', u'you', u'but', u'you'): 1, (u'1',): 2, (u'1', u'.', u'but', u'you'): 1, (u'1', u'\U0001f62b', u'1'): 1, (u'1', u'.'): 1}}
    repl_num = sum([counts[1] for word, counts in right_data["repl"].items() ])
    answer = self._summerize_reps2(stats.col_index_orig, item["repl"], item["redu"],item["baseline"])
    #p(answer, "answer")
    right_data.should.be.equal(answer)
    len(item["repl"]).should.be.equal(repl_num)
    ## Case 15###
    syntagma = ["very","pity"]
    item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
    right_data = {
        "repl":{
            "very": [2,4],
            "pity": [2,4],
            },
        "redu":{
            "very": [1,3],
            "pity": [1,4],
            },
        "baseline":{
            ("very","pity"): 1,
            },
        }
    repl_num = sum([counts[1] for word, counts in right_data["repl"].items() ])
    redu_num = sum([counts[0] for word, counts in right_data["redu"].items() ])
    answer = self._summerize_reps2(stats.col_index_orig, item["repl"], item["redu"],item["baseline"])
    #p(answer, "answer")
    right_data["repl"].should.be.equal(answer["repl"])
    right_data["redu"].should.be.equal(answer["redu"])
    right_data["baseline"].should.be.equal(answer["baseline"])
    len(item["repl"]).should.be.equal(repl_num)
    len(item["redu"]).should.be.equal(redu_num)
    #right_data.should.be.equal(answer)
    ## Case 14###
    syntagma = ["bad","news"]
    item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
    #self.pretty_print_uniq(item)
    right_data = {
        "baseline":{
            ("bad","news"): 1,
            },
        }
    answer = self._summerize_reps2(stats.col_index_orig, item["repl"], item["redu"],item["baseline"])
    #p(answer, "answer")
    right_data.should.be.equal(answer)
    ## Case 15###
    syntagma = ["but","you"]
    item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
    #self.pretty_print_uniq(item)
    right_data = {
        'repl': {
            u'you': [6, 8],
            u'but': [10, 15]},
        'baseline': {
            (u'but', u'you'): 4},
        'redu': {
            u'you': [2, 4],
            u'but': [2, 4]}
        }
    repl_num = sum([counts[1] for word, counts in right_data["repl"].items() ])
    redu_num = sum([counts[0] for word, counts in right_data["redu"].items() ])
    answer = self._summerize_reps2(stats.col_index_orig, item["repl"], item["redu"],item["baseline"])
    #p(answer, "answer")
    right_data.should.be.equal(answer)
    len(item["repl"]).should.be.equal(repl_num)
    len(item["redu"]).should.be.equal(redu_num)
    ## Case 16###
    syntagma = [u"\U0001f600",u"\U0001f308"]
    item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
    #self.pretty_print_uniq(item)
    right_data = {
        'repl':
            {u'\U0001f600': [3, 3],
            u'\U0001f308': [3, 3]},
        'baseline': {
            (u'\U0001f600', u'\U0001f308'): 3}}
    repl_num = sum([counts[1] for word, counts in right_data["repl"].items() ])
    #redu_num = sum([counts[0] for word, counts in right_data["redu"].items() ])
    answer = self._summerize_reps2(stats.col_index_orig, item["repl"], item["redu"],item["baseline"])
    #p(answer, "answer")
    right_data.should.be.equal(answer)
    len(item["repl"]).should.be.equal(repl_num)
    #len(item["redu"]).should.be.equal(redu_num)
    ## Case 34###
    # Same pair as Case 16 but in reversed order — word order matters for the
    # baseline key and changes the repl counts.
    syntagma = [u"\U0001f308",u"\U0001f600"]
    item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
    #self.pretty_print_uniq(item)
    right_data = {
        'repl': {
            u'\U0001f600': [2, 2],
            u'\U0001f308': [2, 2]},
        'baseline': {
            (u'\U0001f308', u'\U0001f600'): 3}
        }
    repl_num = sum([counts[1] for word, counts in right_data["repl"].items() ])
    answer = self._summerize_reps2(stats.col_index_orig, item["repl"], item["redu"],item["baseline"])
    #p(answer, "answer")
    right_data.should.be.equal(answer)
    len(item["repl"]).should.be.equal(repl_num)
    # ----- II. FullRepetativnes = False: re-check multi-word syntagmas -----
    stats.recompute_syntagma_repetativity_scope(False)
    ## Case 30###
    syntagma = ["bad","news"]
    item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
    right_data = {
        'repl': {
            u'bad': [4, 7]},
        'baseline': {
            (u'bad', u'news'): 1},
        'redu': {
            u'bad': [1, 5]}}
    answer = self._summerize_reps2(stats.col_index_orig, item["repl"], item["redu"],item["baseline"])
    #p(answer, "answer")
    right_data.should.be.equal(answer)
    ## Case 31###
    syntagma = ["tiny","model"]
    item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
    right_data = {
        'repl': {u'model': [1, 2]},
        'baseline': {(u'tiny', u'model'): 2},
        'redu': {u'tiny': [1, 6]}}
    answer = self._summerize_reps2(stats.col_index_orig, item["repl"], item["redu"],item["baseline"])
    # #p(answer, "answer")
    right_data.should.be.equal(answer)
    ## Case 15###
    syntagma = ["but","you"]
    item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
    #self.pretty_print_uniq(item)
    right_data = {
        'repl': {
            u'you': [6, 8],
            u'but': [10, 15]},
        'baseline': {
            (u'but', u'you'): 4},
        'redu': {
            u'you': [2, 4],
            u'but': [4, 10]}}
    repl_num = sum([counts[1] for word, counts in right_data["repl"].items() ])
    redu_num = sum([counts[0] for word, counts in right_data["redu"].items() ])
    answer = self._summerize_reps2(stats.col_index_orig, item["repl"], item["redu"],item["baseline"])
    #p(answer, "answer")
    right_data.should.be.equal(answer)
    len(item["repl"]).should.be.equal(repl_num)
    len(item["redu"]).should.be.equal(redu_num)
    ## Case 33###
    syntagma = [u"\U0001f600",u"\U0001f308"]
    item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
    #self.pretty_print_uniq(item)
    right_data = {
        'repl':
            {u'\U0001f600': [3, 3],
            u'\U0001f308': [3, 3]},
        'baseline': {
            (u'\U0001f600', u'\U0001f308'): 3}}
    repl_num = sum([counts[1] for word, counts in right_data["repl"].items() ])
    #redu_num = sum([counts[0] for word, counts in right_data["redu"].items() ])
    answer = self._summerize_reps2(stats.col_index_orig, item["repl"], item["redu"],item["baseline"])
    #p(answer, "answer")
    right_data.should.be.equal(answer)
    len(item["repl"]).should.be.equal(repl_num)
    #len(item["redu"]).should.be.equal(redu_num)
    ## Case 34###
    syntagma = [u"\U0001f308",u"\U0001f600"]
    item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
    #self.pretty_print_uniq(item)
    right_data = {
        'repl': {
            u'\U0001f600': [2, 2],
            u'\U0001f308': [3, 3]},
        'baseline': {
            (u'\U0001f308', u'\U0001f600'): 3}
        }
    repl_num = sum([counts[1] for word, counts in right_data["repl"].items() ])
    answer = self._summerize_reps2(stats.col_index_orig, item["repl"], item["redu"],item["baseline"])
    #p(answer, "answer")
    right_data.should.be.equal(answer)
    len(item["repl"]).should.be.equal(repl_num)
def convert_all_lists_to_tuples(self, giv_object):
    """Freeze a nested sequence (up to two levels deep) into tuples.

    For each item in *giv_object*:
      * if every sub-item is itself iterable, the item becomes a tuple of
        tuples (e.g. ``[[1, 2], [3]]`` -> ``((1, 2), (3,))``);
      * else, if the item is iterable, it becomes a flat tuple
        (e.g. ``[4, 5]`` -> ``(4, 5)``);
      * otherwise the item is kept as-is (e.g. ``6`` -> ``6``).

    The tests use this to turn DB rows containing lists into hashable
    tuples so they can be compared as sets.

    Args:
        giv_object: iterable of items to convert.

    Returns:
        A tuple with each item converted as described above.
    """
    new_obj = []
    for item in giv_object:
        try:
            # Deepest case first: tuple-ize every sub-item.  A partial
            # new_item is discarded if any sub-item is not iterable.
            new_item = [tuple(underitem) for underitem in item]
            new_obj.append(tuple(new_item))
        except TypeError:
            # tuple() raises TypeError on non-iterables; the old bare
            # `except:` swallowed everything (incl. KeyboardInterrupt).
            try:
                new_obj.append(tuple(item))
            except TypeError:
                new_obj.append(item)  # scalar — keep untouched
    return tuple(new_obj)
@attr(status='stable')
#@wipd
def test_get_data_for_one_syntagma_611_1(self):
self.prj_folder()
#self.blogger_corpus()
self.test_dbs()
#### DE ######
#stats = Stats(mode=self.mode)
stats = Stats(mode=self.mode,use_cash=True)#, )
stats.open(os.path.join(self.tempdir_testdbs,self.db_blogger_plaintext_stats_de))
################################################################################################################################################
################################################ I. FullRepetativnes= True #################################################################################
########################################################################################################################################
stats.recompute_syntagma_repetativity_scope(True)
################################################################################################################################################
################################################################################################################################################
##### return_full_tuple = False #######################################################################################################+
###########################################################################################################################################
################################################################################################################################################
############################
####### SCOPE 1 ##############
############################
###################### syntagma_type="lexem" #############################
# ########stemmed_search=False #
#p(stats.statsdb.rownum("replications"))
### Case 1.1:
syntagma = ["klitze"]
item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
#self.pretty_print_uniq(item)
#p(item,"item")
#p(stats.statsdb.rownum("replications"))
extracted_repl = item["repl"]
extracted_redu = item["redu"]
extracted_baseline = item["baseline"]
extracted_syntagma = item["syntagma"]
right_repl = [
(54, 11111, u'[5, 6, 15, 3]', u'[0, 1]', u'[0, 1]', u'klitze', u'klitze^4', u'klitz', u'e', 4, 5, None, u'VAPPER', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'sache', u'["NN", null, "sach"]', u'.', u'["symbol", null, "."]', u'die', u'["PDS", null, "die"]', u'aber', u'["ADV", null, "aber"]'),
(1, 8888, u'[4, 11]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze^7', u'klitz', u'i', 4, 2, u'[0, 0]', u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5in^5e": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]'),
(2, 8888, u'[4, 11]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze^7', u'klitz', u'e', 7, 5, u'[0, 0]', u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5in^5e": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]'),
(20, 10000, u'[12, 3, 8]', u'[0, 1]', u'[0, 1]', u'klitze', u'klitze^4', u'klitz', u'e', 4, 5, None, u'ADJA', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'@sch\xf6nesleben', u'["mention", null, "@schonesleb"]', u'#machwasdaraus', u'["hashtag", null, "#machwasdaraus"]', u'#bewegedeinarsch', u'["hashtag", null, "#bewegedeinarsch"]'),
]
right_syntagma = ['klitze']
right_baseline = [[[u'klitze'], u'klitz', 1, 8, u'3', u'4', u'2', u'6', u'3', u'2']]
right_redu = [
(18, 12222, u'[24]', u'[0, 1]', u'[0, 1]', u'klitze', u'klitz', u'{"klitze": 4}', 4, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u',', u'["symbol", null, ","]', u'die', u'["PRELS", null, "die"]', u'ich', u'["PPER", null, "ich"]'),
(1, 8888, u'[4, 11]', u'[0, 0]', u'[0, 0]', u'klitze', u'klitz', u'{"klitze": 1, "kli^4tze^7": 1}', 2, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5in^5e": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]'),
]
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
extracted_syntagma.should.be.equal(right_syntagma)
##item.should.be.equal({"repl":right_repl, "redu":right_redu,"baseline":right_baseline, "syntagma":right_syntagma})
# ########stemmed_search=False #
### Case 1.2:
syntagma = ["kleine"]
item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
#self.pretty_print_uniq(item)
#p(item,"item")
extracted_repl = item["repl"]
extracted_redu = item["redu"]
extracted_baseline = item["baseline"]
extracted_syntagma = item["syntagma"]
right_repl = [
(82, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'e', 4, 2, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(83, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'i', 5, 3, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(84, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'n', 4, 4, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(85, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'e', 8, 5, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(3, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5in^5e', u'klein', u'e', 5, 2, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(4, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5in^5e', u'klein', u'n', 5, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(5, 8888, u'[4, 11]', u'[0, 3]', u'[0, 1]', u'kleine', u'klein^3e', u'klein', u'n', 3, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(21, 10000, u'[12, 3, 8]', u'[0, 2]', u'[0, 2]', u'kleine', u'kle^5ine', u'klein', u'e', 5, 2, None, u'ADJA', u'["neutral", 0.0]', None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'klitze', u'["ADJA", null, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'@sch\xf6nesleben', u'["mention", null, "@schonesleb"]', u'#machwasdaraus', u'["hashtag", null, "#machwasdaraus"]', u'#bewegedeinarsch', u'["hashtag", null, "#bewegedeinarsch"]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]'),
(57, 11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]', u'kleine', u'kle^5i^2n^4e^5', u'klein', u'e', 5, 2, None, u'ADJA', u'["neutral", 0.0]', u'!', u'["symbol", null, "!"]', u'weil', u'["KOUS", null, "weil"]', u'es', u'["PPER", null, "es"]', u'ja', u'["PTKMA", null, "ja"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]'),
(58, 11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]', u'kleine', u'kle^5i^2n^4e^5', u'klein', u'n', 4, 4, None, u'ADJA', u'["neutral", 0.0]', u'!', u'["symbol", null, "!"]', u'weil', u'["KOUS", null, "weil"]', u'es', u'["PPER", null, "es"]', u'ja', u'["PTKMA", null, "ja"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]'),
(59, 11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]', u'kleine', u'kle^5i^2n^4e^5', u'klein', u'e', 5, 5, None, u'ADJA', u'["neutral", 0.0]', u'!', u'["symbol", null, "!"]', u'weil', u'["KOUS", null, "weil"]', u'es', u'["PPER", null, "es"]', u'ja', u'["PTKMA", null, "ja"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]'),
]
# Expected results for the preceding query (surface form "kleine", unstemmed):
# the syntagma echo, one baseline row and one reduplication row.
right_syntagma = ['kleine']
right_baseline = [[[u'kleine'], u'klein', 1, 7, u'5', u'11', u'1', u'2', u'5', u'1']]
right_redu = [(2, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'klein', u'{"kle^5in^5e": 1, "klein^3e": 1}', 2, u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]')]
# Order-insensitive comparison: rows are converted to hashable tuples and
# compared as sets, because extraction order is not guaranteed.
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
# Baseline rows mix ints and unicode strings; normalize every element via
# unicode() before the set comparison.
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
extracted_syntagma.should.be.equal(right_syntagma)
##item.should.be.equal({"repl":right_repl, "redu":right_redu,"baseline":right_baseline, "syntagma":right_syntagma})
# ########stemmed_search=True #
### Case 1.3:
# Same lexem-type query, but on the stem "klein" with stemmed_search enabled,
# so all inflected surface forms (kleine/kleines/kleinere/klein) should match.
syntagma = ["klein"]
item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem",stemmed_search=True)
#self.pretty_print_uniq(item)
#p(item,"item")
# Unpack the four result categories returned for this syntagma.
extracted_repl = item["repl"]
extracted_redu = item["redu"]
extracted_baseline = item["baseline"]
extracted_syntagma = item["syntagma"]
# Expected replication (letter-repetition) rows for the stemmed "klein" query.
# Each tuple is one DB row: id, doc id, token-index list, positions, surface
# word, annotated form (with ^N repetition markers), stem, repeated char,
# repetition length, char index, POS tag, sentiment, then context tokens
# (word, '[POS, repl-map, stem]' JSON pairs) padded with None.
right_repl = [
(3, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5in^5e', u'klein', u'e', 5, 2, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(4, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5in^5e', u'klein', u'n', 5, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(5, 8888, u'[4, 11]', u'[0, 3]', u'[0, 1]', u'kleine', u'klein^3e', u'klein', u'n', 3, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(21, 10000, u'[12, 3, 8]', u'[0, 2]', u'[0, 2]', u'kleine', u'kle^5ine', u'klein', u'e', 5, 2, None, u'ADJA', u'["neutral", 0.0]', None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'klitze', u'["ADJA", null, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'@sch\xf6nesleben', u'["mention", null, "@schonesleb"]', u'#machwasdaraus', u'["hashtag", null, "#machwasdaraus"]', u'#bewegedeinarsch', u'["hashtag", null, "#bewegedeinarsch"]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]'),
(26, 10000, u'[12, 3, 8]', u'[1, 0]', u'[1, 0]', u'kleines', u'kleine^4s^7', u'klein', u'e', 4, 5, u'[1, 0]', u'NN', u'["neutral", 0.0]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]', u'beser', u'["ADJD", null, "bes"]', u'kan', u'["FM", {"ka^4n^5": 1, "kan^6": 1}, "kan"]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]'),
(27, 10000, u'[12, 3, 8]', u'[1, 0]', u'[1, 0]', u'kleines', u'kleine^4s^7', u'klein', u's', 7, 6, u'[1, 0]', u'NN', u'["neutral", 0.0]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]', u'beser', u'["ADJD", null, "bes"]', u'kan', u'["FM", {"ka^4n^5": 1, "kan^6": 1}, "kan"]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]'),
(28, 10000, u'[12, 3, 8]', u'[1, 1]', u'[1, 0]', u'kleines', u'klein^4e^3s^4', u'klein', u'n', 4, 4, u'[1, 0]', u'NN', u'["neutral", 0.0]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]', u'beser', u'["ADJD", null, "bes"]', u'kan', u'["FM", {"ka^4n^5": 1, "kan^6": 1}, "kan"]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]'),
(29, 10000, u'[12, 3, 8]', u'[1, 1]', u'[1, 0]', u'kleines', u'klein^4e^3s^4', u'klein', u'e', 3, 5, u'[1, 0]', u'NN', u'["neutral", 0.0]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]', u'beser', u'["ADJD", null, "bes"]', u'kan', u'["FM", {"ka^4n^5": 1, "kan^6": 1}, "kan"]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]'),
(30, 10000, u'[12, 3, 8]', u'[1, 1]', u'[1, 0]', u'kleines', u'klein^4e^3s^4', u'klein', u's', 4, 6, u'[1, 0]', u'NN', u'["neutral", 0.0]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]', u'beser', u'["ADJD", null, "bes"]', u'kan', u'["FM", {"ka^4n^5": 1, "kan^6": 1}, "kan"]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]'),
(31, 10000, u'[12, 3, 8]', u'[1, 2]', u'[1, 0]', u'kleines', u'kle^4i^5n^3e^2s^3', u'klein', u'e', 4, 2, u'[1, 0]', u'NN', u'["neutral", 0.0]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]', u'beser', u'["ADJD", null, "bes"]', u'kan', u'["FM", {"ka^4n^5": 1, "kan^6": 1}, "kan"]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]'),
(32, 10000, u'[12, 3, 8]', u'[1, 2]', u'[1, 0]', u'kleines', u'kle^4i^5n^3e^2s^3', u'klein', u'i', 5, 3, u'[1, 0]', u'NN', u'["neutral", 0.0]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]', u'beser', u'["ADJD", null, "bes"]', u'kan', u'["FM", {"ka^4n^5": 1, "kan^6": 1}, "kan"]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]'),
(33, 10000, u'[12, 3, 8]', u'[1, 2]', u'[1, 0]', u'kleines', u'kle^4i^5n^3e^2s^3', u'klein', u'n', 3, 4, u'[1, 0]', u'NN', u'["neutral", 0.0]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]', u'beser', u'["ADJD", null, "bes"]', u'kan', u'["FM", {"ka^4n^5": 1, "kan^6": 1}, "kan"]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]'),
(34, 10000, u'[12, 3, 8]', u'[1, 2]', u'[1, 0]', u'kleines', u'kle^4i^5n^3e^2s^3', u'klein', u's', 3, 6, u'[1, 0]', u'NN', u'["neutral", 0.0]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]', u'beser', u'["ADJD", null, "bes"]', u'kan', u'["FM", {"ka^4n^5": 1, "kan^6": 1}, "kan"]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]'),
(37, 10000, u'[12, 3, 8]', u'[2, 0]', u'[2, 0]', u'kleinere', u'kleinere^5', u'klein', u'e', 5, 7, u'[2, 0]', u'NE', u'["neutral", 0.0]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'kleines', u'["NN", {"kle^4i^5n^3e^2s^3": 1, "klein^4e^3s^4": 1, "kleine^4s^7": 1}, "klein"]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]'),
(38, 10000, u'[12, 3, 8]', u'[2, 1]', u'[2, 0]', u'kleinere', u'kleine^3r^2e^5', u'klein', u'e', 3, 5, u'[2, 0]', u'NE', u'["neutral", 0.0]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'kleines', u'["NN", {"kle^4i^5n^3e^2s^3": 1, "klein^4e^3s^4": 1, "kleine^4s^7": 1}, "klein"]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]'),
(39, 10000, u'[12, 3, 8]', u'[2, 1]', u'[2, 0]', u'kleinere', u'kleine^3r^2e^5', u'klein', u'e', 5, 7, u'[2, 0]', u'NE', u'["neutral", 0.0]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'kleines', u'["NN", {"kle^4i^5n^3e^2s^3": 1, "klein^4e^3s^4": 1, "kleine^4s^7": 1}, "klein"]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]'),
(45, 10000, u'[12, 3, 8]', u'[2, 7]', u'[2, 4]', u'klein', u'kle^3i^3n^3', u'klein', u'e', 3, 2, u'[2, 4]', u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None),
(46, 10000, u'[12, 3, 8]', u'[2, 7]', u'[2, 4]', u'klein', u'kle^3i^3n^3', u'klein', u'i', 3, 3, u'[2, 4]', u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None),
(47, 10000, u'[12, 3, 8]', u'[2, 7]', u'[2, 4]', u'klein', u'kle^3i^3n^3', u'klein', u'n', 3, 4, u'[2, 4]', u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None),
(48, 10000, u'[12, 3, 8]', u'[2, 8]', u'[2, 4]', u'klein', u'klein^5', u'klein', u'n', 5, 4, u'[2, 4]', u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None),
(52, 10000, u'[12, 3, 8]', u'[2, 12]', u'[2, 7]', u'kleines', u'klein^3e^2s', u'klein', u'n', 3, 4, u'[2, 7]', u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', None, None, None, None, None, None, None, None, None, None),
(53, 10000, u'[12, 3, 8]', u'[2, 13]', u'[2, 7]', u'kleines', u'kleines^4', u'klein', u's', 4, 6, u'[2, 7]', u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', None, None, None, None, None, None, None, None, None, None),
(57, 11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]', u'kleine', u'kle^5i^2n^4e^5', u'klein', u'e', 5, 2, None, u'ADJA', u'["neutral", 0.0]', u'!', u'["symbol", null, "!"]', u'weil', u'["KOUS", null, "weil"]', u'es', u'["PPER", null, "es"]', u'ja', u'["PTKMA", null, "ja"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]'),
(58, 11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]', u'kleine', u'kle^5i^2n^4e^5', u'klein', u'n', 4, 4, None, u'ADJA', u'["neutral", 0.0]', u'!', u'["symbol", null, "!"]', u'weil', u'["KOUS", null, "weil"]', u'es', u'["PPER", null, "es"]', u'ja', u'["PTKMA", null, "ja"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]'),
(59, 11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]', u'kleine', u'kle^5i^2n^4e^5', u'klein', u'e', 5, 5, None, u'ADJA', u'["neutral", 0.0]', u'!', u'["symbol", null, "!"]', u'weil', u'["KOUS", null, "weil"]', u'es', u'["PPER", null, "es"]', u'ja', u'["PTKMA", null, "ja"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]'),
(66, 11111, u'[5, 6, 15, 3]', u'[3, 0]', u'[3, 0]', u'kleines', u'kleine^4s^7', u'klein', u'e', 4, 5, u'[3, 0]', u'NN', u'["neutral", 0.0]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(67, 11111, u'[5, 6, 15, 3]', u'[3, 0]', u'[3, 0]', u'kleines', u'kleine^4s^7', u'klein', u's', 7, 6, u'[3, 0]', u'NN', u'["neutral", 0.0]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(68, 11111, u'[5, 6, 15, 3]', u'[3, 1]', u'[3, 0]', u'kleines', u'klein^4e^3s^4', u'klein', u'n', 4, 4, u'[3, 0]', u'NN', u'["neutral", 0.0]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(69, 11111, u'[5, 6, 15, 3]', u'[3, 1]', u'[3, 0]', u'kleines', u'klein^4e^3s^4', u'klein', u'e', 3, 5, u'[3, 0]', u'NN', u'["neutral", 0.0]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(70, 11111, u'[5, 6, 15, 3]', u'[3, 1]', u'[3, 0]', u'kleines', u'klein^4e^3s^4', u'klein', u's', 4, 6, u'[3, 0]', u'NN', u'["neutral", 0.0]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(71, 11111, u'[5, 6, 15, 3]', u'[3, 2]', u'[3, 0]', u'kleines', u'kle^4i^5n^3e^2s^3', u'klein', u'e', 4, 2, u'[3, 0]', u'NN', u'["neutral", 0.0]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(72, 11111, u'[5, 6, 15, 3]', u'[3, 2]', u'[3, 0]', u'kleines', u'kle^4i^5n^3e^2s^3', u'klein', u'i', 5, 3, u'[3, 0]', u'NN', u'["neutral", 0.0]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(73, 11111, u'[5, 6, 15, 3]', u'[3, 2]', u'[3, 0]', u'kleines', u'kle^4i^5n^3e^2s^3', u'klein', u'n', 3, 4, u'[3, 0]', u'NN', u'["neutral", 0.0]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(74, 11111, u'[5, 6, 15, 3]', u'[3, 2]', u'[3, 0]', u'kleines', u'kle^4i^5n^3e^2s^3', u'klein', u's', 3, 6, u'[3, 0]', u'NN', u'["neutral", 0.0]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(82, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'e', 4, 2, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(83, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'i', 5, 3, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(84, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'n', 4, 4, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(85, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'e', 8, 5, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
]
# Stemmed search echoes back the stem as the syntagma.
right_syntagma = [u'klein']
# One baseline row per matching surface form (kleines/kleinere/kleine/klein).
right_baseline = [[[u'kleines'], u'klein', 1, 8, u'8', u'20', u'3', u'8', u'8', u'3'], [[u'kleinere'], u'klein', 1, 2, u'2', u'3', u'1', u'2', u'2', u'1'], [[u'kleine'], u'klein', 1, 7, u'5', u'11', u'1', u'2', u'5', u'1'], [[u'klein'], u'klein', 1, 2, u'2', u'4', u'1', u'2', u'2', u'1']]
# Expected reduplication rows: id, doc id, token indexes, positions, surface
# word, stem, '{variant: count}' JSON map, variant count, POS, sentiment,
# then context tokens padded with None.
right_redu = [
(2, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'klein', u'{"kle^5in^5e": 1, "klein^3e": 1}', 2, u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(11, 10000, u'[12, 3, 8]', u'[1, 0]', u'[1, 0]', u'kleines', u'klein', u'{"kle^4i^5n^3e^2s^3": 1, "klein^4e^3s^4": 1, "kleine^4s^7": 1}', 3, u'NN', u'["neutral", 0.0]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]', u'beser', u'["ADJD", null, "bes"]', u'kan', u'["FM", {"ka^4n^5": 1, "kan^6": 1}, "kan"]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]'),
(12, 10000, u'[12, 3, 8]', u'[2, 0]', u'[2, 0]', u'kleinere', u'klein', u'{"kleinere^5": 1, "kleine^3r^2e^5": 1}', 2, u'NE', u'["neutral", 0.0]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'kleines', u'["NN", {"kle^4i^5n^3e^2s^3": 1, "klein^4e^3s^4": 1, "kleine^4s^7": 1}, "klein"]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]'),
(14, 10000, u'[12, 3, 8]', u'[2, 7]', u'[2, 4]', u'klein', u'klein', u'{"kle^3i^3n^3": 1, "klein^5": 1}', 2, u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None),
(16, 10000, u'[12, 3, 8]', u'[2, 12]', u'[2, 7]', u'kleines', u'klein', u'{"klein^3e^2s": 1, "kleines^4": 1}', 2, u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', None, None, None, None, None, None, None, None, None, None),
(17, 11111, u'[5, 6, 15, 3]', u'[3, 0]', u'[3, 0]', u'kleines', u'klein', u'{"kle^4i^5n^3e^2s^3": 1, "klein^4e^3s^4": 1, "kleine^4s^7": 1}', 3, u'NN', u'["neutral", 0.0]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
]
# Order-insensitive set comparison of extracted vs. expected rows.
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
# Baseline rows mix ints and strings; normalize via unicode() first.
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
extracted_syntagma.should.be.equal(right_syntagma)
##item.should.be.equal({"repl":right_repl, "redu":right_redu,"baseline":right_baseline, "syntagma":right_syntagma})
# # ############################
# # ####### SCOPE 2 ##############
# # ############################
# Scope 2: two-token syntagmas (bigrams) instead of single tokens.
####################### syntagma_type="lexem" #############################
########stemmed_search=False #
### Case 1.1:
# Exact (unstemmed) search for the bigram "klitze kleine".
syntagma = ["klitze", "kleine"]
item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
#p(item,"item")
#self.pretty_print_uniq(item)
# Unpack the four result categories returned for this syntagma.
extracted_repl = item["repl"]
extracted_redu = item["redu"]
extracted_baseline = item["baseline"]
extracted_syntagma = item["syntagma"]
# Expected replacement rows for both members of the bigram (see the column
# layout documented for the single-token cases above in this test).
right_repl = [
(1, 8888, u'[4, 11]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze^7', u'klitz', u'i', 4, 2, u'[0, 0]', u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5in^5e": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]'),
(2, 8888, u'[4, 11]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze^7', u'klitz', u'e', 7, 5, u'[0, 0]', u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5in^5e": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]'),
(20, 10000, u'[12, 3, 8]', u'[0, 1]', u'[0, 1]', u'klitze', u'klitze^4', u'klitz', u'e', 4, 5, None, u'ADJA', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'@sch\xf6nesleben', u'["mention", null, "@schonesleb"]', u'#machwasdaraus', u'["hashtag", null, "#machwasdaraus"]', u'#bewegedeinarsch', u'["hashtag", null, "#bewegedeinarsch"]'),
(3, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5in^5e', u'klein', u'e', 5, 2, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(4, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5in^5e', u'klein', u'n', 5, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(5, 8888, u'[4, 11]', u'[0, 3]', u'[0, 1]', u'kleine', u'klein^3e', u'klein', u'n', 3, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(21, 10000, u'[12, 3, 8]', u'[0, 2]', u'[0, 2]', u'kleine', u'kle^5ine', u'klein', u'e', 5, 2, None, u'ADJA', u'["neutral", 0.0]', None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'klitze', u'["ADJA", null, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'@sch\xf6nesleben', u'["mention", null, "@schonesleb"]', u'#machwasdaraus', u'["hashtag", null, "#machwasdaraus"]', u'#bewegedeinarsch', u'["hashtag", null, "#bewegedeinarsch"]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]'),
]
right_syntagma = ['klitze', 'kleine']
# Bigram baseline: stems joined with '++', per-token stats as JSON-ish lists.
right_baseline = [[[u'klitze', u'kleine'], u'klitz++klein', 2, 4, u'[2, 3]', u'[3, 4]', u'[1, 1]', u'[2, 2]', u'2', u'1']]
right_redu = [
(1, 8888, u'[4, 11]', u'[0, 0]', u'[0, 0]', u'klitze', u'klitz', u'{"klitze": 1, "kli^4tze^7": 1}', 2, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5in^5e": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]'),
(2, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'klein', u'{"kle^5in^5e": 1, "klein^3e": 1}', 2, u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
]
# Order-insensitive set comparison of extracted vs. expected rows.
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
# Baseline rows mix ints and strings; normalize via unicode() first.
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
extracted_syntagma.should.be.equal(right_syntagma)
##item.should.be.equal({"repl":right_repl, "redu":right_redu,"baseline":right_baseline, "syntagma":right_syntagma})
########stemmed_search=True #
### Case 1.2:
# Stemmed search for the same bigram, now given as stems "klitz klein".
syntagma = ["klitz", "klein"]
item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem",stemmed_search=True)
#p(item,"item")
#self.pretty_print_uniq(item)
# Unpack the four result categories returned for this syntagma.
extracted_repl = item["repl"]
extracted_redu = item["redu"]
extracted_baseline = item["baseline"]
extracted_syntagma = item["syntagma"]
right_repl = [
(1, 8888, u'[4, 11]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze^7', u'klitz', u'i', 4, 2, u'[0, 0]', u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5in^5e": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]'),
(2, 8888, u'[4, 11]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze^7', u'klitz', u'e', 7, 5, u'[0, 0]', u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5in^5e": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]'),
(20, 10000, u'[12, 3, 8]', u'[0, 1]', u'[0, 1]', u'klitze', u'klitze^4', u'klitz', u'e', 4, 5, None, u'ADJA', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'@sch\xf6nesleben', u'["mention", null, "@schonesleb"]', u'#machwasdaraus', u'["hashtag", null, "#machwasdaraus"]', u'#bewegedeinarsch', u'["hashtag", null, "#bewegedeinarsch"]'),
(42, 10000, u'[12, 3, 8]', u'[2, 5]', u'[2, 3]', u'klitz', u'kli^4tz', u'klitz', u'i', 4, 2, u'[2, 3]', u'NE', u'["neutral", 0.0]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None),
(43, 10000, u'[12, 3, 8]', u'[2, 6]', u'[2, 3]', u'klitz', u'kli^4tz^3', u'klitz', u'i', 4, 2, u'[2, 3]', u'NE', u'["neutral", 0.0]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None),
(44, 10000, u'[12, 3, 8]', u'[2, 6]', u'[2, 3]', u'klitz', u'kli^4tz^3', u'klitz', u'z', 3, 4, u'[2, 3]', u'NE', u'["neutral", 0.0]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None),
(49, 10000, u'[12, 3, 8]', u'[2, 10]', u'[2, 6]', u'klitzes', u'klitzes^4', u'klitz', u's', 4, 6, u'[2, 6]', u'FM', u'["neutral", 0.0]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None, None, None, None, None),
(50, 10000, u'[12, 3, 8]', u'[2, 11]', u'[2, 6]', u'klitzes', u'kli^3tzes^3', u'klitz', u'i', 3, 2, u'[2, 6]', u'FM', u'["neutral", 0.0]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None, None, None, None, None),
(51, 10000, u'[12, 3, 8]', u'[2, 11]', u'[2, 6]', u'klitzes', u'kli^3tzes^3', u'klitz', u's', 3, 6, u'[2, 6]', u'FM', u'["neutral", 0.0]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None, None, None, None, None),
(3, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5in^5e', u'klein', u'e', 5, 2, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(4, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5in^5e', u'klein', u'n', 5, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(5, 8888, u'[4, 11]', u'[0, 3]', u'[0, 1]', u'kleine', u'klein^3e', u'klein', u'n', 3, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(21, 10000, u'[12, 3, 8]', u'[0, 2]', u'[0, 2]', u'kleine', u'kle^5ine', u'klein', u'e', 5, 2, None, u'ADJA', u'["neutral", 0.0]', None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'klitze', u'["ADJA", null, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'@sch\xf6nesleben', u'["mention", null, "@schonesleb"]', u'#machwasdaraus', u'["hashtag", null, "#machwasdaraus"]', u'#bewegedeinarsch', u'["hashtag", null, "#bewegedeinarsch"]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]'),
(45, 10000, u'[12, 3, 8]', u'[2, 7]', u'[2, 4]', u'klein', u'kle^3i^3n^3', u'klein', u'e', 3, 2, u'[2, 4]', u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None),
(46, 10000, u'[12, 3, 8]', u'[2, 7]', u'[2, 4]', u'klein', u'kle^3i^3n^3', u'klein', u'i', 3, 3, u'[2, 4]', u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None),
(47, 10000, u'[12, 3, 8]', u'[2, 7]', u'[2, 4]', u'klein', u'kle^3i^3n^3', u'klein', u'n', 3, 4, u'[2, 4]', u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None),
(48, 10000, u'[12, 3, 8]', u'[2, 8]', u'[2, 4]', u'klein', u'klein^5', u'klein', u'n', 5, 4, u'[2, 4]', u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None),
(52, 10000, u'[12, 3, 8]', u'[2, 12]', u'[2, 7]', u'kleines', u'klein^3e^2s', u'klein', u'n', 3, 4, u'[2, 7]', u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', None, None, None, None, None, None, None, None, None, None),
(53, 10000, u'[12, 3, 8]', u'[2, 13]', u'[2, 7]', u'kleines', u'kleines^4', u'klein', u's', 4, 6, u'[2, 7]', u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', None, None, None, None, None, None, None, None, None, None),
]
right_syntagma = [u'klitz', u'klein']
right_baseline = [[[u'klitzes', u'kleines'], u'klitz++klein', 2, 1, u'[2, 2]', u'[3, 2]', u'[1, 1]', u'[2, 2]', u'1', u'1'], [[u'klitz', u'klein'], u'klitz++klein', 2, 1, u'[2, 2]', u'[3, 4]', u'[1, 1]', u'[3, 2]', u'1', u'1'], [[u'klitze', u'kleine'], u'klitz++klein', 2, 4, u'[2, 3]', u'[3, 4]', u'[1, 1]', u'[2, 2]', u'2', u'1']]
right_redu = [
(1, 8888, u'[4, 11]', u'[0, 0]', u'[0, 0]', u'klitze', u'klitz', u'{"klitze": 1, "kli^4tze^7": 1}', 2, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5in^5e": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]'),
(13, 10000, u'[12, 3, 8]', u'[2, 4]', u'[2, 3]', u'klitz', u'klitz', u'{"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}', 3, u'NE', u'["neutral", 0.0]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None),
(15, 10000, u'[12, 3, 8]', u'[2, 10]', u'[2, 6]', u'klitzes', u'klitz', u'{"klitzes^4": 1, "kli^3tzes^3": 1}', 2, u'FM', u'["neutral", 0.0]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None, None, None, None, None),
(2, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'klein', u'{"kle^5in^5e": 1, "klein^3e": 1}', 2, u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(14, 10000, u'[12, 3, 8]', u'[2, 7]', u'[2, 4]', u'klein', u'klein', u'{"kle^3i^3n^3": 1, "klein^5": 1}', 2, u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None),
(16, 10000, u'[12, 3, 8]', u'[2, 12]', u'[2, 7]', u'kleines', u'klein', u'{"klein^3e^2s": 1, "kleines^4": 1}', 2, u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', None, None, None, None, None, None, None, None, None, None),
]
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
extracted_syntagma.should.be.equal(right_syntagma)
##item.should.be.equal({"repl":right_repl, "redu":right_redu,"baseline":right_baseline, "syntagma":right_syntagma})
########get_also_non_full_repetativ_result=True #
### Case 1.3:
syntagma = [u'.', u'kleinere', u'auswahl']
item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem",stemmed_search=False,get_also_non_full_repetativ_result=True)
#p(item,"item")
#self.pretty_print_uniq(item)
right_repl = [
(36, 10000, u'[12, 3, 8]', u'[1, 4]', u'[1, 2]', u'.', u'.^5', u'.', u'.', 5, 0, None, u'symbol', u'["neutral", 0.0]', u'kan', u'["FM", {"ka^4n^5": 1, "kan^6": 1}, "kan"]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'kleines', u'["NN", {"kle^4i^5n^3e^2s^3": 1, "klein^4e^3s^4": 1, "kleine^4s^7": 1}, "klein"]', u'm\xe4dchen', u'["NN", null, "madch"]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]'),
(37, 10000, u'[12, 3, 8]', u'[2, 0]', u'[2, 0]', u'kleinere', u'kleinere^5', u'klein', u'e', 5, 7, u'[2, 0]', u'NE', u'["neutral", 0.0]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'kleines', u'["NN", {"kle^4i^5n^3e^2s^3": 1, "klein^4e^3s^4": 1, "kleine^4s^7": 1}, "klein"]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]'),
(38, 10000, u'[12, 3, 8]', u'[2, 1]', u'[2, 0]', u'kleinere', u'kleine^3r^2e^5', u'klein', u'e', 3, 5, u'[2, 0]', u'NE', u'["neutral", 0.0]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'kleines', u'["NN", {"kle^4i^5n^3e^2s^3": 1, "klein^4e^3s^4": 1, "kleine^4s^7": 1}, "klein"]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]'),
(39, 10000, u'[12, 3, 8]', u'[2, 1]', u'[2, 0]', u'kleinere', u'kleine^3r^2e^5', u'klein', u'e', 5, 7, u'[2, 0]', u'NE', u'["neutral", 0.0]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'kleines', u'["NN", {"kle^4i^5n^3e^2s^3": 1, "klein^4e^3s^4": 1, "kleine^4s^7": 1}, "klein"]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]'),
(40, 10000, u'[12, 3, 8]', u'[2, 2]', u'[2, 1]', u'auswahl', u'auswah^3l^4', u'auswahl', u'h', 3, 5, None, u'NN', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'kleines', u'["NN", {"kle^4i^5n^3e^2s^3": 1, "klein^4e^3s^4": 1, "kleine^4s^7": 1}, "klein"]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]'),
(41, 10000, u'[12, 3, 8]', u'[2, 2]', u'[2, 1]', u'auswahl', u'auswah^3l^4', u'auswahl', u'l', 4, 6, None, u'NN', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'kleines', u'["NN", {"kle^4i^5n^3e^2s^3": 1, "klein^4e^3s^4": 1, "kleine^4s^7": 1}, "klein"]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]'),
]
right_syntagma = [u'.', u'kleinere', u'auswahl']
right_baseline = [[[u'.', u'kleinere', u'auswahl'], u'.++klein++auswahl', 3, 1, u'[1, 2, 1]', u'[1, 3, 2]', None, None, u'1', None]]
right_redu = [(12, 10000, u'[12, 3, 8]', u'[2, 0]', u'[2, 0]', u'kleinere', u'klein', u'{"kleinere^5": 1, "kleine^3r^2e^5": 1}', 2, u'NE', u'["neutral", 0.0]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'kleines', u'["NN", {"kle^4i^5n^3e^2s^3": 1, "klein^4e^3s^4": 1, "kleine^4s^7": 1}, "klein"]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]')]
extracted_repl = item["repl"]
extracted_redu = item["redu"]
extracted_baseline = item["baseline"]
extracted_syntagma = item["syntagma"]
# set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
# set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
# list( list( unicode(elem) for elem in item ) for item in extracted_baseline).should.be.equal(list( list( unicode(elem) for elem in item ) for item in right_baseline))
# extracted_syntagma.should.be.equal(right_syntagma)
#item.should.be.equal({"repl":right_repl, "redu":right_redu,"baseline":right_baseline, "syntagma":right_syntagma})
# # ################################################################################################################################################
# # ################################################################################################################################################
# # ##### return_full_tuple = True #######################################################################################################+
# # ###########################################################################################################################################
# # ################################################################################################################################################
# # ############################
# # ####### SCOPE 1 ##############
# # ############################
####################### syntagma_type="lexem" #############################
### Case 1.1:
syntagma = ["klitze"]
item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem", return_full_tuple=True)
#self.pretty_print_uniq(item)
#p(item,"item")
extracted_repl = item["repl"]
extracted_redu = item["redu"]
extracted_baseline = item["baseline"]
extracted_syntagma = item["syntagma"]
right_repl = (
[
(54, 11111, u'[5, 6, 15, 3]', u'[0, 1]', u'[0, 1]', u'klitze', u'klitze^4', u'klitz', u'e', 4, 5, None, u'VAPPER', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'sache', u'["NN", null, "sach"]', u'.', u'["symbol", null, "."]', u'die', u'["PDS", null, "die"]', u'aber', u'["ADV", null, "aber"]'),
(1, 8888, u'[4, 11]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze^7', u'klitz', u'i', 4, 2, u'[0, 0]', u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5in^5e": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]'),
(2, 8888, u'[4, 11]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze^7', u'klitz', u'e', 7, 5, u'[0, 0]', u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5in^5e": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]'),
(20, 10000, u'[12, 3, 8]', u'[0, 1]', u'[0, 1]', u'klitze', u'klitze^4', u'klitz', u'e', 4, 5, None, u'ADJA', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'@sch\xf6nesleben', u'["mention", null, "@schonesleb"]', u'#machwasdaraus', u'["hashtag", null, "#machwasdaraus"]', u'#bewegedeinarsch', u'["hashtag", null, "#bewegedeinarsch"]'),
],
True,
None,
)
right_syntagma = ['klitze']
right_baseline = [[[u'klitze'], u'klitz', 1, 8, u'3', u'4', u'2', u'6', u'3', u'2']]
right_redu = (
[
(18, 12222, u'[24]', u'[0, 1]', u'[0, 1]', u'klitze', u'klitz', u'{"klitze": 4}', 4, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u',', u'["symbol", null, ","]', u'die', u'["PRELS", null, "die"]', u'ich', u'["PPER", null, "ich"]'),
(1, 8888, u'[4, 11]', u'[0, 0]', u'[0, 0]', u'klitze', u'klitz', u'{"klitze": 1, "kli^4tze^7": 1}', 2, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5in^5e": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]'),
],
True,
None,
)
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
extracted_syntagma.should.be.equal(right_syntagma)
##item.should.be.equal({"repl":right_repl, "redu":right_redu,"baseline":right_baseline, "syntagma":right_syntagma})
# # # ############################
# # # ####### SCOPE 2 ##############
# # # ############################
####################### syntagma_type="lexem" #############################
### Case 1.1:
syntagma = ["klitze", "kleine"]
item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem", return_full_tuple=True)
#p(item,"item")
#self.pretty_print_uniq(item)
extracted_repl = item["repl"]
extracted_redu = item["redu"]
extracted_baseline = item["baseline"]
extracted_syntagma = item["syntagma"]
right_repl = (
[
(1, 8888, u'[4, 11]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze^7', u'klitz', u'i', 4, 2, u'[0, 0]', u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5in^5e": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]'),
(2, 8888, u'[4, 11]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze^7', u'klitz', u'e', 7, 5, u'[0, 0]', u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5in^5e": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]'),
(20, 10000, u'[12, 3, 8]', u'[0, 1]', u'[0, 1]', u'klitze', u'klitze^4', u'klitz', u'e', 4, 5, None, u'ADJA', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'@sch\xf6nesleben', u'["mention", null, "@schonesleb"]', u'#machwasdaraus', u'["hashtag", null, "#machwasdaraus"]', u'#bewegedeinarsch', u'["hashtag", null, "#bewegedeinarsch"]'),
(3, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5in^5e', u'klein', u'e', 5, 2, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(4, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5in^5e', u'klein', u'n', 5, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(5, 8888, u'[4, 11]', u'[0, 3]', u'[0, 1]', u'kleine', u'klein^3e', u'klein', u'n', 3, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(21, 10000, u'[12, 3, 8]', u'[0, 2]', u'[0, 2]', u'kleine', u'kle^5ine', u'klein', u'e', 5, 2, None, u'ADJA', u'["neutral", 0.0]', None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'klitze', u'["ADJA", null, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'@sch\xf6nesleben', u'["mention", null, "@schonesleb"]', u'#machwasdaraus', u'["hashtag", null, "#machwasdaraus"]', u'#bewegedeinarsch', u'["hashtag", null, "#bewegedeinarsch"]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]'),
],
True,
2,
)
right_syntagma = ['klitze', 'kleine']
right_baseline = [[[u'klitze', u'kleine'], u'klitz++klein', 2, 4, u'[2, 3]', u'[3, 4]', u'[1, 1]', u'[2, 2]', u'2', u'1']]
right_redu = (
[
(1, 8888, u'[4, 11]', u'[0, 0]', u'[0, 0]', u'klitze', u'klitz', u'{"klitze": 1, "kli^4tze^7": 1}', 2, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5in^5e": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]'),
(2, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'klein', u'{"kle^5in^5e": 1, "klein^3e": 1}', 2, u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
],
True,
1,
)
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
extracted_syntagma.should.be.equal(right_syntagma)
##item.should.be.equal({"repl":right_repl, "redu":right_redu,"baseline":right_baseline, "syntagma":right_syntagma})
# # ################################################################################################################################################
# # ################################################ II. FullRepetativnes= False #################################################################################
# # ########################################################################################################################################
stats.recompute_syntagma_repetativity_scope(False)
# # # ############################
# # # ####### SCOPE 2 ##############
# # # ############################
####################### syntagma_type="lexem" #############################
### Case 1.1:
syntagma = ["klitze", "kleine"]
item = stats._get_data_for_one_syntagma(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
#p(item,"item")
#self.pretty_print_uniq(item)
extracted_repl = item["repl"]
extracted_redu = item["redu"]
extracted_baseline = item["baseline"]
extracted_syntagma = item["syntagma"]
right_repl = [
(54, 11111, u'[5, 6, 15, 3]', u'[0, 1]', u'[0, 1]', u'klitze', u'klitze^4', u'klitz', u'e', 4, 5, None, u'VAPPER', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'sache', u'["NN", null, "sach"]', u'.', u'["symbol", null, "."]', u'die', u'["PDS", null, "die"]', u'aber', u'["ADV", null, "aber"]'),
(1, 8888, u'[4, 11]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze^7', u'klitz', u'i', 4, 2, u'[0, 0]', u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5in^5e": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]'),
(2, 8888, u'[4, 11]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze^7', u'klitz', u'e', 7, 5, u'[0, 0]', u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5in^5e": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]'),
(20, 10000, u'[12, 3, 8]', u'[0, 1]', u'[0, 1]', u'klitze', u'klitze^4', u'klitz', u'e', 4, 5, None, u'ADJA', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'@sch\xf6nesleben', u'["mention", null, "@schonesleb"]', u'#machwasdaraus', u'["hashtag", null, "#machwasdaraus"]', u'#bewegedeinarsch', u'["hashtag", null, "#bewegedeinarsch"]'),
(3, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5in^5e', u'klein', u'e', 5, 2, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(4, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5in^5e', u'klein', u'n', 5, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(5, 8888, u'[4, 11]', u'[0, 3]', u'[0, 1]', u'kleine', u'klein^3e', u'klein', u'n', 3, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(21, 10000, u'[12, 3, 8]', u'[0, 2]', u'[0, 2]', u'kleine', u'kle^5ine', u'klein', u'e', 5, 2, None, u'ADJA', u'["neutral", 0.0]', None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'klitze', u'["ADJA", null, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'@sch\xf6nesleben', u'["mention", null, "@schonesleb"]', u'#machwasdaraus', u'["hashtag", null, "#machwasdaraus"]', u'#bewegedeinarsch', u'["hashtag", null, "#bewegedeinarsch"]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]'),
]
right_syntagma = ['klitze', 'kleine']
right_baseline = [[[u'klitze', u'kleine'], u'klitz++klein', 2, 4, u'[3, 3]', u'[4, 4]', u'[2, 1]', u'[6, 2]', None, None]]
right_redu = [
(18, 12222, u'[24]', u'[0, 1]', u'[0, 1]', u'klitze', u'klitz', u'{"klitze": 4}', 4, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u',', u'["symbol", null, ","]', u'die', u'["PRELS", null, "die"]', u'ich', u'["PPER", null, "ich"]'),
(1, 8888, u'[4, 11]', u'[0, 0]', u'[0, 0]', u'klitze', u'klitz', u'{"klitze": 1, "kli^4tze^7": 1}', 2, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5in^5e": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]'),
(2, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'klein', u'{"kle^5in^5e": 1, "klein^3e": 1}', 2, u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
]
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
extracted_syntagma.should.be.equal(right_syntagma)
##item.should.be.equal({"repl":right_repl, "redu":right_redu,"baseline":right_baseline, "syntagma":right_syntagma})
####################### syntagma_type="pos" #############################
@attr(status='stable')
#@wipd
def test_get_data_611_2(self):
self.prj_folder()
#self.blogger_corpus()
self.test_dbs()
#### DE ######
#stats = Stats(mode=self.mode)
stats = Stats(mode=self.mode,use_cash=True, status_bar=True)#, )
######### EN ########
stats.open(os.path.join(self.tempdir_testdbs,self.db_blogger_plaintext_stats_en))
###### 3
syntagma = ["EMOIMG","EMOIMG"]
data1 = list(stats.get_data(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="pos", order_output_by_syntagma_order=False,if_type_pos_return_lexem_syn=False))
repl1 = sorted(data1[0]["repl"])
redu1 = sorted(data1[0]["redu"])
data2 = list(stats.get_data(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="pos", order_output_by_syntagma_order=False,if_type_pos_return_lexem_syn=True))
ext_id = []
repl2 = []
for item in data2:
for r in item["repl"]:
if r[0] not in ext_id:
ext_id.append(r[0])
repl2.append(r)
ext_id = []
redu2 = []
for item in data2:
for r in item["redu"]:
if r[0] not in ext_id:
ext_id.append(r[0])
redu2.append(r)
#repl2 = sorted([r for item in data2 )
#redu2 = sorted([r for item in data2 for r in item["redu"]])
assert len(repl1)<= len(repl2)
assert len(redu1)<= len(redu2)
#p((repl1, repl2))
#p((redu1, redu2))
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
######### DE #######
stats.close()
stats.open(os.path.join(self.tempdir_testdbs,self.db_blogger_plaintext_stats_de))
################################################################################################
################################################################################################
###################################################################################################
############################stemmed_search = False #########################################
#################################################################################################
################################################################################################
################################################################################################
#
################################################################################################################################################
################################################ I. FullRepetativity = True #################################################################################
########################################################################################################################################
stats.recompute_syntagma_repetativity_scope(True)
### Case 0:
syntagma = ["big"]
data = list(stats.get_data(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem"))
#p(data,"data")
data.should.be.equal([])
# # # # ####################################################################################
# # # # ####################################################################################
# # # # #################################################################################
### Case 1.1:
syntagma = ["kleine"]
data = stats.get_data(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem")
#p()
len1 = len(data)
data = list(data)
len2 = len(data)
len1.should.be.equal(len2)
#p(data,"data")
#self.pretty_print_uniq(data[0])
extracted_repl = data[0]["repl"]
extracted_redu = data[0]["redu"]
extracted_baseline = data[0]["baseline"]
extracted_syntagma = data[0]["syntagma"]
right_repl = [
(82, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'e', 4, 2, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(83, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'i', 5, 3, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(84, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'n', 4, 4, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(85, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'e', 8, 5, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(3, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5in^5e', u'klein', u'e', 5, 2, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(4, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5in^5e', u'klein', u'n', 5, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(5, 8888, u'[4, 11]', u'[0, 3]', u'[0, 1]', u'kleine', u'klein^3e', u'klein', u'n', 3, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(21, 10000, u'[12, 3, 8]', u'[0, 2]', u'[0, 2]', u'kleine', u'kle^5ine', u'klein', u'e', 5, 2, None, u'ADJA', u'["neutral", 0.0]', None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'klitze', u'["ADJA", null, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'@sch\xf6nesleben', u'["mention", null, "@schonesleb"]', u'#machwasdaraus', u'["hashtag", null, "#machwasdaraus"]', u'#bewegedeinarsch', u'["hashtag", null, "#bewegedeinarsch"]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]'),
(57, 11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]', u'kleine', u'kle^5i^2n^4e^5', u'klein', u'e', 5, 2, None, u'ADJA', u'["neutral", 0.0]', u'!', u'["symbol", null, "!"]', u'weil', u'["KOUS", null, "weil"]', u'es', u'["PPER", null, "es"]', u'ja', u'["PTKMA", null, "ja"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]'),
(58, 11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]', u'kleine', u'kle^5i^2n^4e^5', u'klein', u'n', 4, 4, None, u'ADJA', u'["neutral", 0.0]', u'!', u'["symbol", null, "!"]', u'weil', u'["KOUS", null, "weil"]', u'es', u'["PPER", null, "es"]', u'ja', u'["PTKMA", null, "ja"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]'),
(59, 11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]', u'kleine', u'kle^5i^2n^4e^5', u'klein', u'e', 5, 5, None, u'ADJA', u'["neutral", 0.0]', u'!', u'["symbol", null, "!"]', u'weil', u'["KOUS", null, "weil"]', u'es', u'["PPER", null, "es"]', u'ja', u'["PTKMA", null, "ja"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]'),
]
right_syntagma = [u'kleine']
right_baseline = [[[u'kleine'], u'klein', 1, 7, u'5', u'11', u'1', u'2', u'5', u'1']]
right_redu = [(2, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'klein', u'{"kle^5in^5e": 1, "klein^3e": 1}', 2, u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]')]
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
extracted_syntagma.should.be.equal(right_syntagma)
# #data.should.be.equal([{"repl":right_repl, "redu":right_redu,"baseline":right_baseline, "syntagma":right_syntagma}])
# # # ####################################################################################
	# # # ###################### GET JUST A FEW COLUMNS ##################################
# # # ################################################################################
### Case 1.2:
columns_repl=['doc_id', 'redufree_len','index_in_redufree','index_in_corpus']
columns_redu = ['doc_id', 'redufree_len','index_in_redufree', 'index_in_corpus',"redu_length"]
columns_baseline = ['syntagma', 'occur_syntagma_all', "scope"]
#columns_baseline =
syntagma = ["kleine"]
data = list(stats.get_data(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem", get_columns_repl=columns_repl , get_columns_redu=columns_redu, get_columns_baseline=columns_baseline ))
#p(data,"data")
#self.pretty_print_uniq(data[0])
extracted_repl = data[0]["repl"]
extracted_redu = data[0]["redu"]
extracted_baseline = data[0]["baseline"]
extracted_syntagma = data[0]["syntagma"]
right_repl = [
[12222, u'[24]', u'[0, 21]', u'[0, 24]'],
[12222, u'[24]', u'[0, 21]', u'[0, 24]'],
[12222, u'[24]', u'[0, 21]', u'[0, 24]'],
[12222, u'[24]', u'[0, 21]', u'[0, 24]'],
[8888, u'[4, 11]', u'[0, 1]', u'[0, 2]'],
[8888, u'[4, 11]', u'[0, 1]', u'[0, 2]'],
[8888, u'[4, 11]', u'[0, 1]', u'[0, 3]'],
[10000, u'[12, 3, 8]', u'[0, 2]', u'[0, 2]'],
[11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]'],
[11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]'],
[11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]'],
]
right_syntagma = [u'kleine']
right_baseline = [[[u'kleine'], 7, 1]]
right_redu = [[8888, u'[4, 11]', u'[0, 1]', u'[0, 2]', 2]]
#p(self.convert_all_lists_to_tuples(extracted_repl))
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
extracted_syntagma.should.be.equal(right_syntagma)
# #data.should.be.equal([{"repl":right_repl, "redu":right_redu,"baseline":right_baseline, "syntagma":right_syntagma}])
# # # ####################################################################################
# # # ##################### #GET ORDERED SYNTAGMA (SCOPE 1) ##################################
# # # ##############################################################################
### Case 1.3: #order_output_by_syntagma_order
syntagma = ["kleine"]
data = list(stats.get_data(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem",order_output_by_syntagma_order=True))
#p(data,"data")
#self.pretty_print_uniq(data[0],syn_order=True)
extracted_repl = data[0]["repl"]
extracted_redu = data[0]["redu"]
extracted_baseline = data[0]["baseline"]
extracted_syntagma = data[0]["syntagma"]
right_repl = [
(u'kleine', (
(82, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'e', 4, 2, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(83, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'i', 5, 3, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(84, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'n', 4, 4, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(85, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'e', 8, 5, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(3, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5in^5e', u'klein', u'e', 5, 2, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(4, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5in^5e', u'klein', u'n', 5, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(5, 8888, u'[4, 11]', u'[0, 3]', u'[0, 1]', u'kleine', u'klein^3e', u'klein', u'n', 3, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(21, 10000, u'[12, 3, 8]', u'[0, 2]', u'[0, 2]', u'kleine', u'kle^5ine', u'klein', u'e', 5, 2, None, u'ADJA', u'["neutral", 0.0]', None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'klitze', u'["ADJA", null, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'@sch\xf6nesleben', u'["mention", null, "@schonesleb"]', u'#machwasdaraus', u'["hashtag", null, "#machwasdaraus"]', u'#bewegedeinarsch', u'["hashtag", null, "#bewegedeinarsch"]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]'),
(57, 11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]', u'kleine', u'kle^5i^2n^4e^5', u'klein', u'e', 5, 2, None, u'ADJA', u'["neutral", 0.0]', u'!', u'["symbol", null, "!"]', u'weil', u'["KOUS", null, "weil"]', u'es', u'["PPER", null, "es"]', u'ja', u'["PTKMA", null, "ja"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]'),
(58, 11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]', u'kleine', u'kle^5i^2n^4e^5', u'klein', u'n', 4, 4, None, u'ADJA', u'["neutral", 0.0]', u'!', u'["symbol", null, "!"]', u'weil', u'["KOUS", null, "weil"]', u'es', u'["PPER", null, "es"]', u'ja', u'["PTKMA", null, "ja"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]'),
(59, 11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]', u'kleine', u'kle^5i^2n^4e^5', u'klein', u'e', 5, 5, None, u'ADJA', u'["neutral", 0.0]', u'!', u'["symbol", null, "!"]', u'weil', u'["KOUS", null, "weil"]', u'es', u'["PPER", null, "es"]', u'ja', u'["PTKMA", null, "ja"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]'),
)
),
]
right_syntagma = [u'kleine']
right_baseline = [[[u'kleine'], u'klein', 1, 7, u'5', u'11', u'1', u'2', u'5', u'1']]
right_redu = [
(u'kleine', (
(2, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'klein', u'{"kle^5in^5e": 1, "klein^3e": 1}', 2, u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
)
),
]
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
extracted_syntagma.should.be.equal(right_syntagma)
# #data.should.be.equal([{"repl":right_repl, "redu":right_redu,"baseline":right_baseline, "syntagma":right_syntagma}])
# # # ####################################################################################
	# # # ###################### GET JUST A FEW PHENOMENA, NOT ALL #########################
# # # ####################################################################################
### Case 2.1:
### repl=True,redu=False, baseline=False
syntagma = ["kleine"]
data = list(stats.get_data(syntagma, repl=True, redu=False, baseline=False, sentiment=False, syntagma_type="lexem"))
#p(data,"data")
#self.pretty_print_uniq(data[0])
right_repl = [
(82, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'e', 4, 2, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(83, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'i', 5, 3, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(84, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'n', 4, 4, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(85, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'e', 8, 5, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(3, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5in^5e', u'klein', u'e', 5, 2, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(4, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5in^5e', u'klein', u'n', 5, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(5, 8888, u'[4, 11]', u'[0, 3]', u'[0, 1]', u'kleine', u'klein^3e', u'klein', u'n', 3, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(21, 10000, u'[12, 3, 8]', u'[0, 2]', u'[0, 2]', u'kleine', u'kle^5ine', u'klein', u'e', 5, 2, None, u'ADJA', u'["neutral", 0.0]', None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'klitze', u'["ADJA", null, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'@sch\xf6nesleben', u'["mention", null, "@schonesleb"]', u'#machwasdaraus', u'["hashtag", null, "#machwasdaraus"]', u'#bewegedeinarsch', u'["hashtag", null, "#bewegedeinarsch"]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]'),
(57, 11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]', u'kleine', u'kle^5i^2n^4e^5', u'klein', u'e', 5, 2, None, u'ADJA', u'["neutral", 0.0]', u'!', u'["symbol", null, "!"]', u'weil', u'["KOUS", null, "weil"]', u'es', u'["PPER", null, "es"]', u'ja', u'["PTKMA", null, "ja"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]'),
(58, 11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]', u'kleine', u'kle^5i^2n^4e^5', u'klein', u'n', 4, 4, None, u'ADJA', u'["neutral", 0.0]', u'!', u'["symbol", null, "!"]', u'weil', u'["KOUS", null, "weil"]', u'es', u'["PPER", null, "es"]', u'ja', u'["PTKMA", null, "ja"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]'),
(59, 11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]', u'kleine', u'kle^5i^2n^4e^5', u'klein', u'e', 5, 5, None, u'ADJA', u'["neutral", 0.0]', u'!', u'["symbol", null, "!"]', u'weil', u'["KOUS", null, "weil"]', u'es', u'["PPER", null, "es"]', u'ja', u'["PTKMA", null, "ja"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]'),
]
right_syntagma = [u'kleine']
right_baseline = []
right_redu = []
extracted_repl = data[0]["repl"]
extracted_redu = data[0]["redu"]
extracted_baseline = data[0]["baseline"]
extracted_syntagma = data[0]["syntagma"]
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
extracted_syntagma.should.be.equal(right_syntagma)
# #data.should.be.equal([{"repl":right_repl, "redu":right_redu,"baseline":right_baseline, "syntagma":right_syntagma}])
### Case 2.2:
### repl=False,redu=True, baseline=False
syntagma = ["kleine"]
data = list(stats.get_data(syntagma, repl=False, redu=True, baseline=False, sentiment=False, syntagma_type="lexem"))
#p(data,"data")
#self.pretty_print_uniq(data[0])
extracted_repl = data[0]["repl"]
extracted_redu = data[0]["redu"]
extracted_baseline = data[0]["baseline"]
extracted_syntagma = data[0]["syntagma"]
right_repl = []
right_syntagma = [u'kleine']
right_baseline = []
right_redu = [(2, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'klein', u'{"kle^5in^5e": 1, "klein^3e": 1}', 2, u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]')]
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
extracted_syntagma.should.be.equal(right_syntagma)
# #data.should.be.equal([{"repl":right_repl, "redu":right_redu,"baseline":right_baseline, "syntagma":right_syntagma}])
### Case 2.3:
### repl=False,redu=False, baseline=True
# Only baseline data requested: expected "repl" and "redu" are empty.
syntagma = ["kleine"]
data = list(stats.get_data(syntagma, repl=False, redu=False, baseline=True, sentiment=False, syntagma_type="lexem"))
#p(data,"data")
#self.pretty_print_uniq(data[0])
extracted_repl = data[0]["repl"]
extracted_redu = data[0]["redu"]
extracted_baseline = data[0]["baseline"]
extracted_syntagma = data[0]["syntagma"]
right_repl = []
right_syntagma = [u'kleine']
right_baseline = [[[u'kleine'], u'klein', 1, 7, u'5', u'11', u'1', u'2', u'5', u'1']]
right_redu = []
# Order-insensitive comparison of each category against the fixtures above.
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
extracted_syntagma.should.be.equal(right_syntagma)
# #data.should.be.equal([{"repl":right_repl, "redu":right_redu,"baseline":right_baseline, "syntagma":right_syntagma}])
# # # ####################################################################################
# # # #################. GET ORDERED/UNORDERED OUTPUT #################################
# # # # #################################################################################
### Case 3.1:
#order_output_by_syntagma_order = True
# full_tuple = False
syntagma = ["kleine","Überaschung"]
data = stats.get_data(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem",order_output_by_syntagma_order=True,return_full_tuple=False)
# len() is taken before and after materializing the result: the reported
# length must agree with the number of items actually yielded.
len1 = len(data)
data = list(data)
len2 = len(data)
len1.should.be.equal(len2)
#p(data,"data")
#self.pretty_print_uniq(data[0],syn_order=True)
extracted_repl = data[0]["repl"]
extracted_redu = data[0]["redu"]
extracted_baseline = data[0]["baseline"]
extracted_syntagma = data[0]["syntagma"]
right_repl = [
(u'kleine', (
(82, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'e', 4, 2, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(83, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'i', 5, 3, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(84, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'n', 4, 4, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(85, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'e', 8, 5, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(21, 10000, u'[12, 3, 8]', u'[0, 2]', u'[0, 2]', u'kleine', u'kle^5ine', u'klein', u'e', 5, 2, None, u'ADJA', u'["neutral", 0.0]', None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'klitze', u'["ADJA", null, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'@sch\xf6nesleben', u'["mention", null, "@schonesleb"]', u'#machwasdaraus', u'["hashtag", null, "#machwasdaraus"]', u'#bewegedeinarsch', u'["hashtag", null, "#bewegedeinarsch"]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]'),
(57, 11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]', u'kleine', u'kle^5i^2n^4e^5', u'klein', u'e', 5, 2, None, u'ADJA', u'["neutral", 0.0]', u'!', u'["symbol", null, "!"]', u'weil', u'["KOUS", null, "weil"]', u'es', u'["PPER", null, "es"]', u'ja', u'["PTKMA", null, "ja"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]'),
(58, 11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]', u'kleine', u'kle^5i^2n^4e^5', u'klein', u'n', 4, 4, None, u'ADJA', u'["neutral", 0.0]', u'!', u'["symbol", null, "!"]', u'weil', u'["KOUS", null, "weil"]', u'es', u'["PPER", null, "es"]', u'ja', u'["PTKMA", null, "ja"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]'),
(59, 11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]', u'kleine', u'kle^5i^2n^4e^5', u'klein', u'e', 5, 5, None, u'ADJA', u'["neutral", 0.0]', u'!', u'["symbol", null, "!"]', u'weil', u'["KOUS", null, "weil"]', u'es', u'["PPER", null, "es"]', u'ja', u'["PTKMA", null, "ja"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]'),
)
),
(u'\xfcberaschung', (
(86, 12222, u'[24]', u'[0, 25]', u'[0, 22]', u'\xfcberaschung', u'\xfcbe^4r^5a^3schun^6g^3', u'uberasch', u'e', 4, 2, None, u'NN', u'["neutral", 0.0]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None, None, None),
(87, 12222, u'[24]', u'[0, 25]', u'[0, 22]', u'\xfcberaschung', u'\xfcbe^4r^5a^3schun^6g^3', u'uberasch', u'r', 5, 3, None, u'NN', u'["neutral", 0.0]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None, None, None),
(88, 12222, u'[24]', u'[0, 25]', u'[0, 22]', u'\xfcberaschung', u'\xfcbe^4r^5a^3schun^6g^3', u'uberasch', u'a', 3, 4, None, u'NN', u'["neutral", 0.0]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None, None, None),
(89, 12222, u'[24]', u'[0, 25]', u'[0, 22]', u'\xfcberaschung', u'\xfcbe^4r^5a^3schun^6g^3', u'uberasch', u'n', 6, 9, None, u'NN', u'["neutral", 0.0]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None, None, None),
(90, 12222, u'[24]', u'[0, 25]', u'[0, 22]', u'\xfcberaschung', u'\xfcbe^4r^5a^3schun^6g^3', u'uberasch', u'g', 3, 10, None, u'NN', u'["neutral", 0.0]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None, None, None),
(22, 10000, u'[12, 3, 8]', u'[0, 3]', u'[0, 3]', u'\xfcberaschung', u'\xfcber^4aschung', u'uberasch', u'r', 4, 3, None, u'NN', u'["neutral", 0.0]', None, None, None, None, u'eine', u'["ART", null, "ein"]', u'klitze', u'["ADJA", null, "klitz"]', u'kleine', u'["ADJA", null, "klein"]', u'@sch\xf6nesleben', u'["mention", null, "@schonesleb"]', u'#machwasdaraus', u'["hashtag", null, "#machwasdaraus"]', u'#bewegedeinarsch', u'["hashtag", null, "#bewegedeinarsch"]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]', u'beser', u'["ADJD", null, "bes"]'),
(60, 11111, u'[5, 6, 15, 3]', u'[2, 5]', u'[2, 5]', u'\xfcberaschung', u'\xfcber^5aschung', u'uberasch', u'r', 5, 3, None, u'NN', u'["neutral", 0.0]', u'weil', u'["KOUS", null, "weil"]', u'es', u'["PPER", null, "es"]', u'ja', u'["PTKMA", null, "ja"]', u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]', 3, u'["number", null, "3"]'),
)
),
]
# Expected values: with order_output_by_syntagma_order=True the repl rows are
# grouped per syntagma word (see right_repl above); redu is expected empty.
right_syntagma = [u'kleine', u'\xfcberaschung']
right_baseline = [[[u'kleine', u'\xfcberaschung'], u'klein++uberasch', 2, 5, u'[3, 3]', u'[8, 7]', None, None, u'3', None]]
right_redu = ()
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
extracted_syntagma.should.be.equal(right_syntagma)
# #data.should.be.equal([{"repl":right_repl, "redu":right_redu,"baseline":right_baseline, "syntagma":right_syntagma}])
### Case 3.2:
#order_output_by_syntagma_order = True
# full_tuple = True
# Same query as Case 3.1 but with return_full_tuple=True, which changes the
# shape of the "repl"/"redu" values (see the expectations below).
syntagma = ["kleine","Überaschung"]
data = list(stats.get_data(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem",order_output_by_syntagma_order=True,return_full_tuple=True))
#p(data,"data")
#p(data[0],"data[0]")
#self.pretty_print_uniq(data[0],syn_order=True)
extracted_repl = data[0]["repl"]
extracted_redu = data[0]["redu"]
extracted_baseline = data[0]["baseline"]
extracted_syntagma = data[0]["syntagma"]
right_repl = (
[
(u'kleine', (
(82, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'e', 4, 2, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(83, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'i', 5, 3, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(84, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'n', 4, 4, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(85, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'e', 8, 5, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(21, 10000, u'[12, 3, 8]', u'[0, 2]', u'[0, 2]', u'kleine', u'kle^5ine', u'klein', u'e', 5, 2, None, u'ADJA', u'["neutral", 0.0]', None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'klitze', u'["ADJA", null, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'@sch\xf6nesleben', u'["mention", null, "@schonesleb"]', u'#machwasdaraus', u'["hashtag", null, "#machwasdaraus"]', u'#bewegedeinarsch', u'["hashtag", null, "#bewegedeinarsch"]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]'),
(57, 11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]', u'kleine', u'kle^5i^2n^4e^5', u'klein', u'e', 5, 2, None, u'ADJA', u'["neutral", 0.0]', u'!', u'["symbol", null, "!"]', u'weil', u'["KOUS", null, "weil"]', u'es', u'["PPER", null, "es"]', u'ja', u'["PTKMA", null, "ja"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]'),
(58, 11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]', u'kleine', u'kle^5i^2n^4e^5', u'klein', u'n', 4, 4, None, u'ADJA', u'["neutral", 0.0]', u'!', u'["symbol", null, "!"]', u'weil', u'["KOUS", null, "weil"]', u'es', u'["PPER", null, "es"]', u'ja', u'["PTKMA", null, "ja"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]'),
(59, 11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]', u'kleine', u'kle^5i^2n^4e^5', u'klein', u'e', 5, 5, None, u'ADJA', u'["neutral", 0.0]', u'!', u'["symbol", null, "!"]', u'weil', u'["KOUS", null, "weil"]', u'es', u'["PPER", null, "es"]', u'ja', u'["PTKMA", null, "ja"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]'),
)
),
(u'\xfcberaschung', (
(86, 12222, u'[24]', u'[0, 25]', u'[0, 22]', u'\xfcberaschung', u'\xfcbe^4r^5a^3schun^6g^3', u'uberasch', u'e', 4, 2, None, u'NN', u'["neutral", 0.0]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None, None, None),
(87, 12222, u'[24]', u'[0, 25]', u'[0, 22]', u'\xfcberaschung', u'\xfcbe^4r^5a^3schun^6g^3', u'uberasch', u'r', 5, 3, None, u'NN', u'["neutral", 0.0]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None, None, None),
(88, 12222, u'[24]', u'[0, 25]', u'[0, 22]', u'\xfcberaschung', u'\xfcbe^4r^5a^3schun^6g^3', u'uberasch', u'a', 3, 4, None, u'NN', u'["neutral", 0.0]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None, None, None),
(89, 12222, u'[24]', u'[0, 25]', u'[0, 22]', u'\xfcberaschung', u'\xfcbe^4r^5a^3schun^6g^3', u'uberasch', u'n', 6, 9, None, u'NN', u'["neutral", 0.0]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None, None, None),
(90, 12222, u'[24]', u'[0, 25]', u'[0, 22]', u'\xfcberaschung', u'\xfcbe^4r^5a^3schun^6g^3', u'uberasch', u'g', 3, 10, None, u'NN', u'["neutral", 0.0]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None, None, None),
(22, 10000, u'[12, 3, 8]', u'[0, 3]', u'[0, 3]', u'\xfcberaschung', u'\xfcber^4aschung', u'uberasch', u'r', 4, 3, None, u'NN', u'["neutral", 0.0]', None, None, None, None, u'eine', u'["ART", null, "ein"]', u'klitze', u'["ADJA", null, "klitz"]', u'kleine', u'["ADJA", null, "klein"]', u'@sch\xf6nesleben', u'["mention", null, "@schonesleb"]', u'#machwasdaraus', u'["hashtag", null, "#machwasdaraus"]', u'#bewegedeinarsch', u'["hashtag", null, "#bewegedeinarsch"]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]', u'beser', u'["ADJD", null, "bes"]'),
(60, 11111, u'[5, 6, 15, 3]', u'[2, 5]', u'[2, 5]', u'\xfcberaschung', u'\xfcber^5aschung', u'uberasch', u'r', 5, 3, None, u'NN', u'["neutral", 0.0]', u'weil', u'["KOUS", null, "weil"]', u'es', u'["PPER", null, "es"]', u'ja', u'["PTKMA", null, "ja"]', u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]', 3, u'["number", null, "3"]'),
)
),
],
True,
3,
)
right_syntagma = [u'kleine', u'\xfcberaschung']
right_baseline = [[[u'kleine', u'\xfcberaschung'], u'klein++uberasch', 2, 5, u'[3, 3]', u'[8, 7]', None, None, u'3', None]]
# In full-tuple mode the expected redu value is None, so it is compared
# directly instead of via the set-based helper used elsewhere.
right_redu = None
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
extracted_redu.should.be.equal(right_redu)
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
extracted_syntagma.should.be.equal(right_syntagma)
# #data.should.be.equal([{"repl":right_repl, "redu":right_redu,"baseline":right_baseline, "syntagma":right_syntagma}])
### Case 3.3:
#order_output_by_syntagma_order
# Three-word syntagma with syntagma-ordered output.
syntagma = ["klitze","kleine", "überaschung"]
data = list(stats.get_data(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem",order_output_by_syntagma_order=True))
#p(data,"data")
#self.pretty_print_uniq(data[0],syn_order=True)
extracted_repl = data[0]["repl"]
extracted_redu = data[0]["redu"]
extracted_baseline = data[0]["baseline"]
extracted_syntagma = data[0]["syntagma"]
right_repl = [
(u'klitze', (
(20, 10000, u'[12, 3, 8]', u'[0, 1]', u'[0, 1]', u'klitze', u'klitze^4', u'klitz', u'e', 4, 5, None, u'ADJA', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'@sch\xf6nesleben', u'["mention", null, "@schonesleb"]', u'#machwasdaraus', u'["hashtag", null, "#machwasdaraus"]', u'#bewegedeinarsch', u'["hashtag", null, "#bewegedeinarsch"]'),
)
),
(u'kleine', (
(21, 10000, u'[12, 3, 8]', u'[0, 2]', u'[0, 2]', u'kleine', u'kle^5ine', u'klein', u'e', 5, 2, None, u'ADJA', u'["neutral", 0.0]', None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'klitze', u'["ADJA", null, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'@sch\xf6nesleben', u'["mention", null, "@schonesleb"]', u'#machwasdaraus', u'["hashtag", null, "#machwasdaraus"]', u'#bewegedeinarsch', u'["hashtag", null, "#bewegedeinarsch"]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]'),
)
),
(u'\xfcberaschung', (
(22, 10000, u'[12, 3, 8]', u'[0, 3]', u'[0, 3]', u'\xfcberaschung', u'\xfcber^4aschung', u'uberasch', u'r', 4, 3, None, u'NN', u'["neutral", 0.0]', None, None, None, None, u'eine', u'["ART", null, "ein"]', u'klitze', u'["ADJA", null, "klitz"]', u'kleine', u'["ADJA", null, "klein"]', u'@sch\xf6nesleben', u'["mention", null, "@schonesleb"]', u'#machwasdaraus', u'["hashtag", null, "#machwasdaraus"]', u'#bewegedeinarsch', u'["hashtag", null, "#bewegedeinarsch"]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]', u'beser', u'["ADJD", null, "bes"]'),
)
),
]
right_syntagma = [u'klitze', u'kleine', u'\xfcberaschung']
right_baseline = [[[u'klitze', u'kleine', u'\xfcberaschung'], u'klitz++klein++uberasch', 3, 3, u'[1, 1, 1]', u'[1, 1, 1]', None, None, u'1', None]]
right_redu = ()
# Order-insensitive comparison of each category against the fixtures above.
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
extracted_syntagma.should.be.equal(right_syntagma)
# #data.should.be.equal([{"repl":right_repl, "redu":right_redu,"baseline":right_baseline, "syntagma":right_syntagma}])
# # # ####################################################################################
# # # #################. WORK WITH POS #########################################
# # # ##############################################################################
# ### Case 5.1:
#full_repetativ_syntagma=False
# Query by part-of-speech tags instead of lexemes (syntagma_type="pos").
syntagma = ["NN", "NE"]
data = list(stats.get_data(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="pos"))
#p(data,"data")
#self.pretty_print_uniq(data[0],syn_order=False)
extracted_repl = data[0]["repl"]
extracted_redu = data[0]["redu"]
extracted_baseline = data[0]["baseline"]
extracted_syntagma = data[0]["syntagma"]
right_repl = [
(1, 8888, u'[4, 11]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze^7', u'klitz', u'i', 4, 2, u'[0, 0]', u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5in^5e": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]'),
(2, 8888, u'[4, 11]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze^7', u'klitz', u'e', 7, 5, u'[0, 0]', u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5in^5e": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]'),
(3, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5in^5e', u'klein', u'e', 5, 2, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(4, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5in^5e', u'klein', u'n', 5, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(5, 8888, u'[4, 11]', u'[0, 3]', u'[0, 1]', u'kleine', u'klein^3e', u'klein', u'n', 3, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
]
right_syntagma = [u'NN', u'NE']
right_baseline = [[[u'klitze', u'kleine', u'\xfcberaschung'], u'klitz++klein++uberasch', 3, 3, u'[1, 1, 1]', u'[1, 1, 1]', None, None, u'1', None], [[u'klitze', u'kleine'], u'klitz++klein', 2, 4, u'[2, 3]', u'[3, 4]', u'[1, 1]', u'[2, 2]', u'2', u'1'], [[u'klitze'], u'klitz', 1, 8, u'3', u'4', u'2', u'6', u'3', u'2'], [[u'klitze', u'kleine', u'\xfcberaschung', u'.'], u'klitz++klein++uberasch++.', 4, 1, None, None, None, None, None, None], [[u'kleine'], u'klein', 1, 7, u'5', u'11', u'1', u'2', u'5', u'1'], [[u'klitze', u'kleine', u'\xfcberaschung', u'.', u'trotzdem', u'hat'], u'klitz++klein++uberasch++.++trotzd++hat', 6, 1, None, None, None, None, None, None], [[u'kleine', u'\xfcberaschung', u'.', u'trotzdem'], u'klein++uberasch++.++trotzd', 4, 1, None, None, None, None, None, None], [[u'kleine', u'\xfcberaschung', u'.'], u'klein++uberasch++.', 3, 2, None, None, None, None, None, None], [[u'kleine', u'\xfcberaschung', u'.', u'trotzdem', u'hat'], u'klein++uberasch++.++trotzd++hat', 5, 1, None, None, None, None, None, None], [[u'klitze', u'kleine', u'\xfcberaschung', u'.', u'trotzdem'], u'klitz++klein++uberasch++.++trotzd', 5, 1, None, None, None, None, None, None], [[u'kleine', u'\xfcberaschung', u'.', u'trotzdem', u'hat', u'sie'], u'klein++uberasch++.++trotzd++hat++sie', 6, 1, None, None, None, None, None, None], [[u'kleine', u'\xfcberaschung'], u'klein++uberasch', 2, 5, u'[3, 3]', u'[8, 7]', None, None, u'3', None]]
right_redu = [
(1, 8888, u'[4, 11]', u'[0, 0]', u'[0, 0]', u'klitze', u'klitz', u'{"klitze": 1, "kli^4tze^7": 1}', 2, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5in^5e": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]'),
(2, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'klein', u'{"kle^5in^5e": 1, "klein^3e": 1}', 2, u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
]
# Order-insensitive comparison of each category against the fixtures above.
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
extracted_syntagma.should.be.equal(right_syntagma)
# # # ####################################################################################
# # # ################ #WORK WITH NUMBERS##########################################
# # # #################################################################################
# ### Case 6.1:
# POS query for the "number" tag.
syntagma = ["number"]
data = stats.get_data(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="pos")
# len() is taken before and after materializing the result: the reported
# length must agree with the number of items actually yielded.
len1 = len(data)
data = list(data)
len2 = len(data)
len1.should.be.equal(len2)
#p((len1, len2))
# p(data,"data")
#self.pretty_print_uniq(data[0],syn_order=False)
extracted_repl = data[0]["repl"]
extracted_redu = data[0]["redu"]
extracted_baseline = data[0]["baseline"]
extracted_syntagma = data[0]["syntagma"]
right_repl = [
(78, 12222, u'[24]', u'[0, 14]', u'[0, 11]', u'1', u'1^6', u'1', u'1', 6, 0, None, u'number', u'["neutral", 0.0]', u'ich', u'["PPER", null, "ich"]', u'mal', u'["PTKMA", null, "mal"]', u'gerne', u'["ADV", null, "gern"]', u'hate', u'["VAFIN", null, "hat"]', u'.', u'["symbol", null, "."]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', 1, u'["number", null, "1"]', u'du', u'["PPER", null, "du"]', u'meintest', u'["VVFIN", null, "meint"]', u',', u'["symbol", null, ","]'),
(80, 12222, u'[24]', u'[0, 16]', u'[0, 13]', u'1', u'1^8', u'1', u'1', 8, 0, None, u'number', u'["neutral", 0.0]', u'gerne', u'["ADV", null, "gern"]', u'hate', u'["VAFIN", null, "hat"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', u'\U0001f62b', u'["EMOIMG", null, "\\ud83d\\ude2b"]', u'du', u'["PPER", null, "du"]', u'meintest', u'["VVFIN", null, "meint"]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]'),
(61, 11111, u'[5, 6, 15, 3]', u'[2, 8]', u'[2, 8]', u'1', u'1^5', u'1', u'1', 5, 0, None, u'number', u'["neutral", 0.0]', u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 2, u'["number", null, "2"]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]'),
(62, 11111, u'[5, 6, 15, 3]', u'[2, 9]', u'[2, 9]', u'2', u'2^4', u'2', u'2', 4, 0, None, u'number', u'["neutral", 0.0]', u'kleine', u'["ADJA", null, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]'),
(63, 11111, u'[5, 6, 15, 3]', u'[2, 10]', u'[2, 10]', u'3', u'3^5', u'3', u'3', 5, 0, None, u'number', u'["neutral", 0.0]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'kleines', u'["NN", {"kle^4i^5n^3e^2s^3": 1, "klein^4e^3s^4": 1, "kleine^4s^7": 1}, "klein"]'),
(64, 11111, u'[5, 6, 15, 3]', u'[2, 11]', u'[2, 11]', u'4', u'4^4', u'4', u'4', 4, 0, None, u'number', u'["neutral", 0.0]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]', 3, u'["number", null, "3"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'kleines', u'["NN", {"kle^4i^5n^3e^2s^3": 1, "klein^4e^3s^4": 1, "kleine^4s^7": 1}, "klein"]', u'm\xe4dchen', u'["NN", null, "madch"]'),
(65, 11111, u'[5, 6, 15, 3]', u'[2, 12]', u'[2, 12]', u'5', u'5^5', u'5', u'5', 5, 0, None, u'number', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'kleines', u'["NN", {"kle^4i^5n^3e^2s^3": 1, "klein^4e^3s^4": 1, "kleine^4s^7": 1}, "klein"]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]'),
]
right_syntagma = [u'number']
right_baseline = [[[u'1', u'\U0001f62b', u'1', u'du', u'meintest'], u'1++\U0001f62b++1++du++meint', 5, 1, None, None, None, None, None, None], [[u'3', u'4', u'5', u'6', u'.', u'kleines'], u'3++4++5++6++.++klein', 6, 1, None, None, None, None, None, None], [[u'3', u'4', u'5', u'6'], u'3++4++5++6', 4, 1, None, None, None, None, None, None], [[u'1', u'2'], u'1++2', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None], [[u'5', u'6'], u'5++6', 2, 1, None, None, None, None, None, None], [[u'1', u'\U0001f62b', u'1', u'du', u'meintest', u','], u'1++\U0001f62b++1++du++meint++,', 6, 1, None, None, None, None, None, None], [[u'2'], u'2', 1, 1, u'1', u'1', None, None, u'1', None], [[u'1', u'2', u'3', u'4'], u'1++2++3++4', 4, 1, u'[1, 1, 1, 1]', u'[1, 1, 1, 1]', None, None, u'1', None], [[u'2', u'3', u'4', u'5', u'6'], u'2++3++4++5++6', 5, 1, None, None, None, None, None, None], [[u'1', u'\U0001f62b'], u'1++\U0001f62b', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None], [[u'5', u'6', u'.', u'kleines'], u'5++6++.++klein', 4, 1, None, None, None, None, None, None], [[u'3', u'4'], u'3++4', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None], [[u'1', u'du', u'meintest', u','], u'1++du++meint++,', 4, 1, None, None, None, None, None, None], [[u'1', u'2', u'3', u'4', u'5', u'6'], u'1++2++3++4++5++6', 6, 1, None, None, None, None, None, None], [[u'4', u'5', u'6', u'.', u'kleines', u'm\xe4dchen'], u'4++5++6++.++klein++madch', 6, 1, None, None, None, None, None, None], [[u'1', u'\U0001f62b', u'1', u'du'], u'1++\U0001f62b++1++du', 4, 1, None, None, None, None, None, None], [[u'2', u'3', u'4'], u'2++3++4', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, u'1', None], [[u'3', u'4', u'5'], u'3++4++5', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, u'1', None], [[u'4', u'5', u'6', u'.'], u'4++5++6++.', 4, 1, None, None, None, None, None, None], [[u'4'], u'4', 1, 1, u'1', u'1', None, None, u'1', None], [[u'1', u'2', u'3'], u'1++2++3', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, u'1', None], 
[[u'1', u'du'], u'1++du', 2, 1, None, None, None, None, None, None], [[u'1'], u'1', 1, 3, u'3', u'3', None, None, u'3', None], [[u'2', u'3'], u'2++3', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None], [[u'3'], u'3', 1, 1, u'1', u'1', None, None, u'1', None], [[u'4', u'5', u'6'], u'4++5++6', 3, 1, None, None, None, None, None, None], [[u'1', u'\U0001f62b', u'1'], u'1++\U0001f62b++1', 3, 1, u'[2, 1, "IGNOR"]', u'[2, 1, "IGNOR"]', None, None, u'1', None], [[u'5'], u'5', 1, 1, u'1', u'1', None, None, u'1', None], [[u'4', u'5', u'6', u'.', u'kleines'], u'4++5++6++.++klein', 5, 1, None, None, None, None, None, None], [[u'5', u'6', u'.'], u'5++6++.', 3, 1, None, None, None, None, None, None], [[u'2', u'3', u'4', u'5', u'6', u'.'], u'2++3++4++5++6++.', 6, 1, None, None, None, None, None, None], [[u'1', u'du', u'meintest', u',', u'es'], u'1++du++meint++,++es', 5, 1, None, None, None, None, None, None], [[u'5', u'6', u'.', u'kleines', u'm\xe4dchen', u'.'], u'5++6++.++klein++madch++.', 6, 1, None, None, None, None, None, None], [[u'3', u'4', u'5', u'6', u'.'], u'3++4++5++6++.', 5, 1, None, None, None, None, None, None], [[u'1', u'du', u'meintest', u',', u'es', u'war'], u'1++du++meint++,++es++war', 6, 1, None, None, None, None, None, None], [[u'2', u'3', u'4', u'5'], u'2++3++4++5', 4, 1, u'[1, 1, 1, 1]', u'[1, 1, 1, 1]', None, None, u'1', None], [[u'1', u'du', u'meintest'], u'1++du++meint', 3, 1, None, None, None, None, None, None], [[u'1', u'2', u'3', u'4', u'5'], u'1++2++3++4++5', 5, 1, u'[1, 1, 1, 1, 1]', u'[1, 1, 1, 1, 1]', None, None, u'1', None], [[u'5', u'6', u'.', u'kleines', u'm\xe4dchen'], u'5++6++.++klein++madch', 5, 1, None, None, None, None, None, None], [[u'4', u'5'], u'4++5', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None]]
right_redu = ()
# Order-insensitive comparison of each category against the fixtures above.
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
extracted_syntagma.should.be.equal(right_syntagma)
# ### Case 6.2:
#### Known problem: repetitive syntagmas ("number" twice) yield repeated repl_ids.
syntagma = ["number","number"]
data = list(stats.get_data(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="pos"))
# p(data,"data")
#self.pretty_print_uniq(data[0],syn_order=False)
extracted_repl = data[0]["repl"]
extracted_redu = data[0]["redu"]
extracted_baseline = data[0]["baseline"]
extracted_syntagma = data[0]["syntagma"]
right_repl = [
(61, 11111, u'[5, 6, 15, 3]', u'[2, 8]', u'[2, 8]', u'1', u'1^5', u'1', u'1', 5, 0, None, u'number', u'["neutral", 0.0]', u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 2, u'["number", null, "2"]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]'),
(62, 11111, u'[5, 6, 15, 3]', u'[2, 9]', u'[2, 9]', u'2', u'2^4', u'2', u'2', 4, 0, None, u'number', u'["neutral", 0.0]', u'kleine', u'["ADJA", null, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]'),
(63, 11111, u'[5, 6, 15, 3]', u'[2, 10]', u'[2, 10]', u'3', u'3^5', u'3', u'3', 5, 0, None, u'number', u'["neutral", 0.0]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'kleines', u'["NN", {"kle^4i^5n^3e^2s^3": 1, "klein^4e^3s^4": 1, "kleine^4s^7": 1}, "klein"]'),
(64, 11111, u'[5, 6, 15, 3]', u'[2, 11]', u'[2, 11]', u'4', u'4^4', u'4', u'4', 4, 0, None, u'number', u'["neutral", 0.0]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]', 3, u'["number", null, "3"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'kleines', u'["NN", {"kle^4i^5n^3e^2s^3": 1, "klein^4e^3s^4": 1, "kleine^4s^7": 1}, "klein"]', u'm\xe4dchen', u'["NN", null, "madch"]'),
]
right_syntagma = [u'number', u'number']
right_baseline = [[[u'3', u'4', u'5', u'6', u'.', u'kleines'], u'3++4++5++6++.++klein', 6, 1, None, None, None, None, None, None], [[u'3', u'4', u'5', u'6'], u'3++4++5++6', 4, 1, None, None, None, None, None, None], [[u'1', u'2'], u'1++2', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None], [[u'2'], u'2', 1, 1, u'1', u'1', None, None, u'1', None], [[u'1', u'2', u'3', u'4'], u'1++2++3++4', 4, 1, u'[1, 1, 1, 1]', u'[1, 1, 1, 1]', None, None, u'1', None], [[u'2', u'3', u'4', u'5', u'6'], u'2++3++4++5++6', 5, 1, None, None, None, None, None, None], [[u'3', u'4'], u'3++4', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None], [[u'1', u'2', u'3', u'4', u'5', u'6'], u'1++2++3++4++5++6', 6, 1, None, None, None, None, None, None], [[u'4', u'5', u'6', u'.', u'kleines', u'm\xe4dchen'], u'4++5++6++.++klein++madch', 6, 1, None, None, None, None, None, None], [[u'2', u'3', u'4'], u'2++3++4', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, u'1', None], [[u'3', u'4', u'5'], u'3++4++5', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, u'1', None], [[u'4', u'5', u'6', u'.'], u'4++5++6++.', 4, 1, None, None, None, None, None, None], [[u'4'], u'4', 1, 1, u'1', u'1', None, None, u'1', None], [[u'1', u'2', u'3'], u'1++2++3', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, u'1', None], [[u'1'], u'1', 1, 3, u'3', u'3', None, None, u'3', None], [[u'2', u'3'], u'2++3', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None], [[u'3'], u'3', 1, 1, u'1', u'1', None, None, u'1', None], [[u'4', u'5', u'6', u'.', u'kleines'], u'4++5++6++.++klein', 5, 1, None, None, None, None, None, None], [[u'2', u'3', u'4', u'5', u'6', u'.'], u'2++3++4++5++6++.', 6, 1, None, None, None, None, None, None], [[u'4', u'5', u'6'], u'4++5++6', 3, 1, None, None, None, None, None, None], [[u'3', u'4', u'5', u'6', u'.'], u'3++4++5++6++.', 5, 1, None, None, None, None, None, None], [[u'2', u'3', u'4', u'5'], u'2++3++4++5', 4, 1, u'[1, 1, 1, 1]', u'[1, 1, 1, 1]', None, None, u'1', None], [[u'1', u'2', u'3', u'4', u'5'], 
u'1++2++3++4++5', 5, 1, u'[1, 1, 1, 1, 1]', u'[1, 1, 1, 1, 1]', None, None, u'1', None], [[u'4', u'5'], u'4++5', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None]]
# No reduplications are expected for this case.
right_redu = ()
# Order-insensitive set comparison of repl/redu rows (see convert_all_lists_to_tuples).
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
# Baseline rows are normalized to tuples of unicode strings before comparison.
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
extracted_syntagma.should.be.equal(right_syntagma)
# ### Case 6.3:
# Same query as 6.2 but with a three-element "number" syntagma.
syntagma = ["number","number","number"]
data = list(stats.get_data(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="pos"))
# p(data,"data")
#self.pretty_print_uniq(data[0],syn_order=False)
extracted_repl = data[0]["repl"]
extracted_redu = data[0]["redu"]
extracted_baseline = data[0]["baseline"]
extracted_syntagma = data[0]["syntagma"]
right_repl = [
(61, 11111, u'[5, 6, 15, 3]', u'[2, 8]', u'[2, 8]', u'1', u'1^5', u'1', u'1', 5, 0, None, u'number', u'["neutral", 0.0]', u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 2, u'["number", null, "2"]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]'),
(62, 11111, u'[5, 6, 15, 3]', u'[2, 9]', u'[2, 9]', u'2', u'2^4', u'2', u'2', 4, 0, None, u'number', u'["neutral", 0.0]', u'kleine', u'["ADJA", null, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]'),
(63, 11111, u'[5, 6, 15, 3]', u'[2, 10]', u'[2, 10]', u'3', u'3^5', u'3', u'3', 5, 0, None, u'number', u'["neutral", 0.0]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'kleines', u'["NN", {"kle^4i^5n^3e^2s^3": 1, "klein^4e^3s^4": 1, "kleine^4s^7": 1}, "klein"]'),
]
right_syntagma = [u'number', u'number', u'number']
right_baseline = [[[u'1', u'2', u'3', u'4'], u'1++2++3++4', 4, 1, u'[1, 1, 1, 1]', u'[1, 1, 1, 1]', None, None, u'1', None], [[u'2', u'3', u'4'], u'2++3++4', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, u'1', None], [[u'2', u'3', u'4', u'5', u'6'], u'2++3++4++5++6', 5, 1, None, None, None, None, None, None], [[u'2', u'3', u'4', u'5', u'6', u'.'], u'2++3++4++5++6++.', 6, 1, None, None, None, None, None, None], [[u'3', u'4', u'5', u'6', u'.', u'kleines'], u'3++4++5++6++.++klein', 6, 1, None, None, None, None, None, None], [[u'1', u'2'], u'1++2', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None], [[u'3', u'4', u'5', u'6'], u'3++4++5++6', 4, 1, None, None, None, None, None, None], [[u'3', u'4'], u'3++4', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None], [[u'1', u'2', u'3'], u'1++2++3', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, u'1', None], [[u'1'], u'1', 1, 3, u'3', u'3', None, None, u'3', None], [[u'2', u'3'], u'2++3', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None], [[u'1', u'2', u'3', u'4', u'5', u'6'], u'1++2++3++4++5++6', 6, 1, None, None, None, None, None, None], [[u'2', u'3', u'4', u'5'], u'2++3++4++5', 4, 1, u'[1, 1, 1, 1]', u'[1, 1, 1, 1]', None, None, u'1', None], [[u'3'], u'3', 1, 1, u'1', u'1', None, None, u'1', None], [[u'2'], u'2', 1, 1, u'1', u'1', None, None, u'1', None], [[u'1', u'2', u'3', u'4', u'5'], u'1++2++3++4++5', 5, 1, u'[1, 1, 1, 1, 1]', u'[1, 1, 1, 1, 1]', None, None, u'1', None], [[u'3', u'4', u'5'], u'3++4++5', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, u'1', None], [[u'3', u'4', u'5', u'6', u'.'], u'3++4++5++6++.', 5, 1, None, None, None, None, None, None]]
# No reduplications are expected for this case.
right_redu = ()
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
extracted_syntagma.should.be.equal(right_syntagma)
# ### Case 6.4:#
# Same three-element syntagma as 6.3, but with order_output_by_syntagma_order=True:
# the expected repl rows below are grouped per syntagma element as
# (element, (rows...)) pairs instead of a flat row list.
syntagma = ["number","number","number"]
data = list(stats.get_data(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="pos", order_output_by_syntagma_order=True))
#p(data,"data")
#self.pretty_print_uniq(data[0],syn_order=False)
extracted_repl = data[0]["repl"]
extracted_redu = data[0]["redu"]
extracted_baseline = data[0]["baseline"]
extracted_syntagma = data[0]["syntagma"]
right_repl = [
(u'number', ((61, 11111, u'[5, 6, 15, 3]', u'[2, 8]', u'[2, 8]', u'1', u'1^5', u'1', u'1', 5, 0, None, u'number', u'["neutral", 0.0]', u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 2, u'["number", null, "2"]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]'),)),
(u'number', ((62, 11111, u'[5, 6, 15, 3]', u'[2, 9]', u'[2, 9]', u'2', u'2^4', u'2', u'2', 4, 0, None, u'number', u'["neutral", 0.0]', u'kleine', u'["ADJA", null, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]'),)),
(u'number', ((63, 11111, u'[5, 6, 15, 3]', u'[2, 10]', u'[2, 10]', u'3', u'3^5', u'3', u'3', 5, 0, None, u'number', u'["neutral", 0.0]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'kleines', u'["NN", {"kle^4i^5n^3e^2s^3": 1, "klein^4e^3s^4": 1, "kleine^4s^7": 1}, "klein"]'),)),
]
right_syntagma = [u'number', u'number', u'number']
right_baseline = [[[u'1', u'2', u'3', u'4'], u'1++2++3++4', 4, 1, u'[1, 1, 1, 1]', u'[1, 1, 1, 1]', None, None, u'1', None], [[u'2', u'3', u'4'], u'2++3++4', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, u'1', None], [[u'2', u'3', u'4', u'5', u'6'], u'2++3++4++5++6', 5, 1, None, None, None, None, None, None], [[u'2', u'3', u'4', u'5', u'6', u'.'], u'2++3++4++5++6++.', 6, 1, None, None, None, None, None, None], [[u'3', u'4', u'5', u'6', u'.', u'kleines'], u'3++4++5++6++.++klein', 6, 1, None, None, None, None, None, None], [[u'1', u'2'], u'1++2', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None], [[u'3', u'4', u'5', u'6'], u'3++4++5++6', 4, 1, None, None, None, None, None, None], [[u'3', u'4'], u'3++4', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None], [[u'1', u'2', u'3'], u'1++2++3', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, u'1', None], [[u'1'], u'1', 1, 3, u'3', u'3', None, None, u'3', None], [[u'2', u'3'], u'2++3', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None], [[u'1', u'2', u'3', u'4', u'5', u'6'], u'1++2++3++4++5++6', 6, 1, None, None, None, None, None, None], [[u'2', u'3', u'4', u'5'], u'2++3++4++5', 4, 1, u'[1, 1, 1, 1]', u'[1, 1, 1, 1]', None, None, u'1', None], [[u'3'], u'3', 1, 1, u'1', u'1', None, None, u'1', None], [[u'2'], u'2', 1, 1, u'1', u'1', None, None, u'1', None], [[u'1', u'2', u'3', u'4', u'5'], u'1++2++3++4++5', 5, 1, u'[1, 1, 1, 1, 1]', u'[1, 1, 1, 1, 1]', None, None, u'1', None], [[u'3', u'4', u'5'], u'3++4++5', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, u'1', None], [[u'3', u'4', u'5', u'6', u'.'], u'3++4++5++6++.', 5, 1, None, None, None, None, None, None]]
# No reduplications are expected for this case.
right_redu = ()
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
extracted_syntagma.should.be.equal(right_syntagma)
### Case 6.5:#
# Case 6.5 cross-checks if_type_pos_return_lexem_syn: for the same POS syntagma,
# the run with the flag off (data1) and the run with it on (data2) must yield the
# same repl/redu rows (sub-cases 1 and 2), or at least a superset (sub-case 3).
###### 1
syntagma = ["EMOIMG"]
data1 = list(stats.get_data(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="pos", order_output_by_syntagma_order=False,if_type_pos_return_lexem_syn=False))
repl1 = sorted(data1[0]["repl"])
redu1 = sorted(data1[0]["redu"])
data2 = list(stats.get_data(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="pos", order_output_by_syntagma_order=False,if_type_pos_return_lexem_syn=True))
# With the flag on, rows are spread over several result items; flatten them first.
repl2 = sorted([r for item in data2 for r in item["repl"]])
redu2 = sorted([r for item in data2 for r in item["redu"]])
repl1.should.be.equal(repl2)
redu1.should.be.equal(redu2)
#p((repl1, repl2))
#p((redu1, redu2))
###### 2
syntagma = ["EMOIMG","EMOASC"]
data1 = list(stats.get_data(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="pos", order_output_by_syntagma_order=False,if_type_pos_return_lexem_syn=False))
repl1 = sorted(data1[0]["repl"])
redu1 = sorted(data1[0]["redu"])
data2 = list(stats.get_data(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="pos", order_output_by_syntagma_order=False,if_type_pos_return_lexem_syn=True))
repl2 = sorted([r for item in data2 for r in item["repl"]])
redu2 = sorted([r for item in data2 for r in item["redu"]])
repl1.should.be.equal(repl2)
redu1.should.be.equal(redu2)
#p((repl1, repl2))
#p((redu1, redu2))
###### 3
syntagma = ["EMOASC"]
data1 = list(stats.get_data(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="pos", order_output_by_syntagma_order=False,if_type_pos_return_lexem_syn=False))
repl1 = sorted(data1[0]["repl"])
redu1 = sorted(data1[0]["redu"])
data2 = list(stats.get_data(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="pos", order_output_by_syntagma_order=False,if_type_pos_return_lexem_syn=True))
# De-duplicate the flag-on rows by their first element (presumably a row id —
# TODO confirm) before comparing.
ext_id = []
repl2 = []
for item in data2:
for r in item["repl"]:
if r[0] not in ext_id:
ext_id.append(r[0])
repl2.append(r)
repl2 = sorted(repl2)
ext_id = []
redu2 = []
for item in data2:
for r in item["redu"]:
if r[0] not in ext_id:
ext_id.append(r[0])
redu2.append(r)
redu2 = sorted(redu2)
# Equal lengths must mean identical rows; otherwise the flag-on run may only
# contain MORE rows, never fewer.
if len(repl1) == len(repl2):
assert repl1 == repl2
else:
assert len(repl1)<= len(repl2)
if len(redu1) == len(redu2):
assert redu1 == redu2
else:
assert len(redu1)<= len(redu2)
#p((repl1, repl2))
#p((redu1, redu2))
################################################################################################
################################################################################################
###################################################################################################
############################stemmed_search = True #########################################
#################################################################################################
################################################################################################
################################################################################################
########stemmed_search=True #
### Case 10.1:
# Lexem-based query with stemmed_search=True: searching for the stems of
# "klitze"/"kleines" should match several inflected syntagma variants,
# each checked against its own expected data below.
syntagma = ["klitze","kleines"]
items = stats.get_data(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem",stemmed_search=True, )
# len() before and after materialization must agree (NOTE(review): assumes the
# object returned by get_data supports len() prior to list() — confirm).
len1 = len(items)
items = list(items)
len2 = len(items)
len1.should.be.equal(len2)
#p((len1, len2))
#self.pretty_print_uniq(item)
#p(items,"item")
# Each result item is dispatched on its concrete (inflected) syntagma variant.
for item in items:
#p(item["syntagma"])
if item["syntagma"] == [u'klitzes', u'kleines']:
#self.pretty_print_uniq(item)
right_stem_syn = [u'klitz', u'klein']
right_repl = [
(49, 10000, u'[12, 3, 8]', u'[2, 10]', u'[2, 6]', u'klitzes', u'klitzes^4', u'klitz', u's', 4, 6, u'[2, 6]', u'FM', u'["neutral", 0.0]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None, None, None, None, None),
(50, 10000, u'[12, 3, 8]', u'[2, 11]', u'[2, 6]', u'klitzes', u'kli^3tzes^3', u'klitz', u'i', 3, 2, u'[2, 6]', u'FM', u'["neutral", 0.0]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None, None, None, None, None),
(51, 10000, u'[12, 3, 8]', u'[2, 11]', u'[2, 6]', u'klitzes', u'kli^3tzes^3', u'klitz', u's', 3, 6, u'[2, 6]', u'FM', u'["neutral", 0.0]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None, None, None, None, None),
(52, 10000, u'[12, 3, 8]', u'[2, 12]', u'[2, 7]', u'kleines', u'klein^3e^2s', u'klein', u'n', 3, 4, u'[2, 7]', u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', None, None, None, None, None, None, None, None, None, None),
(53, 10000, u'[12, 3, 8]', u'[2, 13]', u'[2, 7]', u'kleines', u'kleines^4', u'klein', u's', 4, 6, u'[2, 7]', u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', None, None, None, None, None, None, None, None, None, None),
]
right_syntagma = [u'klitzes', u'kleines']
right_baseline = ([[u'klitzes', u'kleines'], u'klitz++klein', 2, 1, u'[2, 2]', u'[3, 2]', u'[1, 1]', u'[2, 2]', u'1', u'1'],)
right_redu = [
(15, 10000, u'[12, 3, 8]', u'[2, 10]', u'[2, 6]', u'klitzes', u'klitz', u'{"klitzes^4": 1, "kli^3tzes^3": 1}', 2, u'FM', u'["neutral", 0.0]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None, None, None, None, None),
(16, 10000, u'[12, 3, 8]', u'[2, 12]', u'[2, 7]', u'kleines', u'klein', u'{"klein^3e^2s": 1, "kleines^4": 1}', 2, u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', None, None, None, None, None, None, None, None, None, None),
]
elif item["syntagma"] == [u'klitz', u'klein']:
#self.pretty_print_uniq(item)
right_stem_syn = [u'klitz', u'klein']
right_repl = [
(42, 10000, u'[12, 3, 8]', u'[2, 5]', u'[2, 3]', u'klitz', u'kli^4tz', u'klitz', u'i', 4, 2, u'[2, 3]', u'NE', u'["neutral", 0.0]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None),
(43, 10000, u'[12, 3, 8]', u'[2, 6]', u'[2, 3]', u'klitz', u'kli^4tz^3', u'klitz', u'i', 4, 2, u'[2, 3]', u'NE', u'["neutral", 0.0]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None),
(44, 10000, u'[12, 3, 8]', u'[2, 6]', u'[2, 3]', u'klitz', u'kli^4tz^3', u'klitz', u'z', 3, 4, u'[2, 3]', u'NE', u'["neutral", 0.0]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None),
(45, 10000, u'[12, 3, 8]', u'[2, 7]', u'[2, 4]', u'klein', u'kle^3i^3n^3', u'klein', u'e', 3, 2, u'[2, 4]', u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None),
(46, 10000, u'[12, 3, 8]', u'[2, 7]', u'[2, 4]', u'klein', u'kle^3i^3n^3', u'klein', u'i', 3, 3, u'[2, 4]', u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None),
(47, 10000, u'[12, 3, 8]', u'[2, 7]', u'[2, 4]', u'klein', u'kle^3i^3n^3', u'klein', u'n', 3, 4, u'[2, 4]', u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None),
(48, 10000, u'[12, 3, 8]', u'[2, 8]', u'[2, 4]', u'klein', u'klein^5', u'klein', u'n', 5, 4, u'[2, 4]', u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None),
]
right_syntagma = [u'klitz', u'klein']
right_baseline = ([[u'klitz', u'klein'], u'klitz++klein', 2, 1, u'[2, 2]', u'[3, 4]', u'[1, 1]', u'[3, 2]', u'1', u'1'],)
right_redu = [
(13, 10000, u'[12, 3, 8]', u'[2, 4]', u'[2, 3]', u'klitz', u'klitz', u'{"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}', 3, u'NE', u'["neutral", 0.0]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None),
(14, 10000, u'[12, 3, 8]', u'[2, 7]', u'[2, 4]', u'klein', u'klein', u'{"kle^3i^3n^3": 1, "klein^5": 1}', 2, u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None),
]
elif item["syntagma"] == [u'klitze', u'kleine']:
#self.pretty_print_uniq(item)
right_stem_syn = [u'klitz', u'klein']
right_repl = [
(1, 8888, u'[4, 11]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze^7', u'klitz', u'i', 4, 2, u'[0, 0]', u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5in^5e": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]'),
(2, 8888, u'[4, 11]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze^7', u'klitz', u'e', 7, 5, u'[0, 0]', u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5in^5e": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]'),
(20, 10000, u'[12, 3, 8]', u'[0, 1]', u'[0, 1]', u'klitze', u'klitze^4', u'klitz', u'e', 4, 5, None, u'ADJA', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'kleine', u'["ADJA", null, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'@sch\xf6nesleben', u'["mention", null, "@schonesleb"]', u'#machwasdaraus', u'["hashtag", null, "#machwasdaraus"]', u'#bewegedeinarsch', u'["hashtag", null, "#bewegedeinarsch"]'),
(3, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5in^5e', u'klein', u'e', 5, 2, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(4, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5in^5e', u'klein', u'n', 5, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(5, 8888, u'[4, 11]', u'[0, 3]', u'[0, 1]', u'kleine', u'klein^3e', u'klein', u'n', 3, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(21, 10000, u'[12, 3, 8]', u'[0, 2]', u'[0, 2]', u'kleine', u'kle^5ine', u'klein', u'e', 5, 2, None, u'ADJA', u'["neutral", 0.0]', None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'klitze', u'["ADJA", null, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'@sch\xf6nesleben', u'["mention", null, "@schonesleb"]', u'#machwasdaraus', u'["hashtag", null, "#machwasdaraus"]', u'#bewegedeinarsch', u'["hashtag", null, "#bewegedeinarsch"]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]'),
]
right_syntagma = [u'klitze', u'kleine']
right_baseline = ([[u'klitze', u'kleine'], u'klitz++klein', 2, 4, u'[2, 3]', u'[3, 4]', u'[1, 1]', u'[2, 2]', u'2', u'1'],)
right_redu = [
(1, 8888, u'[4, 11]', u'[0, 0]', u'[0, 0]', u'klitze', u'klitz', u'{"klitze": 1, "kli^4tze^7": 1}', 2, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5in^5e": 1, "klein^3e": 1}, "klein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]'),
(2, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'klein', u'{"kle^5in^5e": 1, "klein^3e": 1}', 2, u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
]
else:
# Any syntagma variant not anticipated above is a test failure.
assert False
extracted_repl = item["repl"]
extracted_redu = item["redu"]
extracted_baseline = item["baseline"]
extracted_syntagma = item["syntagma"]
# Every matched variant must stem to the same stem syntagma.
assert item["stem_syn"] == ["klitz", "klein"]
# Order-insensitive comparison of the per-variant expected data.
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
extracted_syntagma.should.be.equal(right_syntagma)
#p(type(item), "item")
#p(item["baseline"])
#sys.exit()
#item["baseline"] = [unicode(item) for item in item["baseline"] ]
#item.should.be.equal({"repl":right_repl, "redu":right_redu,"baseline":[unicode(item) for item in right_baseline ], "syntagma":right_syntagma, "stem_syn":item["stem_syn"]})
###############################################################################################################################################
############################################### II. FullRepetitiveness = False #################################################################################
#######################################################################################################################################
#p(stats._language)
# Switch the stats object to the non-full repetitiveness scope; all following
# cases re-run queries under this configuration.
stats.recompute_syntagma_repetativity_scope(False)
# # ####################################################################################
# # #################. WORK WITH EMOJIS #########################################
# # ####################################################################################
### Case 2.1:
# POS-based query for a single image-emoticon tag.
syntagma = ["EMOIMG"]
data = list(stats.get_data(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="pos"))
#p(data,"data")
#self.pretty_print_uniq(data[0],syn_order=False)
extracted_repl = data[0]["repl"]
extracted_redu = data[0]["redu"]
extracted_baseline = data[0]["baseline"]
extracted_syntagma = data[0]["syntagma"]
right_repl = [
(8, 8888, u'[4, 11]', u'[1, 9]', u'[1, 9]', u'\U0001f600', u'\U0001f600^5', u'\U0001f600', u'\U0001f600', 5, 0, None, u'EMOIMG', u'["positive", 0.5]', u'gl\xfccklich', u'["ADJD", null, "glucklich"]', u'gemacht', u'["VVPP", null, "gemacht"]', u'!', u'["symbol", null, "!"]', u':-)', u'["EMOASC", null, ":-)"]', u'-)', u'["EMOASC", null, "-)"]', u'-)', u'["EMOASC", {"-)^3": 2}, "-)"]', None, None, None, None, None, None, None, None),
(79, 12222, u'[24]', u'[0, 15]', u'[0, 12]', u'\U0001f62b', u'\U0001f62b^4', u'\U0001f62b', u'\U0001f62b', 4, 0, None, u'EMOIMG', u'["neutral", 0.0]', u'mal', u'["PTKMA", null, "mal"]', u'gerne', u'["ADV", null, "gern"]', u'hate', u'["VAFIN", null, "hat"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 1, u'["number", null, "1"]', u'du', u'["PPER", null, "du"]', u'meintest', u'["VVFIN", null, "meint"]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]'),
]
right_syntagma = [u'EMOIMG']
right_baseline = [[[u'\U0001f62b', u'1', u'du'], u'\U0001f62b++1++du', 3, 1, u'[1, 1, 0]', u'[1, 1, 0]', None, None, None, None], [[u'\U0001f600'], u'\U0001f600', 1, 1, u'1', u'1', None, None, u'1', None], [[u'\U0001f62b', u'1', u'du', u'meintest', u',', u'es'], u'\U0001f62b++1++du++meint++,++es', 6, 1, u'[1, 1, 0, 0, 0, 0]', u'[1, 1, 0, 0, 0, 0]', None, None, None, None], [[u'\U0001f62b', u'1', u'du', u'meintest'], u'\U0001f62b++1++du++meint', 4, 1, u'[1, 1, 0, 0]', u'[1, 1, 0, 0]', None, None, None, None], [[u'\U0001f62b'], u'\U0001f62b', 1, 1, u'1', u'1', None, None, u'1', None], [[u'\U0001f62b', u'1'], u'\U0001f62b++1', 2, 1, u'[1, 1]', u'[1, 1]', None, None, None, None], [[u'\U0001f62b', u'1', u'du', u'meintest', u','], u'\U0001f62b++1++du++meint++,', 5, 1, u'[1, 1, 0, 0, 0]', u'[1, 1, 0, 0, 0]', None, None, None, None], [[u'\U0001f600', u'-)'], u'\U0001f600++-)', 2, 1, u'[1, 2]', u'[1, 2]', u'[0, 1]', u'[0, 2]', None, None]]
right_redu = []
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
extracted_syntagma.should.be.equal(right_syntagma)
# # # ####################################################################################
# # # ####################################################################################
# # # #################################################################################
### Case 2.3:
syntagma = ["EMOASC","EMOIMG"]
data = list(stats.get_data(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="pos"))
#p(data,"data")
#self.pretty_print_uniq(data[0],syn_order=False)
extracted_repl = data[0]["repl"]
extracted_redu = data[0]["redu"]
extracted_baseline = data[0]["baseline"]
extracted_syntagma = data[0]["syntagma"]
right_repl = [
(7, 8888, u'[4, 11]', u'[1, 8]', u'[1, 8]', u'-)', u'-)^3', u'-)', u')', 3, 1, None, u'EMOASC', u'["positive", 0.5]', u'mich', u'["PPER", null, "mich"]', u'gl\xfccklich', u'["ADJD", null, "glucklich"]', u'gemacht', u'["VVPP", null, "gemacht"]', u'!', u'["symbol", null, "!"]', u':-)', u'["EMOASC", null, ":-)"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'-)', u'["EMOASC", {"-)^3": 2}, "-)"]', None, None, None, None, None, None),
(8, 8888, u'[4, 11]', u'[1, 9]', u'[1, 9]', u'\U0001f600', u'\U0001f600^5', u'\U0001f600', u'\U0001f600', 5, 0, None, u'EMOIMG', u'["positive", 0.5]', u'gl\xfccklich', u'["ADJD", null, "glucklich"]', u'gemacht', u'["VVPP", null, "gemacht"]', u'!', u'["symbol", null, "!"]', u':-)', u'["EMOASC", null, ":-)"]', u'-)', u'["EMOASC", null, "-)"]', u'-)', u'["EMOASC", {"-)^3": 2}, "-)"]', None, None, None, None, None, None, None, None),
]
right_syntagma = [u'EMOASC', u'EMOIMG']
right_baseline = [[[u'-)', u'\U0001f600'], u'-)++\U0001f600', 2, 1, u'[1, 1]', u'[1, 1]', None, None, None, None], [[u'\U0001f600'], u'\U0001f600', 1, 1, u'1', u'1', None, None, u'1', None], [[u'\U0001f600', u'-)'], u'\U0001f600++-)', 2, 1, u'[1, 2]', u'[1, 2]', u'[0, 1]', u'[0, 2]', None, None], [[u'-)', u'\U0001f600', u'-)'], u'-)++\U0001f600++-)', 3, 1, u'[3, 1, "IGNOR"]', u'[3, 1, "IGNOR"]', u'[1, 0, "IGNOR"]', u'[2, 0, "IGNOR"]', None, None], [[u'-)'], u'-)', 1, 3, u'3', u'3', u'1', u'2', u'3', u'1']]
right_redu = []
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
extracted_syntagma.should.be.equal(right_syntagma)
# # ####################################################################################
# # #################. WORK WITH SENTIMENT #########################################
# # #################################################################################
### Case 3.1:
#- sentiment="positive"
syntagma = ["EMOASC"]
data = list(stats.get_data(syntagma, repl=True, redu=True, baseline=True, sentiment="positive", syntagma_type="pos"))
#p(data,"data")
#self.pretty_print_uniq(data[0],syn_order=False)
extracted_repl = data[0]["repl"]
extracted_redu = data[0]["redu"]
extracted_baseline = data[0]["baseline"]
extracted_syntagma = data[0]["syntagma"]
right_repl = [
(9, 8888, u'[4, 11]', u'[1, 10]', u'[1, 10]', u'-)', u'-)^3', u'-)', u')', 3, 1, u'[1, 10]', u'EMOASC', u'["positive", 0.5]', u'gemacht', u'["VVPP", null, "gemacht"]', u'!', u'["symbol", null, "!"]', u':-)', u'["EMOASC", null, ":-)"]', u'-)', u'["EMOASC", null, "-)"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', None, None, None, None, None, None, None, None, None, None),
(10, 8888, u'[4, 11]', u'[1, 11]', u'[1, 10]', u'-)', u'-)^3', u'-)', u')', 3, 1, u'[1, 10]', u'EMOASC', u'["positive", 0.5]', u'gemacht', u'["VVPP", null, "gemacht"]', u'!', u'["symbol", null, "!"]', u':-)', u'["EMOASC", null, ":-)"]', u'-)', u'["EMOASC", null, "-)"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', None, None, None, None, None, None, None, None, None, None),
(6, 8888, u'[4, 11]', u'[1, 7]', u'[1, 7]', u':-)', u':-)^4', u':-)', u')', 4, 2, None, u'EMOASC', u'["positive", 0.5]', u'sie', u'["PPER", null, "sie"]', u'mich', u'["PPER", null, "mich"]', u'gl\xfccklich', u'["ADJD", null, "glucklich"]', u'gemacht', u'["VVPP", null, "gemacht"]', u'!', u'["symbol", null, "!"]', u'-)', u'["EMOASC", null, "-)"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'-)', u'["EMOASC", {"-)^3": 2}, "-)"]', None, None, None, None),
(7, 8888, u'[4, 11]', u'[1, 8]', u'[1, 8]', u'-)', u'-)^3', u'-)', u')', 3, 1, None, u'EMOASC', u'["positive", 0.5]', u'mich', u'["PPER", null, "mich"]', u'gl\xfccklich', u'["ADJD", null, "glucklich"]', u'gemacht', u'["VVPP", null, "gemacht"]', u'!', u'["symbol", null, "!"]', u':-)', u'["EMOASC", null, ":-)"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', u'-)', u'["EMOASC", {"-)^3": 2}, "-)"]', None, None, None, None, None, None),
]
right_syntagma = [u'EMOASC']
right_baseline = [[[u'-)', u'\U0001f600'], u'-)++\U0001f600', 2, 1, u'[1, 1]', u'[1, 1]', None, None, None, None], [[u':-)', u'-)', u'\U0001f600'], u':-)++-)++\U0001f600', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, None, None], [[u':-)', u'-)'], u':-)++-)', 2, 1, u'[1, 1]', u'[1, 1]', None, None, None, None], [[u'-)'], u'-)', 1, 3, u'3', u'3', u'1', u'2', u'3', u'1'], [[u':-)'], u':-)', 1, 1, u'1', u'1', None, None, u'1', None], [[u':-)', u'-)', u'\U0001f600', u'-)'], u':-)++-)++\U0001f600++-)', 4, 1, u'[1, 3, 1, "IGNOR"]', u'[1, 3, 1, "IGNOR"]', u'[0, 1, 0, "IGNOR"]', u'[0, 2, 0, "IGNOR"]', None, None], [[u'-)', u'\U0001f600', u'-)'], u'-)++\U0001f600++-)', 3, 1, u'[3, 1, "IGNOR"]', u'[3, 1, "IGNOR"]', u'[1, 0, "IGNOR"]', u'[2, 0, "IGNOR"]', None, None]]
right_redu = [(3, 8888, u'[4, 11]', u'[1, 10]', u'[1, 10]', u'-)', u'-)', u'{"-)^3": 2}', 2, u'EMOASC', u'["positive", 0.5]', u'gemacht', u'["VVPP", null, "gemacht"]', u'!', u'["symbol", null, "!"]', u':-)', u'["EMOASC", null, ":-)"]', u'-)', u'["EMOASC", null, "-)"]', u'\U0001f600', u'["EMOIMG", null, "\\ud83d\\ude00"]', None, None, None, None, None, None, None, None, None, None)]
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
#p((extracted_syntagma, right_syntagma))
extracted_syntagma.should.be.equal(right_syntagma)
################################################################################################
################################################################################################
###################################################################################################
############################stemmed_search = True #########################################
#################################################################################################
################################################################################################
################################################################################################
########stemmed_search=True #
### Case 10.1:
syntagma = ["klein"]
items = list(stats.get_data(syntagma, repl=True, redu=True, baseline=True, sentiment=False, syntagma_type="lexem",stemmed_search=True))
for item in items:
#p(item["syntagma"])
if item["syntagma"] == [u'kleines']:
#self.pretty_print_uniq(item)
right_stem_syn = [u'klein']
right_repl = [
(52, 10000, u'[12, 3, 8]', u'[2, 12]', u'[2, 7]', u'kleines', u'klein^3e^2s', u'klein', u'n', 3, 4, u'[2, 7]', u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', None, None, None, None, None, None, None, None, None, None),
(53, 10000, u'[12, 3, 8]', u'[2, 13]', u'[2, 7]', u'kleines', u'kleines^4', u'klein', u's', 4, 6, u'[2, 7]', u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', None, None, None, None, None, None, None, None, None, None),
(66, 11111, u'[5, 6, 15, 3]', u'[3, 0]', u'[3, 0]', u'kleines', u'kleine^4s^7', u'klein', u'e', 4, 5, u'[3, 0]', u'NN', u'["neutral", 0.0]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(67, 11111, u'[5, 6, 15, 3]', u'[3, 0]', u'[3, 0]', u'kleines', u'kleine^4s^7', u'klein', u's', 7, 6, u'[3, 0]', u'NN', u'["neutral", 0.0]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(68, 11111, u'[5, 6, 15, 3]', u'[3, 1]', u'[3, 0]', u'kleines', u'klein^4e^3s^4', u'klein', u'n', 4, 4, u'[3, 0]', u'NN', u'["neutral", 0.0]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(69, 11111, u'[5, 6, 15, 3]', u'[3, 1]', u'[3, 0]', u'kleines', u'klein^4e^3s^4', u'klein', u'e', 3, 5, u'[3, 0]', u'NN', u'["neutral", 0.0]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(70, 11111, u'[5, 6, 15, 3]', u'[3, 1]', u'[3, 0]', u'kleines', u'klein^4e^3s^4', u'klein', u's', 4, 6, u'[3, 0]', u'NN', u'["neutral", 0.0]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(71, 11111, u'[5, 6, 15, 3]', u'[3, 2]', u'[3, 0]', u'kleines', u'kle^4i^5n^3e^2s^3', u'klein', u'e', 4, 2, u'[3, 0]', u'NN', u'["neutral", 0.0]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(72, 11111, u'[5, 6, 15, 3]', u'[3, 2]', u'[3, 0]', u'kleines', u'kle^4i^5n^3e^2s^3', u'klein', u'i', 5, 3, u'[3, 0]', u'NN', u'["neutral", 0.0]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(73, 11111, u'[5, 6, 15, 3]', u'[3, 2]', u'[3, 0]', u'kleines', u'kle^4i^5n^3e^2s^3', u'klein', u'n', 3, 4, u'[3, 0]', u'NN', u'["neutral", 0.0]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(74, 11111, u'[5, 6, 15, 3]', u'[3, 2]', u'[3, 0]', u'kleines', u'kle^4i^5n^3e^2s^3', u'klein', u's', 3, 6, u'[3, 0]', u'NN', u'["neutral", 0.0]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(26, 10000, u'[12, 3, 8]', u'[1, 0]', u'[1, 0]', u'kleines', u'kleine^4s^7', u'klein', u'e', 4, 5, u'[1, 0]', u'NN', u'["neutral", 0.0]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]', u'beser', u'["ADJD", null, "bes"]', u'kan', u'["FM", {"ka^4n^5": 1, "kan^6": 1}, "kan"]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]'),
(27, 10000, u'[12, 3, 8]', u'[1, 0]', u'[1, 0]', u'kleines', u'kleine^4s^7', u'klein', u's', 7, 6, u'[1, 0]', u'NN', u'["neutral", 0.0]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]', u'beser', u'["ADJD", null, "bes"]', u'kan', u'["FM", {"ka^4n^5": 1, "kan^6": 1}, "kan"]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]'),
(28, 10000, u'[12, 3, 8]', u'[1, 1]', u'[1, 0]', u'kleines', u'klein^4e^3s^4', u'klein', u'n', 4, 4, u'[1, 0]', u'NN', u'["neutral", 0.0]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]', u'beser', u'["ADJD", null, "bes"]', u'kan', u'["FM", {"ka^4n^5": 1, "kan^6": 1}, "kan"]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]'),
(29, 10000, u'[12, 3, 8]', u'[1, 1]', u'[1, 0]', u'kleines', u'klein^4e^3s^4', u'klein', u'e', 3, 5, u'[1, 0]', u'NN', u'["neutral", 0.0]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]', u'beser', u'["ADJD", null, "bes"]', u'kan', u'["FM", {"ka^4n^5": 1, "kan^6": 1}, "kan"]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]'),
(30, 10000, u'[12, 3, 8]', u'[1, 1]', u'[1, 0]', u'kleines', u'klein^4e^3s^4', u'klein', u's', 4, 6, u'[1, 0]', u'NN', u'["neutral", 0.0]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]', u'beser', u'["ADJD", null, "bes"]', u'kan', u'["FM", {"ka^4n^5": 1, "kan^6": 1}, "kan"]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]'),
(31, 10000, u'[12, 3, 8]', u'[1, 2]', u'[1, 0]', u'kleines', u'kle^4i^5n^3e^2s^3', u'klein', u'e', 4, 2, u'[1, 0]', u'NN', u'["neutral", 0.0]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]', u'beser', u'["ADJD", null, "bes"]', u'kan', u'["FM", {"ka^4n^5": 1, "kan^6": 1}, "kan"]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]'),
(32, 10000, u'[12, 3, 8]', u'[1, 2]', u'[1, 0]', u'kleines', u'kle^4i^5n^3e^2s^3', u'klein', u'i', 5, 3, u'[1, 0]', u'NN', u'["neutral", 0.0]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]', u'beser', u'["ADJD", null, "bes"]', u'kan', u'["FM", {"ka^4n^5": 1, "kan^6": 1}, "kan"]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]'),
(33, 10000, u'[12, 3, 8]', u'[1, 2]', u'[1, 0]', u'kleines', u'kle^4i^5n^3e^2s^3', u'klein', u'n', 3, 4, u'[1, 0]', u'NN', u'["neutral", 0.0]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]', u'beser', u'["ADJD", null, "bes"]', u'kan', u'["FM", {"ka^4n^5": 1, "kan^6": 1}, "kan"]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]'),
(34, 10000, u'[12, 3, 8]', u'[1, 2]', u'[1, 0]', u'kleines', u'kle^4i^5n^3e^2s^3', u'klein', u's', 3, 6, u'[1, 0]', u'NN', u'["neutral", 0.0]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]', u'beser', u'["ADJD", null, "bes"]', u'kan', u'["FM", {"ka^4n^5": 1, "kan^6": 1}, "kan"]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]'),
]
right_syntagma = [u'kleines']
right_baseline = ([[u'kleines'], u'klein', 1, 8, u'8', u'20', u'3', u'8', u'8', u'3'],)
right_redu = [
(16, 10000, u'[12, 3, 8]', u'[2, 12]', u'[2, 7]', u'kleines', u'klein', u'{"klein^3e^2s": 1, "kleines^4": 1}', 2, u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', None, None, None, None, None, None, None, None, None, None),
(17, 11111, u'[5, 6, 15, 3]', u'[3, 0]', u'[3, 0]', u'kleines', u'klein', u'{"kle^4i^5n^3e^2s^3": 1, "klein^4e^3s^4": 1, "kleine^4s^7": 1}', 3, u'NN', u'["neutral", 0.0]', 3, u'["number", null, "3"]', 4, u'["number", null, "4"]', 5, u'["number", null, "5"]', 6, u'["number", null, "6"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(11, 10000, u'[12, 3, 8]', u'[1, 0]', u'[1, 0]', u'kleines', u'klein', u'{"kle^4i^5n^3e^2s^3": 1, "klein^4e^3s^4": 1, "kleine^4s^7": 1}', 3, u'NN', u'["neutral", 0.0]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]', u'beser', u'["ADJD", null, "bes"]', u'kan', u'["FM", {"ka^4n^5": 1, "kan^6": 1}, "kan"]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]'),
]
elif item["syntagma"] == [u'kleinere']:
#self.pretty_print_uniq(item)
right_stem_syn = [u'klein']
right_repl = [
(37, 10000, u'[12, 3, 8]', u'[2, 0]', u'[2, 0]', u'kleinere', u'kleinere^5', u'klein', u'e', 5, 7, u'[2, 0]', u'NE', u'["neutral", 0.0]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'kleines', u'["NN", {"kle^4i^5n^3e^2s^3": 1, "klein^4e^3s^4": 1, "kleine^4s^7": 1}, "klein"]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]'),
(38, 10000, u'[12, 3, 8]', u'[2, 1]', u'[2, 0]', u'kleinere', u'kleine^3r^2e^5', u'klein', u'e', 3, 5, u'[2, 0]', u'NE', u'["neutral", 0.0]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'kleines', u'["NN", {"kle^4i^5n^3e^2s^3": 1, "klein^4e^3s^4": 1, "kleine^4s^7": 1}, "klein"]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]'),
(39, 10000, u'[12, 3, 8]', u'[2, 1]', u'[2, 0]', u'kleinere', u'kleine^3r^2e^5', u'klein', u'e', 5, 7, u'[2, 0]', u'NE', u'["neutral", 0.0]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'kleines', u'["NN", {"kle^4i^5n^3e^2s^3": 1, "klein^4e^3s^4": 1, "kleine^4s^7": 1}, "klein"]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]'),
]
right_syntagma = [u'kleinere']
right_baseline = ([[u'kleinere'], u'klein', 1, 2, u'2', u'3', u'1', u'2', u'2', u'1'],)
right_redu = [(12, 10000, u'[12, 3, 8]', u'[2, 0]', u'[2, 0]', u'kleinere', u'klein', u'{"kleinere^5": 1, "kleine^3r^2e^5": 1}', 2, u'NE', u'["neutral", 0.0]', u'es', u'["VVFIN", null, "es"]', u'.', u'["symbol", null, "."]', u'kleines', u'["NN", {"kle^4i^5n^3e^2s^3": 1, "klein^4e^3s^4": 1, "kleine^4s^7": 1}, "klein"]', u'm\xe4dchen', u'["NN", null, "madch"]', u'.', u'["symbol", null, "."]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'klein', u'["FM", {"kle^3i^3n^3": 1, "klein^5": 1}, "klein"]', u'.', u'["symbol", null, "."]')]
elif item["syntagma"] == [u'kleine']:
#self.pretty_print_uniq(item)
right_stem_syn = [u'klein']
right_repl = [
(82, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'e', 4, 2, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(83, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'i', 5, 3, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(84, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'n', 4, 4, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(85, 12222, u'[24]', u'[0, 24]', u'[0, 21]', u'kleine', u'kle^4i^5n^4e^8', u'klein', u'e', 8, 5, None, u'ADJA', u'["neutral", 0.0]', u',', u'["symbol", null, ","]', u'es', u'["PPER", null, "es"]', u'war', u'["VAFIN", null, "war"]', u'so', u'["ADV", null, "so"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', None, None, None, None, None, None),
(3, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5in^5e', u'klein', u'e', 5, 2, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(4, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5in^5e', u'klein', u'n', 5, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(5, 8888, u'[4, 11]', u'[0, 3]', u'[0, 1]', u'kleine', u'klein^3e', u'klein', u'n', 3, 4, u'[0, 1]', u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]'),
(21, 10000, u'[12, 3, 8]', u'[0, 2]', u'[0, 2]', u'kleine', u'kle^5ine', u'klein', u'e', 5, 2, None, u'ADJA', u'["neutral", 0.0]', None, None, None, None, None, None, u'eine', u'["ART", null, "ein"]', u'klitze', u'["ADJA", null, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'@sch\xf6nesleben', u'["mention", null, "@schonesleb"]', u'#machwasdaraus', u'["hashtag", null, "#machwasdaraus"]', u'#bewegedeinarsch', u'["hashtag", null, "#bewegedeinarsch"]', u'https://www.freiesinternet.de', u'["URL", null, "https://www.freiesinternet.d"]'),
(57, 11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]', u'kleine', u'kle^5i^2n^4e^5', u'klein', u'e', 5, 2, None, u'ADJA', u'["neutral", 0.0]', u'!', u'["symbol", null, "!"]', u'weil', u'["KOUS", null, "weil"]', u'es', u'["PPER", null, "es"]', u'ja', u'["PTKMA", null, "ja"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]'),
(58, 11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]', u'kleine', u'kle^5i^2n^4e^5', u'klein', u'n', 4, 4, None, u'ADJA', u'["neutral", 0.0]', u'!', u'["symbol", null, "!"]', u'weil', u'["KOUS", null, "weil"]', u'es', u'["PPER", null, "es"]', u'ja', u'["PTKMA", null, "ja"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]'),
(59, 11111, u'[5, 6, 15, 3]', u'[2, 4]', u'[2, 4]', u'kleine', u'kle^5i^2n^4e^5', u'klein', u'e', 5, 5, None, u'ADJA', u'["neutral", 0.0]', u'!', u'["symbol", null, "!"]', u'weil', u'["KOUS", null, "weil"]', u'es', u'["PPER", null, "es"]', u'ja', u'["PTKMA", null, "ja"]', u'eine', u'["ART", null, "ein"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'ist', u'["VAFIN", null, "ist"]', u'.', u'["symbol", null, "."]', 1, u'["number", null, "1"]', 2, u'["number", null, "2"]'),
]
right_syntagma = [u'kleine']
right_baseline = ([[u'kleine'], u'klein', 1, 7, u'5', u'11', u'1', u'2', u'5', u'1'],)
right_redu = [(2, 8888, u'[4, 11]', u'[0, 2]', u'[0, 1]', u'kleine', u'klein', u'{"kle^5in^5e": 1, "klein^3e": 1}', 2, u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze^7": 1}, "klitz"]', u'\xfcberaschung', u'["NN", null, "uberasch"]', u'.', u'["symbol", null, "."]', u'trotzdem', u'["PAV", null, "trotzd"]', u'hat', u'["VAFIN", null, "hat"]', u'sie', u'["PPER", null, "sie"]')]
elif item["syntagma"] == [u'klein']:
#self.pretty_print_uniq(item)
right_stem_syn = [u'klein']
right_repl = [
(45, 10000, u'[12, 3, 8]', u'[2, 7]', u'[2, 4]', u'klein', u'kle^3i^3n^3', u'klein', u'e', 3, 2, u'[2, 4]', u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None),
(46, 10000, u'[12, 3, 8]', u'[2, 7]', u'[2, 4]', u'klein', u'kle^3i^3n^3', u'klein', u'i', 3, 3, u'[2, 4]', u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None),
(47, 10000, u'[12, 3, 8]', u'[2, 7]', u'[2, 4]', u'klein', u'kle^3i^3n^3', u'klein', u'n', 3, 4, u'[2, 4]', u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None),
(48, 10000, u'[12, 3, 8]', u'[2, 8]', u'[2, 4]', u'klein', u'klein^5', u'klein', u'n', 5, 4, u'[2, 4]', u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None),
]
right_syntagma = [u'klein']
right_baseline = ([[u'klein'], u'klein', 1, 2, u'2', u'4', u'1', u'2', u'2', u'1'],)
right_redu = [(14, 10000, u'[12, 3, 8]', u'[2, 7]', u'[2, 4]', u'klein', u'klein', u'{"kle^3i^3n^3": 1, "klein^5": 1}', 2, u'FM', u'["neutral", 0.0]', u'.', u'["symbol", null, "."]', u'kleinere', u'["NE", {"kleinere^5": 1, "kleine^3r^2e^5": 1}, "klein"]', u'auswahl', u'["NN", null, "auswahl"]', u'.', u'["symbol", null, "."]', u'klitz', u'["NE", {"kli^4tz": 1, "klitz": 1, "kli^4tz^3": 1}, "klitz"]', u'.', u'["symbol", null, "."]', u'klitzes', u'["FM", {"klitzes^4": 1, "kli^3tzes^3": 1}, "klitz"]', u'kleines', u'["FM", {"klein^3e^2s": 1, "kleines^4": 1}, "klein"]', None, None, None, None)]
else:
assert False
extracted_repl = item["repl"]
extracted_redu = item["redu"]
extracted_baseline = item["baseline"]
extracted_syntagma = item["syntagma"]
item["stem_syn"] = ["klein"]
set(self.convert_all_lists_to_tuples(extracted_repl)).should.be.equal(set(self.convert_all_lists_to_tuples(right_repl)))
set(self.convert_all_lists_to_tuples(extracted_redu)).should.be.equal(set(self.convert_all_lists_to_tuples(right_redu)))
set(list( tuple(unicode(elem) for elem in item ) for item in extracted_baseline)).should.be.equal(set(list( tuple( unicode(elem) for elem in item ) for item in right_baseline)))
extracted_syntagma.should.be.equal(right_syntagma)
#item["baseline"] = [unicode(item) for item in item["baseline"] ]
#item.should.be.equal({"repl":right_repl, "redu":right_redu,"baseline":[unicode(item) for item in right_baseline ], "syntagma":right_syntagma, "stem_syn":item["stem_syn"]})
@attr(status='stable')
def test_test_get_header_for_exhausted_output_table_type_612_1(self):
    """Check ``Stats._get_header_exhausted`` for repl/redu/baseline flag combinations.

    Combinations without ``baseline=True`` (or without any context window)
    must yield a falsy result; combinations that include ``baseline`` must
    yield the complete per-category column headers (repl/redu/word/document/
    context/baseline).
    """
    self.prj_folder()
    self.test_dbs()
    # NOTE(review): mode is hard-coded to "silent" here while sibling tests
    # use self.mode -- presumably intentional to keep this test quiet.
    stats = Stats(mode="silent", use_cash=True)
    stats.open(os.path.join(self.tempdir_testdbs, self.db_blogger_plaintext_stats_de))

    ## Case 1: nothing requested -> no header at all.
    cols = stats._get_header_exhausted(repl=False, redu=False, baseline=False, additional_doc_cols=False, context_len_left=True, context_len_right=True)
    assert not cols

    ###### repl = True
    ## Case 2.1: repl alone (no baseline) -> still no header.
    cols = stats._get_header_exhausted(repl=True, redu=False, baseline=False, additional_doc_cols=False, context_len_left=True, context_len_right=True)
    assert not cols

    ## Case 2.2: identical call repeated -- guards against stale results
    ## from the use_cash=True code path.
    cols = stats._get_header_exhausted(repl=True, redu=False, baseline=False, additional_doc_cols=False, context_len_left=True, context_len_right=True)
    assert not cols

    ## Case 2.3: repl + baseline with a 1/1 context window -> full header set.
    cols = stats._get_header_exhausted(repl=True, redu=False, baseline=True, additional_doc_cols=False, context_len_left=1, context_len_right=1)
    cols["repl"].should.be.equal(('id', 'index_in_corpus', 'index_in_redufree', 'repl_letter', 'repl_length', 'index_of_repl', 'in_redu'))
    assert not cols["redu"]
    cols["word"].should.be.equal(('normalized_word', 'rle_word', 'stemmed', 'pos', 'polarity'))
    cols["document"].should.be.equal((('doc_id', 'redufree_len'), None))
    cols["context"].should.be.equal(('contextL1', 'context_infoL1', 'contextR1', 'context_infoR1'))
    cols["baseline"].should.be.equal(('syntagma', 'stemmed', 'scope', 'occur_syntagma_all', 'occur_repl_uniq', 'occur_repl_exhausted', 'occur_full_syn_repl'))

    ## Case 2.4: repl without any context window -> no header.
    cols = stats._get_header_exhausted(repl=True, redu=False, baseline=False, additional_doc_cols=False, context_len_left=False, context_len_right=False)
    assert not cols

    ## Case 2.5: additional document columns alone do not create a header either.
    cols = stats._get_header_exhausted(repl=True, redu=False, baseline=False, additional_doc_cols=["gender", "sex"], context_len_left=False, context_len_right=False)
    assert not cols

    ###### redu ; baseline
    ## Case 3.1: redu alone (no baseline) -> no header.
    cols = stats._get_header_exhausted(repl=False, redu=True, baseline=False, additional_doc_cols=False, context_len_left=True, context_len_right=True)
    assert not cols

    ## Case 3.2: redu + baseline -> redu-flavoured header set
    ## (no 'rle_word' column, redu-specific baseline occurrence columns).
    cols = stats._get_header_exhausted(repl=False, redu=True, baseline=True, additional_doc_cols=False, context_len_left=True, context_len_right=True)
    assert not cols["repl"]
    cols["redu"].should.be.equal(('id', 'index_in_corpus', 'index_in_redufree', 'orig_words', 'redu_length'))
    cols["word"].should.be.equal(('normalized_word', 'stemmed', 'pos', 'polarity'))
    cols["document"].should.be.equal((('doc_id', 'redufree_len'), None))
    cols["context"].should.be.equal(('contextL5', 'context_infoL5', 'contextL4', 'context_infoL4', 'contextL3', 'context_infoL3', 'contextL2', 'context_infoL2', 'contextL1', 'context_infoL1', 'contextR1', 'context_infoR1', 'contextR2', 'context_infoR2', 'contextR3', 'context_infoR3', 'contextR4', 'context_infoR4', 'contextR5', 'context_infoR5'))
    cols["baseline"].should.be.equal(('syntagma', 'stemmed', 'scope', 'occur_syntagma_all', 'occur_redu_uniq', 'occur_redu_exhausted', 'occur_full_syn_redu'))

    ###### repl=True; redu=True
    ## Case 4.1: both repl and redu but no baseline -> no header.
    cols = stats._get_header_exhausted(repl=True, redu=True, baseline=False, additional_doc_cols=False, context_len_left=True, context_len_right=True)
    assert not cols

    ###### repl=True; redu=True; baseline=True
    ## Case 5.1: everything requested -> union of repl and redu header sets.
    cols = stats._get_header_exhausted(repl=True, redu=True, baseline=True, additional_doc_cols=False, context_len_left=True, context_len_right=True)
    cols["repl"].should.be.equal(('id', 'index_in_corpus', 'index_in_redufree', 'repl_letter', 'repl_length', 'index_of_repl', 'in_redu'))
    cols["redu"].should.be.equal(('id', 'index_in_corpus', 'index_in_redufree', 'orig_words', 'redu_length'))
    cols["word"].should.be.equal(('normalized_word', 'rle_word', 'stemmed', 'pos', 'polarity'))
    cols["document"].should.be.equal((('doc_id', 'redufree_len'), None))
    cols["context"].should.be.equal(('contextL5', 'context_infoL5', 'contextL4', 'context_infoL4', 'contextL3', 'context_infoL3', 'contextL2', 'context_infoL2', 'contextL1', 'context_infoL1', 'contextR1', 'context_infoR1', 'contextR2', 'context_infoR2', 'contextR3', 'context_infoR3', 'contextR4', 'context_infoR4', 'contextR5', 'context_infoR5'))
    cols["baseline"].should.be.equal(('syntagma', 'stemmed', 'scope', 'occur_syntagma_all', 'occur_repl_uniq', 'occur_repl_exhausted', 'occur_redu_uniq', 'occur_redu_exhausted', 'occur_full_syn_repl', 'occur_full_syn_redu'))
@attr(status='stable')
def test_test_get_header_for_sum_output_table_type_612_2(self):
    """Check ``Stats._get_header_sum`` for every repl/redu flag combination.

    With neither flag set the method yields ``False``; with ``repl`` it
    yields letter-oriented columns (plus 'Examples' when word examples are
    requested); with ``redu`` it yields word-oriented columns regardless of
    the examples flag.
    """
    self.prj_folder()
    self.test_dbs()
    stats = Stats(mode=self.mode, use_cash=True)
    stats.open(os.path.join(self.tempdir_testdbs, self.db_blogger_plaintext_stats_de))

    # (repl, redu, word_examples_sum_table) -> expected header
    cases = (
        ((False, False, True), False),
        ((True, False, True), ('letter', 'NrOfRepl', 'Occur', 'Examples')),
        ((True, False, False), ('letter', 'NrOfRepl', 'Occur')),
        ((False, True, True), ('word', 'ReduLength', 'Occur')),
        ((False, True, False), ('word', 'ReduLength', 'Occur')),
    )
    for (repl_flag, redu_flag, with_examples), expected in cases:
        header = stats._get_header_sum(repl=repl_flag, redu=redu_flag,
                                       word_examples_sum_table=with_examples)
        header.should.be.equal(expected)
@attr(status='stable')
def test_export_613_1(self):
    """Smoke-test ``Stats.export`` over many option combinations.

    Exercises the "exhausted" output table type (all syntagmas, additional
    corpus columns, varying context windows, max_scope, stemmed/unstemmed and
    pos-based syntagma searches, csv/xml/json formats) and the "sum" output
    table type, then asserts that exactly 18 files were produced in the
    project folder.
    """
    self.prj_folder()
    self.test_dbs()
    stats = Stats(mode=self.mode, use_cash=True, status_bar=True)
    # NOTE(review): the original pulled a dozen values out of
    # self.configer.init_info_data here (name, language, license, ids, ...)
    # without ever using them -- dead copy-paste boilerplate, removed.
    stats.open(os.path.join(self.tempdir_testdbs, self.db_blogger_plaintext_stats_de))

    ########################################
    ########   EXHAUSTED TYPE    ###########
    ########################################
    rewrite = True

    ######### I. FOR ALL SYNTAGMA ##############
    ##### Export into different file formats.
    stats.export(self.tempdir_project_folder, repl=True, redu=True, export_file_type="csv", output_table_type="exhausted", rewrite=False, max_scope=2)
    stats.export(self.tempdir_project_folder, repl=True, redu=True, export_file_type="xml", output_table_type="exhausted", rewrite=rewrite,)
    stats.export(self.tempdir_project_folder, repl=True, redu=True, export_file_type="json", output_table_type="exhausted", rewrite=rewrite,)
    #### Export with additional columns taken from the CorpusDB.
    stats.export(self.tempdir_project_folder, repl=True, redu=True, export_file_type="csv", output_table_type="exhausted", rewrite=rewrite,
                 additional_doc_cols=["gender", "working_area", "age"], fname="WITH_ADDIT_COLS_FROM_CORP",
                 path_to_corpdb=os.path.join(self.tempdir_testdbs, self.db_blogger_plaintext_corp_de))
    #### Export with an empty context window.
    stats.export(self.tempdir_project_folder, repl=True, redu=True, export_file_type="csv", output_table_type="exhausted", rewrite=rewrite,
                 context_len_left=False, context_len_right=False, fname="NULL_KONTEXT")
    #### Export with an asymmetric (1 left / 2 right) context window.
    stats.export(self.tempdir_project_folder, repl=True, redu=True, export_file_type="csv", output_table_type="exhausted", rewrite=rewrite,
                 context_len_left=1, context_len_right=2, fname="1_2_KONTEXT_")
    #### Export limited to syntagmas of length 1.
    stats.export(self.tempdir_project_folder, repl=True, redu=True, export_file_type="csv", output_table_type="exhausted", rewrite=rewrite,
                 max_scope=1, fname="MAX_SCOPE_VON_ONE")

    ######### II. FOR FEW SYNTAGMA #############
    ##### Stemmed vs. unstemmed word search.
    stats.export(self.tempdir_project_folder, syntagma=["klitze"], repl=True, redu=True, export_file_type="csv", output_table_type="exhausted", rewrite=rewrite,
                 stemmed_search=True, fname="STEMMED_FOR_KLITZE")
    stats.export(self.tempdir_project_folder, syntagma=["klitze", "kleine"], repl=True, redu=True, export_file_type="csv", output_table_type="exhausted", rewrite=rewrite,
                 stemmed_search=True, fname="STEMMED_FOR_KLITZE_KLEINE")
    stats.export(self.tempdir_project_folder, syntagma=["klitze", "kleine"], repl=True, redu=True, export_file_type="csv", output_table_type="exhausted", rewrite=rewrite,
                 stemmed_search=False, fname="UN_STEMMED_FOR_KLITZE_KLEINE")
    stats.export(self.tempdir_project_folder, syntagma=[["klitze", "kleine"], ["klitzes", "kleines"], ["klitz", "klein"]], repl=True, redu=True, export_file_type="csv", output_table_type="exhausted", rewrite=rewrite,
                 stemmed_search=False, fname="UN_STEMMED_FOR_ALL_KLITZ_KLEIN")
    ##### POS-tag based search.
    stats.export(self.tempdir_project_folder, syntagma=["EMOIMG", "EMOASC"], repl=True, redu=True, export_file_type="csv", output_table_type="exhausted", rewrite=rewrite,
                 stemmed_search=False, fname="EMOIMG_EMOASC", syntagma_type="pos")
    stats.export(self.tempdir_project_folder, syntagma=["EMOIMG"], repl=True, redu=True, export_file_type="csv", output_table_type="exhausted", rewrite=rewrite,
                 stemmed_search=False, fname="EMOIMG", syntagma_type="pos")
    stats.export(self.tempdir_project_folder, syntagma=["EMOASC"], repl=True, redu=True, export_file_type="csv", output_table_type="exhausted", rewrite=rewrite,
                 stemmed_search=False, fname="EMOASC", syntagma_type="pos")

    ########################################
    ########      SUM TYPE       ###########
    ########################################
    stats.export(self.tempdir_project_folder, repl=True, redu=False, export_file_type="csv", output_table_type="sum", fname="SUM_REPL", rewrite=rewrite,)
    stats.export(self.tempdir_project_folder, repl=False, redu=True, export_file_type="csv", output_table_type="sum", fname="SUM_REDU", rewrite=rewrite,)
    stats.export(self.tempdir_project_folder, syntagma=["EMOIMG"], repl=True, redu=False, export_file_type="csv", rewrite=rewrite,
                 output_table_type="sum", fname="SUM_REPL_EMOIMG", syntagma_type="pos")
    stats.export(self.tempdir_project_folder, syntagma=["EMOASC"], repl=False, redu=True, export_file_type="csv", rewrite=rewrite,
                 output_table_type="sum", fname="SUM_REDU_EMOASC", syntagma_type="pos")

    # One file per export call above (the duplicate csv/xml/json of the first
    # block collapse onto distinct names) -> exactly 18 files expected.
    files = os.listdir(self.tempdir_project_folder)
    len(files).should.be.equal(18)
@attr(status='stable')
#@wipd
def test_test_export_generator_structure_613_2(self):
self.prj_folder()
#self.blogger_corpus()
self.test_dbs()
#stats = Stats(mode=self.mode)
stats = Stats(mode=self.mode,use_cash=True, status_bar=True)#, )
name = self.configer.init_info_data["blogger"]["name"]
language = self.configer.init_info_data["blogger"]["language"]
visibility = self.configer.init_info_data["blogger"]["visibility"]
platform_name = self.configer.init_info_data["blogger"]["platform_name"]
license = self.configer.init_info_data["blogger"]["license"]
template_name = self.configer.init_info_data["blogger"]["template_name"]
version = self.configer.init_info_data["blogger"]["version"]
source = self.configer.init_info_data["blogger"]["source"]
encryption_key = self.configer.init_info_data["blogger"]["encryption_key"]["stats"]
corpus_id = self.configer.init_info_data["blogger"]["id"]["corpus"]
stats_id = self.configer.init_info_data["blogger"]["id"]["stats"]
typ= "stats"
stats.open(os.path.join(self.tempdir_testdbs,self.db_blogger_plaintext_stats_de))
stats.attach_corpdb(os.path.join(self.tempdir_testdbs,self.db_blogger_plaintext_corp_de))
# ##################
# #### Case 1.1 ######
# ##################
# repl = True
# redu = True
# baseline = True
# output_table_type = "exhausted"
# max_scope = False
# additional_doc_cols = ()
# context_len_left = True
# context_len_right = True
# word_examples_sum_table = True
# header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
# col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
# #p(col_num, "col_num")
# assert col_num == 49
# #p(header, "header")
# #syntagma = ["kleine"]
# #syntagma = "*"
# syntagma = "*"
# stemmed_search = False
# data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search, max_scope=3)
# for i,row in enumerate(data):
# if not row: continue
# len(row).should.be.equal(col_num)
# #p(row, "row")
# p(i, "i")
############################################################
####### output_table_type = "exhausted" ########
##################################################################
##################
#### Case 1.1 ######
##################
repl = True
redu = True
baseline = True
output_table_type = "exhausted"
max_scope = False
additional_doc_cols = ()
context_len_left = True
context_len_right = True
word_examples_sum_table = True
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
#p(col_num, "col_num")
assert col_num == 49
#p(header, "header")
#syntagma = ["kleine"]
#syntagma = "*"
syntagma = ["klitze","kleine"]
stemmed_search = False
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search)
for row in data:
if not row: continue
len(row).should.be.equal(col_num)
#p(row, "row")
##################
#### Case 1.2 ######
##################
repl = True
redu = True
baseline = True
output_table_type = "exhausted"
max_scope = False
additional_doc_cols = ()
context_len_left = True
context_len_right = True
word_examples_sum_table = True
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
#p(col_num, "col_num")
assert col_num == 49
#p(header, "header")
#syntagma = ["kleine"]
#syntagma = "*"
syntagma = ["klitze","kleine"]
stemmed_search = True
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search)
for row in data:
if not row: continue
len(row).should.be.equal(col_num)
#p(row, "row")
##################
#### Case 1.3 ######
##################
repl = True
redu = True
baseline = True
output_table_type = "exhausted"
max_scope = False
additional_doc_cols = ()
context_len_left = True
context_len_right = True
word_examples_sum_table = True
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
#p(col_num, "col_num")
assert col_num == 49
#p(header, "header")
#syntagma = ["kleine"]
syntagma = "*"
#syntagma = ["klitze","kleine"]
stemmed_search = True
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search)
for row in data:
if not row: continue
len(row).should.be.equal(col_num)
#p(row, "row")
##################
#### Case 1.3 ######
##################
repl = False
redu = True
baseline = True
output_table_type = "exhausted"
max_scope = False
additional_doc_cols = ()
context_len_left = True
context_len_right = True
word_examples_sum_table = True
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
#p(col_num, "col_num")
assert col_num == 38
#p(header, "header")
#syntagma = ["kleine"]
syntagma = "*"
#syntagma = ["klitze","kleine"]
stemmed_search = True
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search)
for row in data:
if not row: continue
len(row).should.be.equal(col_num)
#p(row, "row")
##################
#### Case 1.4 ######
##################
repl = True
redu = True
baseline = True
output_table_type = "exhausted"
max_scope = False
additional_doc_cols = ()
context_len_left = True
context_len_right = True
word_examples_sum_table = True
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
#p(col_num, "col_num")
assert col_num == 49
#p(header, "header")
#syntagma = ["kleine"]
syntagma = "*"
#syntagma = ["klitze","kleine"]
stemmed_search = True
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search,sentiment="positive")
for row in data:
if not row: continue
len(row).should.be.equal(col_num)
#p(row, "row")
##################
#### Case 1.4 ######
##################
repl = True
redu = True
baseline = True
output_table_type = "exhausted"
max_scope = False
additional_doc_cols = ()
context_len_left = True
context_len_right = True
word_examples_sum_table = True
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
#p(col_num, "col_num")
assert col_num == 49
#p(header, "header")
#syntagma = ["kleine"]
#syntagma = "*"
rows_equal = []
rows_not_equal = []
counter2 = 0
syntagma = [["klitzes","kleines"], ["klitz","klein"], ["klitze","kleine"]]
stemmed_search = False
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search)
for row in data:
if row:
rows_equal.append(row)
counter2 += 1
len(row).should.be.equal(col_num)
#p(row, "row")
counter1 = 0
syntagma = ["klitze","kleine"]
stemmed_search = True
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search)
#list(data)
# if not data:
# assert False
rows = []
for row in data:
if row:
#rows.add(row)#
if not row: continue
counter1 += 1
len(row).should.be.equal(col_num)
if row not in rows_equal:
rows_not_equal.append(row)
#p(row, "row")
counter1.should.be.equal(counter2)
assert not rows_not_equal
#p((counter1, counter2))
# #################
# ### Case 1.4 ######
# ##################
repl = True
redu = True
baseline = True
output_table_type = "exhausted"
max_scope = False
additional_doc_cols = ()
context_len_left = True
context_len_right = True
word_examples_sum_table = True
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
#p(col_num, "col_num")
assert col_num == 49
#p(header, "header")
#syntagma = ["kleine"]
#syntagma = "*"
syntagma = [["klitze","kleine"]]
#syntagma = ["klitze","kleine"]
stemmed_search = True
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search,sentiment="neutral")
#p(data, "data")
if not data:
assert False
i = 0
for row in data:
#p(row, "row")
if not row: continue
i+= 1
len(row).should.be.equal(col_num)
#p(row, "row")
#assert i == 0
#p(list(stats.get_data([["klitze","kleine"]], repl=True, redu=True, baseline=True,send_empty_marker=True ) ))
##################
#### Case 2 ######
##################
repl = True
redu = False
baseline = True
output_table_type = "exhausted"
max_scope = False
additional_doc_cols = ()
context_len_left = True
context_len_right = True
word_examples_sum_table = True
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
#p(col_num, "col_num")
assert col_num == 41
#p(header, "header")
#syntagma = ["kleine"]
#syntagma = "*"
syntagma = ["klitze","kleine"]
stemmed_search = False
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search)
for row in data:
if not row: continue
len(row).should.be.equal(col_num)
#p(row, "row")
##################
#### Case 3 ######
##################
repl = False
redu = True
baseline = True
output_table_type = "exhausted"
max_scope = False
additional_doc_cols = ()
context_len_left = True
context_len_right = True
word_examples_sum_table = True
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
#p(col_num, "col_num")
assert col_num == 38
#p(header, "header")
#syntagma = ["kleine"]
#syntagma = "*"
syntagma = ["klitze","kleine"]
stemmed_search = False
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search)
for row in data:
if not row: continue
len(row).should.be.equal(col_num)
#p(row, "row")
##################
#### Case 4 ######
##################
repl = False
redu = True
baseline = True
output_table_type = "exhausted"
max_scope = False
#additional_doc_cols = ()
additional_doc_cols = ("gender", "age", "working_area")
context_len_left = True
context_len_right = True
word_examples_sum_table = True
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
#p(col_num, "col_num")
assert col_num == 41
stats.cols_exists_in_corpb(additional_doc_cols)
#p(header, "header")
#syntagma = ["kleine"]
#syntagma = "*"
syntagma = ["klitze","kleine"]
stemmed_search = False
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search)
for row in data:
if not row: continue
len(row).should.be.equal(col_num)
#p(row, "row")
##########################################################
##########################################################
##########################################################
############################################################
####### output_table_type = "sum" ########
##################################################################
##########################################################
##################
#### Case 1.1 ######
##################
repl = False
redu = True
baseline = True
output_table_type = "sum"
max_scope = False
word_examples_sum_table = True
additional_doc_cols = False
context_len_right =True
context_len_left = True
reptype_sum_table = "redu"
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
col_num = len(header)
#p(header, "header")
#assert col_num == 41
#stats.cols_exists_in_corpb(additional_doc_cols)
#p(header, "header")
#syntagma = ["kleine"]
syntagma = "*"
#syntagma = ["klitze","kleine"]
stemmed_search = False
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search,output_table_type=output_table_type, reptype_sum_table=reptype_sum_table)
for row in data:
if not row: continue
len(row).should.be.equal(col_num)
#p(row, "row")
##################
#### Case 1.2 ######
##################
repl = False
redu = True
baseline = True
output_table_type = "sum"
max_scope = False
word_examples_sum_table = True
additional_doc_cols = False
context_len_right =True
context_len_left = True
reptype_sum_table = "redu"
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
col_num = len(header)
sentiment = "positive"
#p(header, "header")
#assert col_num == 41
#stats.cols_exists_in_corpb(additional_doc_cols)
#p(header, "header")
#syntagma = ["kleine"]
syntagma = "*"
#syntagma = ["klitze","kleine"]
stemmed_search = False
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search,output_table_type=output_table_type, reptype_sum_table=reptype_sum_table,sentiment=sentiment)
for row in data:
if not row: continue
len(row).should.be.equal(col_num)
#p(row, "row")
##################
#### Case 2.1 ######
##################
repl = True
redu = False
baseline = True
output_table_type = "sum"
max_scope = False
word_examples_sum_table = True
additional_doc_cols = False
context_len_right =True
context_len_left = True
reptype_sum_table = "repl"
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
col_num = len(header)
syntagma = "*"
#syntagma = ["klitze","kleine"]
stemmed_search = False
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search,output_table_type=output_table_type, reptype_sum_table=reptype_sum_table)
for row in data:
#print "ghjk"
#p(row, "row")
if not row: continue
len(row).should.be.equal(col_num)
##################
#### Case 2.2 ######
##################
repl = True
redu = False
baseline = True
output_table_type = "sum"
max_scope = False
word_examples_sum_table = True
additional_doc_cols = False
context_len_right =True
context_len_left = True
reptype_sum_table = "repl"
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
col_num = len(header)
sentiment = "positive"
syntagma = "*"
#syntagma = ["klitze","kleine"]
stemmed_search = False
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search,output_table_type=output_table_type, reptype_sum_table=reptype_sum_table,sentiment=sentiment)
for row in data:
#print "ghjk"
#p(row, "row")
if not row: continue
len(row).should.be.equal(col_num)
def _summerize_reps3(self,header,data,redu=False, repl=True):
import copy
### Step 1: Summerizing
dict_repls = defaultdict(lambda:defaultdict(lambda:defaultdict(lambda: 0)))
dict_redus = defaultdict(lambda:defaultdict(lambda:defaultdict(lambda: 0)))
dict_baseline = defaultdict(dict)
if repl:
ix_repl_index = header.index("[repl].index_in_corpus")
ix_repl_id = header.index("[repl].id")
ix_occur_repl_uniq = header.index('[baseline].occur_repl_uniq')
ix_occur_repl_exhausted = header.index('[baseline].occur_repl_exhausted')
ix_occur_full_syn_repl = header.index('[baseline].occur_full_syn_repl')
if redu:
dict_redus = defaultdict(lambda:defaultdict(lambda:defaultdict(lambda: 0)))
ix_redu_index = header.index("[redu].index_in_corpus")
ix_redu_lenght = header.index("[redu].redu_length")
ix_occur_redu_uniq = header.index('[baseline].occur_redu_uniq')
ix_occur_redu_exhausted = header.index('[baseline].occur_redu_exhausted')
ix_occur_full_syn_redu = header.index('[baseline].occur_full_syn_redu')
ix_redu_id = header.index("[redu].id")
ix_doc_id = header.index('[document].doc_id')
ix_syn = header.index("[baseline].syntagma")
ix_occur = header.index('[baseline].occur_syntagma_all')
ix_scope = header.index("[baseline].scope")
ix_word = header.index("[word].normalized_word")
### REPLS
if repl:
for row in data:
#word = row{}
repl_id = row[ix_repl_id]
if repl_id:
doc_id = row[ix_doc_id]
index_in_corpus = row[ix_repl_index]
word = row[ix_word]
dict_repls[word][doc_id][index_in_corpus] += 1
#p((doc_id, index_in_corpus,word,repl_id),c="m")
### REDUS
if redu:
for row in data:
redu_id = row[ix_redu_id]
if redu_id:
#p(row, "redu")
doc_id = row[ix_doc_id]
index_in_corpus = row[ix_redu_index]
word = row[ix_word]
redu_lenght = row[ix_redu_lenght]
#p((redu_id,doc_id,index_in_corpus,word,redu_lenght),c="c")
dict_redus[word][doc_id][index_in_corpus] = redu_lenght
### baseline
for row in data:
#word = row{}
syntagma = row[ix_syn]
scope = row[ix_scope]
occur_all = row[ix_occur]
dict_baseline[syntagma]["occur_all"] = occur_all
dict_baseline[syntagma]["scope"] = scope
if repl:
occur_repl_uniq= row[ix_occur_repl_uniq]
occur_repl_exhausted = row[ix_occur_repl_exhausted]
occur_full_syn_repl = row[ix_occur_full_syn_repl]
dict_baseline[syntagma]["occur_repl_uniq"] = occur_repl_uniq
dict_baseline[syntagma]["occur_repl_exhausted"] = occur_repl_exhausted
dict_baseline[syntagma]["occur_full_syn_repl"] = occur_full_syn_repl
if redu:
occur_redu_uniq= row[ix_occur_redu_uniq]
occur_redu_exhausted = row[ix_occur_redu_exhausted]
occur_full_syn_redu = row[ix_occur_full_syn_redu]
dict_baseline[syntagma]["occur_redu_uniq"] = occur_redu_uniq
dict_baseline[syntagma]["occur_redu_exhausted"] = occur_redu_exhausted
dict_baseline[syntagma]["occur_full_syn_redu"] = occur_full_syn_redu
##### Step 2: Counts
computed_counts = defaultdict(lambda:defaultdict(lambda:[0,0]))
if repl:
for word, word_data in dict_repls.items():
for doc_id, doc_data in word_data.items():
for index_in_corpus, counter in doc_data.items():
computed_counts[word]["repl"][0] += 1
computed_counts[word]["repl"][1] += counter
if redu:
for word, word_data in dict_redus.items():
for doc_id, doc_data in word_data.items():
for index_in_corpus, counter in doc_data.items():
computed_counts[word]["redu"][0] += 1
computed_counts[word]["redu"][1] += counter
temp_dict = {}
for syntagma, counter_data in dict_baseline.items():
for counter_name, num in counter_data.items():
temp_dict[counter_name] = num
computed_counts[syntagma]["baseline"] = temp_dict
return computed_counts
@attr(status='stable')
#@wipd
def test_test_export_generator_content_correctnes_613_3(self):
self.prj_folder()
#self.blogger_corpus()
self.test_dbs()
#stats = Stats(mode=self.mode)
stats = Stats(mode=self.mode,use_cash=True, status_bar=True)#, )
name = self.configer.init_info_data["blogger"]["name"]
language = self.configer.init_info_data["blogger"]["language"]
visibility = self.configer.init_info_data["blogger"]["visibility"]
platform_name = self.configer.init_info_data["blogger"]["platform_name"]
license = self.configer.init_info_data["blogger"]["license"]
template_name = self.configer.init_info_data["blogger"]["template_name"]
version = self.configer.init_info_data["blogger"]["version"]
source = self.configer.init_info_data["blogger"]["source"]
encryption_key = self.configer.init_info_data["blogger"]["encryption_key"]["stats"]
corpus_id = self.configer.init_info_data["blogger"]["id"]["corpus"]
stats_id = self.configer.init_info_data["blogger"]["id"]["stats"]
typ= "stats"
gold_standard_data = self.configer._counted_reps["en"]
stats.open(os.path.join(self.tempdir_testdbs,self.db_blogger_plaintext_stats_en))
stats.attach_corpdb(os.path.join(self.tempdir_testdbs,self.db_blogger_plaintext_corp_en))
# # ############################################################
# # ####### output_table_type = "exhausted" ########
# # ##################################################################
##################
#### Case 1.1 ######
##################
repl = True
redu = False
baseline = True
output_table_type = "exhausted"
max_scope = False
additional_doc_cols = ()
context_len_left = True
context_len_right = True
word_examples_sum_table = True
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
ordered_header = stats.order_header(header, False,"csv")
col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
syntagma = ["bad"]
right_data = gold_standard_data[syntagma[0]]
repl_num = right_data["repl"][1]
stemmed_search = False
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search)
data = list(data)
#p(data)
#p((len(data),repl_num))
answer = self._summerize_reps3(ordered_header, data, redu=redu, repl=repl)
if repl:
assert len(data) >= right_data["repl"][1]
#p(answer, "answer")
#len(data).should.be.equal()
tuple(right_data["repl"]).should.be.equal(tuple(answer[syntagma[0]]["repl"]))
baseline_entry_repl = (int(answer[syntagma[0]]["baseline"]['occur_repl_uniq']), int(answer[syntagma[0]]["baseline"]['occur_repl_exhausted']))
tuple(right_data["repl"]).should.be.equal(tuple(baseline_entry_repl))
len(syntagma).should.be.equal(answer[syntagma[0]]["baseline"]['scope'])
#answer[syntagma[0]]["baseline"]['occur_full_syn_repl']
##################
#### Case 2.1 ######
##################
repl = True
redu = False
baseline = True
output_table_type = "exhausted"
max_scope = False
additional_doc_cols = ()
context_len_left = True
context_len_right = True
word_examples_sum_table = True
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
ordered_header = stats.order_header(header, False,"csv")
col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
syntagma = [u'big']
right_data = gold_standard_data[syntagma[0]]
repl_num = right_data["repl"][1]
stemmed_search = False
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search)
data = list(data)
#p(data)
#p((len(data),repl_num))
answer = self._summerize_reps3(ordered_header, data, redu=redu, repl=repl)
if repl:
assert len(data) >= right_data["repl"][1]
#p(answer, "answer")
len(data).should.be.equal(right_data["repl"][1])
tuple(right_data["repl"]).should.be.equal(tuple(answer[syntagma[0]]["repl"]))
baseline_entry_repl = (int(answer[syntagma[0]]["baseline"]['occur_repl_uniq']), int(answer[syntagma[0]]["baseline"]['occur_repl_exhausted']))
tuple(right_data["repl"]).should.be.equal(tuple(baseline_entry_repl))
len(syntagma).should.be.equal(answer[syntagma[0]]["baseline"]['scope'])
#answer[syntagma[0]]["baseline"]['occur_full_syn_repl']
##################
#### Case 2.2 ######
##################
repl = True
redu = True
baseline = True
output_table_type = "exhausted"
max_scope = False
additional_doc_cols = ()
context_len_left = True
context_len_right = True
word_examples_sum_table = True
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
ordered_header = stats.order_header(header, False,"csv")
col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
syntagma = [u'big']
right_data = gold_standard_data[syntagma[0]]
repl_num = right_data["repl"][1]
stemmed_search = False
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search)
data = list(data)
#p(data)
#p((len(data),repl_num))
answer = self._summerize_reps3(ordered_header, data, redu=redu, repl=repl)
if repl:
assert len(data) >= right_data["repl"][1]
#p(answer, "answer")
### REPL
tuple(right_data["repl"]).should.be.equal(tuple(answer[syntagma[0]]["repl"]))
baseline_entry_repl = (int(answer[syntagma[0]]["baseline"]['occur_repl_uniq']), int(answer[syntagma[0]]["baseline"]['occur_repl_exhausted']))
tuple(right_data["repl"]).should.be.equal(tuple(baseline_entry_repl))
len(syntagma).should.be.equal(answer[syntagma[0]]["baseline"]['scope'])
#answer[syntagma[0]]["baseline"]['occur_full_syn_repl']
### REDU
tuple(right_data["redu"]).should.be.equal(tuple(answer[syntagma[0]]["redu"]))
baseline_entry_redu = (int(answer[syntagma[0]]["baseline"]['occur_redu_uniq']), int(answer[syntagma[0]]["baseline"]['occur_redu_exhausted']))
tuple(right_data["redu"]).should.be.equal(tuple(baseline_entry_redu))
len(syntagma).should.be.equal(answer[syntagma[0]]["baseline"]['scope'])
#p(list(stats.get_data(syntagma, repl=True, redu=True,baseline=True))[0]["redu"])
##################
#### Case 3.1 ######
##################
repl = True
redu = True
baseline = True
output_table_type = "exhausted"
max_scope = False
additional_doc_cols = ()
context_len_left = True
context_len_right = True
word_examples_sum_table = True
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
ordered_header = stats.order_header(header, False,"csv")
col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
syntagma = [u'\U0001f308']
right_data = gold_standard_data[syntagma[0]]
repl_num = right_data["repl"][1]
stemmed_search = False
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search)
data = list(data)
#p(data)
#p((len(data),repl_num))
answer = self._summerize_reps3(ordered_header, data, redu=redu, repl=repl)
if repl:
assert len(data) >= right_data["repl"][1]
#p(answer, "answer")
### REPL
if "repl" in right_data:
tuple(right_data["repl"]).should.be.equal(tuple(answer[syntagma[0]]["repl"]))
baseline_entry_repl = (int(answer[syntagma[0]]["baseline"]['occur_repl_uniq']), int(answer[syntagma[0]]["baseline"]['occur_repl_exhausted']))
tuple(right_data["repl"]).should.be.equal(tuple(baseline_entry_repl))
len(syntagma).should.be.equal(answer[syntagma[0]]["baseline"]['scope'])
#answer[syntagma[0]]["baseline"]['occur_full_syn_repl']
### REDU
if "redu" in right_data:
tuple(right_data["redu"]).should.be.equal(tuple(answer[syntagma[0]]["redu"]))
baseline_entry_redu = (int(answer[syntagma[0]]["baseline"]['occur_redu_uniq']), int(answer[syntagma[0]]["baseline"]['occur_redu_exhausted']))
tuple(right_data["redu"]).should.be.equal(tuple(baseline_entry_redu))
len(syntagma).should.be.equal(answer[syntagma[0]]["baseline"]['scope'])
#p(list(stats.get_data(syntagma, repl=True, redu=True,baseline=True))[0]["redu"])
##################
#### Case 3.2 ######
##################
repl = True
redu = True
baseline = True
output_table_type = "exhausted"
max_scope = False
additional_doc_cols = ()
context_len_left = True
context_len_right = True
word_examples_sum_table = True
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
ordered_header = stats.order_header(header, False,"csv")
col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
syntagma = [u'\U0001f600']
right_data = gold_standard_data[syntagma[0]]
repl_num = right_data["repl"][1]
stemmed_search = False
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search)
data = list(data)
#p(data)
#p((len(data),repl_num))
answer = self._summerize_reps3(ordered_header, data, redu=redu, repl=repl)
if repl:
assert len(data) >= right_data["repl"][1]
#p(answer, "answer")
### REPL
if "repl" in right_data:
tuple(right_data["repl"]).should.be.equal(tuple(answer[syntagma[0]]["repl"]))
baseline_entry_repl = (int(answer[syntagma[0]]["baseline"]['occur_repl_uniq']), int(answer[syntagma[0]]["baseline"]['occur_repl_exhausted']))
tuple(right_data["repl"]).should.be.equal(tuple(baseline_entry_repl))
len(syntagma).should.be.equal(answer[syntagma[0]]["baseline"]['scope'])
#answer[syntagma[0]]["baseline"]['occur_full_syn_repl']
### REDU
if "redu" in right_data:
tuple(right_data["redu"]).should.be.equal(tuple(answer[syntagma[0]]["redu"]))
baseline_entry_redu = (int(answer[syntagma[0]]["baseline"]['occur_redu_uniq']), int(answer[syntagma[0]]["baseline"]['occur_redu_exhausted']))
tuple(right_data["redu"]).should.be.equal(tuple(baseline_entry_redu))
len(syntagma).should.be.equal(answer[syntagma[0]]["baseline"]['scope'])
#p(list(stats.get_data(syntagma, repl=True, redu=True,baseline=True))[0]["redu"])
##################
#### Case 4.1 ######
##################
repl = True
redu = True
baseline = True
output_table_type = "exhausted"
max_scope = False
additional_doc_cols = ()
context_len_left = True
context_len_right = True
word_examples_sum_table = True
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
ordered_header = stats.order_header(header, False,"csv")
col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
syntagma = [u':-(']
right_data = gold_standard_data[syntagma[0]]
repl_num = right_data["repl"][1]
stemmed_search = False
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search)
data = list(data)
#p(data)
#p((len(data),repl_num))
answer = self._summerize_reps3(ordered_header, data, redu=redu, repl=repl)
if repl:
assert len(data) >= right_data["repl"][1]
#p(answer, "answer")
### REPL
if "repl" in right_data:
tuple(right_data["repl"]).should.be.equal(tuple(answer[syntagma[0]]["repl"]))
baseline_entry_repl = (int(answer[syntagma[0]]["baseline"]['occur_repl_uniq']), int(answer[syntagma[0]]["baseline"]['occur_repl_exhausted']))
tuple(right_data["repl"]).should.be.equal(tuple(baseline_entry_repl))
len(syntagma).should.be.equal(answer[syntagma[0]]["baseline"]['scope'])
#answer[syntagma[0]]["baseline"]['occur_full_syn_repl']
### REDU
if "redu" in right_data:
tuple(right_data["redu"]).should.be.equal(tuple(answer[syntagma[0]]["redu"]))
baseline_entry_redu = (int(answer[syntagma[0]]["baseline"]['occur_redu_uniq']), int(answer[syntagma[0]]["baseline"]['occur_redu_exhausted']))
tuple(right_data["redu"]).should.be.equal(tuple(baseline_entry_redu))
len(syntagma).should.be.equal(answer[syntagma[0]]["baseline"]['scope'])
#p(list(stats.get_data(syntagma, repl=True, redu=True,baseline=True))[0]["redu"])
##################
#### Case 4.2 ######
##################
repl = True
redu = True
baseline = True
output_table_type = "exhausted"
max_scope = False
additional_doc_cols = ()
context_len_left = True
context_len_right = True
word_examples_sum_table = True
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
ordered_header = stats.order_header(header, False,"csv")
col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
syntagma = [u'1']
right_data = gold_standard_data[syntagma[0]]
repl_num = right_data["repl"][1]
stemmed_search = False
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search)
data = list(data)
#p(data)
#p((len(data),repl_num))
answer = self._summerize_reps3(ordered_header, data, redu=redu, repl=repl)
if repl:
assert len(data) >= right_data["repl"][1]
#p(answer, "answer")
### REPL
if "repl" in right_data:
tuple(right_data["repl"]).should.be.equal(tuple(answer[syntagma[0]]["repl"]))
baseline_entry_repl = (int(answer[syntagma[0]]["baseline"]['occur_repl_uniq']), int(answer[syntagma[0]]["baseline"]['occur_repl_exhausted']))
tuple(right_data["repl"]).should.be.equal(tuple(baseline_entry_repl))
len(syntagma).should.be.equal(answer[syntagma[0]]["baseline"]['scope'])
#answer[syntagma[0]]["baseline"]['occur_full_syn_repl']
### REDU
if "redu" in right_data:
tuple(right_data["redu"]).should.be.equal(tuple(answer[syntagma[0]]["redu"]))
baseline_entry_redu = (int(answer[syntagma[0]]["baseline"]['occur_redu_uniq']), int(answer[syntagma[0]]["baseline"]['occur_redu_exhausted']))
tuple(right_data["redu"]).should.be.equal(tuple(baseline_entry_redu))
len(syntagma).should.be.equal(answer[syntagma[0]]["baseline"]['scope'])
#p(list(stats.get_data(syntagma, repl=True, redu=True,baseline=True))[0]["redu"])
##################
#### Case 5.1 ######
##################
repl = True
redu = True
baseline = True
output_table_type = "exhausted"
max_scope = False
additional_doc_cols = ()
context_len_left = True
context_len_right = True
word_examples_sum_table = True
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
ordered_header = stats.order_header(header, False,"csv")
col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
syntagma = [u'tiny']
right_data = gold_standard_data[syntagma[0]]
repl_num = right_data["repl"][1]
stemmed_search = False
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search)
data = list(data)
#p(data)
#p((len(data),repl_num))
answer = self._summerize_reps3(ordered_header, data, redu=redu, repl=repl)
if repl:
assert len(data) >= right_data["repl"][1]
#p(answer, "answer")
### REPL
if "repl" in right_data:
tuple(right_data["repl"]).should.be.equal(tuple(answer[syntagma[0]]["repl"]))
baseline_entry_repl = (int(answer[syntagma[0]]["baseline"]['occur_repl_uniq']), int(answer[syntagma[0]]["baseline"]['occur_repl_exhausted']))
tuple(right_data["repl"]).should.be.equal(tuple(baseline_entry_repl))
len(syntagma).should.be.equal(answer[syntagma[0]]["baseline"]['scope'])
#answer[syntagma[0]]["baseline"]['occur_full_syn_repl']
### REDU
if "redu" in right_data:
tuple(right_data["redu"]).should.be.equal(tuple(answer[syntagma[0]]["redu"]))
baseline_entry_redu = (int(answer[syntagma[0]]["baseline"]['occur_redu_uniq']), int(answer[syntagma[0]]["baseline"]['occur_redu_exhausted']))
tuple(right_data["redu"]).should.be.equal(tuple(baseline_entry_redu))
len(syntagma).should.be.equal(answer[syntagma[0]]["baseline"]['scope'])
#p(list(stats.get_data(syntagma, repl=True, redu=True,baseline=True))[0]["redu"])
##################
#### Case 5.1 ######
##################
repl = False
redu = True
baseline = True
output_table_type = "exhausted"
max_scope = False
additional_doc_cols = ()
context_len_left = True
context_len_right = True
word_examples_sum_table = True
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
ordered_header = stats.order_header(header, False,"csv")
col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
syntagma = [u'tiny']
right_data = gold_standard_data[syntagma[0]]
repl_num = right_data["repl"][1]
stemmed_search = False
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search)
data = list(data)
#p(data)
#p((len(data),repl_num))
answer = self._summerize_reps3(ordered_header, data, redu=redu, repl=repl)
if repl:
assert len(data) >= right_data["repl"][1]
#p(answer, "answer")
### REPL
if "repl" in right_data and repl:
tuple(right_data["repl"]).should.be.equal(tuple(answer[syntagma[0]]["repl"]))
baseline_entry_repl = (int(answer[syntagma[0]]["baseline"]['occur_repl_uniq']), int(answer[syntagma[0]]["baseline"]['occur_repl_exhausted']))
tuple(right_data["repl"]).should.be.equal(tuple(baseline_entry_repl))
len(syntagma).should.be.equal(answer[syntagma[0]]["baseline"]['scope'])
#answer[syntagma[0]]["baseline"]['occur_full_syn_repl']
### REDU
if "redu" in right_data and redu:
tuple(right_data["redu"]).should.be.equal(tuple(answer[syntagma[0]]["redu"]))
baseline_entry_redu = (int(answer[syntagma[0]]["baseline"]['occur_redu_uniq']), int(answer[syntagma[0]]["baseline"]['occur_redu_exhausted']))
tuple(right_data["redu"]).should.be.equal(tuple(baseline_entry_redu))
len(syntagma).should.be.equal(answer[syntagma[0]]["baseline"]['scope'])
#p(list(stats.get_data(syntagma, repl=True, redu=True,baseline=True))[0]["redu"])
##################
#### Case 5.1 ######
##################
repl = False
redu = True
baseline = True
output_table_type = "exhausted"
max_scope = False
additional_doc_cols = ()
context_len_left = True
context_len_right = True
word_examples_sum_table = True
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
ordered_header = stats.order_header(header, False,"csv")
col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
syntagma = [u'bad']
right_data = gold_standard_data[syntagma[0]]
repl_num = right_data["repl"][1]
stemmed_search = False
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search)
data = list(data)
#p(data)
#p((len(data),repl_num))
answer = self._summerize_reps3(ordered_header, data, redu=redu, repl=repl)
if repl:
assert len(data) >= right_data["repl"][1]
#p(answer, "answer")
### REPL
if "repl" in right_data and repl:
tuple(right_data["repl"]).should.be.equal(tuple(answer[syntagma[0]]["repl"]))
baseline_entry_repl = (int(answer[syntagma[0]]["baseline"]['occur_repl_uniq']), int(answer[syntagma[0]]["baseline"]['occur_repl_exhausted']))
tuple(right_data["repl"]).should.be.equal(tuple(baseline_entry_repl))
len(syntagma).should.be.equal(answer[syntagma[0]]["baseline"]['scope'])
#answer[syntagma[0]]["baseline"]['occur_full_syn_repl']
### REDU
if "redu" in right_data and redu:
tuple(right_data["redu"]).should.be.equal(tuple(answer[syntagma[0]]["redu"]))
baseline_entry_redu = (int(answer[syntagma[0]]["baseline"]['occur_redu_uniq']), int(answer[syntagma[0]]["baseline"]['occur_redu_exhausted']))
tuple(right_data["redu"]).should.be.equal(tuple(baseline_entry_redu))
len(syntagma).should.be.equal(answer[syntagma[0]]["baseline"]['scope'])
#p(list(stats.get_data(syntagma, repl=True, redu=True,baseline=True))[0]["redu"])
##################
#### Case 6.1 ######
##################
repl = True
redu = True
baseline = True
output_table_type = "exhausted"
max_scope = False
additional_doc_cols = ()
context_len_left = True
context_len_right = True
word_examples_sum_table = True
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
ordered_header = stats.order_header(header, False,"csv")
col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
syntagma = [u'but']
right_data = gold_standard_data[syntagma[0]]
repl_num = right_data["repl"][1]
stemmed_search = False
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search)
data = list(data)
#p(data)
#p((len(data),repl_num))
answer = self._summerize_reps3(ordered_header, data, redu=redu, repl=repl)
if repl:
assert len(data) >= right_data["repl"][1]
#p(answer, "answer")
### REPL
if "repl" in right_data and repl:
tuple(right_data["repl"]).should.be.equal(tuple(answer[syntagma[0]]["repl"]))
baseline_entry_repl = (int(answer[syntagma[0]]["baseline"]['occur_repl_uniq']), int(answer[syntagma[0]]["baseline"]['occur_repl_exhausted']))
tuple(right_data["repl"]).should.be.equal(tuple(baseline_entry_repl))
len(syntagma).should.be.equal(answer[syntagma[0]]["baseline"]['scope'])
#answer[syntagma[0]]["baseline"]['occur_full_syn_repl']
### REDU
if "redu" in right_data and redu:
tuple(right_data["redu"]).should.be.equal(tuple(answer[syntagma[0]]["redu"]))
baseline_entry_redu = (int(answer[syntagma[0]]["baseline"]['occur_redu_uniq']), int(answer[syntagma[0]]["baseline"]['occur_redu_exhausted']))
tuple(right_data["redu"]).should.be.equal(tuple(baseline_entry_redu))
len(syntagma).should.be.equal(answer[syntagma[0]]["baseline"]['scope'])
#p(list(stats.get_data(syntagma, repl=True, redu=True,baseline=True))[0]["redu"])
##################
#### Case 6.1 ######
##################
repl = True
redu = True
baseline = True
output_table_type = "exhausted"
max_scope = False
additional_doc_cols = ()
context_len_left = True
context_len_right = True
word_examples_sum_table = True
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
ordered_header = stats.order_header(header, False,"csv")
col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
syntagma = [u'EMOIMG']
stemmed_search = False
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search, syntagma_type="pos")
data = list(data)
#p(data)
answer = self._summerize_reps3(ordered_header, data, redu=redu, repl=repl)
repl_num = sum([_data["repl"][1] for word, _data in answer.items() if word != 'baseline'])
#p((len(data),repl_num))
if repl:
assert len(data) >= repl_num
answer = {word: {phanomen:counts for phanomen,counts in _data.items()} for word, _data in answer.items()}
#p(answer, "answer")
for word, _data in answer.items():
if word != "baseline":
if tuple(_data["repl"]) != gold_standard_data[word]["repl"]:
assert False
##################
#### Case 6.1 ######
##################
repl = True
redu = True
baseline = True
output_table_type = "exhausted"
max_scope = False
additional_doc_cols = ()
context_len_left = True
context_len_right = True
word_examples_sum_table = True
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
ordered_header = stats.order_header(header, False,"csv")
col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
syntagma = [u'number']
stemmed_search = False
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search, syntagma_type="pos")
data = list(data)
#p(data)
answer = self._summerize_reps3(ordered_header, data, redu=redu, repl=repl)
repl_num = sum([_data["repl"][1] for word, _data in answer.items() if word != 'baseline'])
#p((len(data),repl_num))
if repl:
assert len(data) >= repl_num
answer = {word: {phanomen:counts for phanomen,counts in _data.items()} for word, _data in answer.items()}
#p(answer, "answer")
for word, _data in answer.items():
if word != "baseline":
if tuple(_data["repl"]) != gold_standard_data[word]["repl"]:
assert False
##################
#### Case 6.1 ######
##################
repl = True
redu = True
baseline = True
output_table_type = "exhausted"
max_scope = False
additional_doc_cols = ()
context_len_left = True
context_len_right = True
word_examples_sum_table = True
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
ordered_header = stats.order_header(header, False,"csv")
col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
syntagma = [u'EMOASC']
stemmed_search = False
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search, syntagma_type="pos")
data = list(data)
#p(data)
answer = self._summerize_reps3(ordered_header, data, redu=redu, repl=repl)
repl_num = sum([_data["repl"][1] for word, _data in answer.items() if word != 'baseline'])
#p((len(data),repl_num))
if repl:
assert len(data) >= repl_num
answer = {word: {phanomen:counts for phanomen,counts in _data.items()} for word, _data in answer.items()}
#p(answer, "answer")
for word, _data in answer.items():
if word != "baseline":
if tuple(_data["repl"]) != gold_standard_data[word]["repl"]:
assert False
##################
#### Case 6.1 ######
##################
repl = True
redu = True
baseline = True
output_table_type = "exhausted"
max_scope = False
additional_doc_cols = ()
context_len_left = True
context_len_right = True
word_examples_sum_table = True
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
ordered_header = stats.order_header(header, False,"csv")
col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
syntagma = [u'very', "pity"]
right_data = {
u'very':
{
'repl': [2, 4],
'redu': [1, 3]},
u'pity':
{
'repl': [2, 4],
'redu': [1, 4]},
u'very || pity':
{'baseline': {
'occur_repl_uniq': u'[2, 2]',
'occur_all': 1,
'occur_repl_exhausted': u'[4, 4]',
'occur_full_syn_redu': u'1',
'occur_redu_exhausted': u'[3, 4]',
'scope': 2,
'occur_full_syn_repl': u'1',
'occur_redu_uniq': u'[1, 1]'}}}
#repl_num = right_data["repl"][1]
stemmed_search = False
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search)
data = list(data)
#p(data)
#p((len(data),repl_num))
answer = self._summerize_reps3(ordered_header, data, redu=redu, repl=repl)
#if repl:
# assert len(data) >= right_data["repl"][1]
answer = {word:{phanomen:counts for phanomen, counts in data.items()} for word, data in answer.items() }
#p(answer, "answer")
### REPL
joined_syn = u" || ".join(syntagma)
#p(joined_syn )
if repl:
if "repl" in right_data[syntagma[0]]:
tuple(right_data[syntagma[0]]["repl"]).should.be.equal(tuple(answer[syntagma[0]]["repl"]))
baseline_entry_repl_word_1 = (
json.loads(
answer[joined_syn]["baseline"]['occur_repl_uniq']
)[0],
json.loads(
answer[joined_syn]["baseline"]['occur_repl_exhausted']
)[0]
)
tuple(right_data[syntagma[0]]["repl"]).should.be.equal(tuple(baseline_entry_repl_word_1))
if "repl" in right_data[syntagma[1]]:
baseline_entry_repl_word_2 = (
json.loads(
answer[joined_syn]["baseline"]['occur_repl_uniq']
)[1],
json.loads(
answer[joined_syn]["baseline"]['occur_repl_exhausted']
)[1]
)
tuple(right_data[syntagma[1]]["repl"]).should.be.equal(tuple(baseline_entry_repl_word_2))
len(syntagma).should.be.equal(answer[joined_syn]["baseline"]['scope'])
int(answer[joined_syn]["baseline"]['occur_full_syn_repl']).should.be.equal(int(right_data[joined_syn]["baseline"]['occur_full_syn_repl']))
# ### REDU
if redu:
if "redu" in right_data[syntagma[0]]:
tuple(right_data[syntagma[0]]["redu"]).should.be.equal(tuple(answer[syntagma[0]]["redu"]))
baseline_entry_redu_word_1 = (
json.loads(
answer[joined_syn]["baseline"]['occur_redu_uniq']
)[0],
json.loads(
answer[joined_syn]["baseline"]['occur_redu_exhausted']
)[0]
)
tuple(right_data[syntagma[0]]["redu"]).should.be.equal(tuple(baseline_entry_redu_word_1))
if "redu" in right_data[syntagma[1]]:
baseline_entry_redu_word_2 = (
json.loads(
answer[joined_syn]["baseline"]['occur_redu_uniq']
)[1],
json.loads(
answer[joined_syn]["baseline"]['occur_redu_exhausted']
)[1]
)
tuple(right_data[syntagma[1]]["redu"]).should.be.equal(tuple(baseline_entry_redu_word_2))
len(syntagma).should.be.equal(answer[joined_syn]["baseline"]['scope'])
int(answer[joined_syn]["baseline"]['occur_full_syn_redu']).should.be.equal(int(right_data[joined_syn]["baseline"]['occur_full_syn_redu']))
# #p(list(stats.get_data(syntagma, redu=True, redu=True,baseline=True))[0]["redu"])
#stats.recompute_syntagma_repetativity_scope(False)
##################
#### Case 6.1 ######
##################
repl = True
redu = True
baseline = True
output_table_type = "exhausted"
max_scope = False
additional_doc_cols = ()
context_len_left = True
context_len_right = True
word_examples_sum_table = True
header = stats._get_header( repl=repl, redu=redu, baseline=baseline, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
ordered_header = stats.order_header(header, False,"csv")
col_num = sum([sum([len(doc_cols) for doc_cols in cols if doc_cols]) if tables_part_name == "document" else len(cols) for tables_part_name, cols in header.iteritems() if cols ])
syntagma = [u'but', "you"]
#p(stats._get_data_for_one_syntagma(syntagma, redu=True, repl=True, baseline=True,get_also_non_full_repetativ_result=True)["redu"])
right_data = {
u'but || you':
{'baseline': {
'occur_repl_uniq': u'[10, 6]',
'occur_all': 4,
'occur_repl_exhausted': u'[15, 8]',
'occur_full_syn_redu': u'2',
'occur_redu_exhausted': u'[4, 4]',
'scope': 2, 'occur_full_syn_repl': u'4',
'occur_redu_uniq': u'[2, 2]'}},
u'you': {
'repl': [6, 8],
'redu': [2, 4]},
u'but': {
'repl': [10, 15],
'redu': [4, 10]}}
#repl_num = right_data["repl"][1]
stemmed_search = False
data = stats._export_generator(header,inp_syntagma=syntagma, stemmed_search=stemmed_search)
data = list(data)
#p(data)
#p((len(data),repl_num))
answer = self._summerize_reps3(ordered_header, data, redu=redu, repl=repl)
#if repl:
# assert len(data) >= right_data["repl"][1]
answer = {word:{phanomen:counts for phanomen, counts in data.items()} for word, data in answer.items() }
#p(answer, "answer")
### REPL
joined_syn = u" || ".join(syntagma)
#p(joined_syn )
if repl:
if "repl" in right_data[syntagma[0]]:
tuple(right_data[syntagma[0]]["repl"]).should.be.equal(tuple(answer[syntagma[0]]["repl"]))
baseline_entry_repl_word_1 = (
json.loads(
answer[joined_syn]["baseline"]['occur_repl_uniq']
)[0],
json.loads(
answer[joined_syn]["baseline"]['occur_repl_exhausted']
)[0]
)
tuple(right_data[syntagma[0]]["repl"]).should.be.equal(tuple(baseline_entry_repl_word_1))
if "repl" in right_data[syntagma[1]]:
baseline_entry_repl_word_2 = (
json.loads(
answer[joined_syn]["baseline"]['occur_repl_uniq']
)[1],
json.loads(
answer[joined_syn]["baseline"]['occur_repl_exhausted']
)[1]
)
tuple(right_data[syntagma[1]]["repl"]).should.be.equal(tuple(baseline_entry_repl_word_2))
len(syntagma).should.be.equal(answer[joined_syn]["baseline"]['scope'])
int(answer[joined_syn]["baseline"]['occur_full_syn_repl']).should.be.equal(int(right_data[joined_syn]["baseline"]['occur_full_syn_repl']))
# ### REDU
if redu:
if "redu" in right_data[syntagma[0]]:
tuple(right_data[syntagma[0]]["redu"]).should.be.equal(tuple(answer[syntagma[0]]["redu"]))
baseline_entry_redu_word_1 = (
json.loads(
answer[joined_syn]["baseline"]['occur_redu_uniq']
)[0],
json.loads(
answer[joined_syn]["baseline"]['occur_redu_exhausted']
)[0]
)
#tuple(right_data[syntagma[0]]["redu"]).should.be.equal(tuple(baseline_entry_redu_word_1))
if "redu" in right_data[syntagma[1]]:
baseline_entry_redu_word_2 = (
json.loads(
answer[joined_syn]["baseline"]['occur_redu_uniq']
)[1],
json.loads(
answer[joined_syn]["baseline"]['occur_redu_exhausted']
)[1]
)
tuple(right_data[syntagma[1]]["redu"]).should.be.equal(tuple(baseline_entry_redu_word_2))
len(syntagma).should.be.equal(answer[joined_syn]["baseline"]['scope'])
int(answer[joined_syn]["baseline"]['occur_full_syn_redu']).should.be.equal(int(right_data[joined_syn]["baseline"]['occur_full_syn_redu']))
# #p(list(stats.get_data(syntagma, redu=True, redu=True,baseline=True))[0]["redu"])
@attr(status='stable')
#@wipd
def test_get_where_statement_type_614(self):
    """Exercise ``Stats._get_where_statement()`` for lexem- and pos-based
    syntagmas, with and without context columns.

    Equivalent str/unicode/int spellings of the same syntagma must all
    produce exactly the same WHERE clauses, so the repeated input
    variants are driven through loops instead of copy-pasted cases.
    """
    self.prj_folder()
    self.test_dbs()
    stats = Stats(mode=self.mode, use_cash=True)
    stats.open(os.path.join(self.tempdir_testdbs, self.db_blogger_plaintext_stats_de))

    ################################################################################
    ############################## LEXEM ###########################################
    ################################################################################
    #########################
    ####1. with_context#####
    ########################
    # Cases 1.0.1 - 1.0.6: one-token emoji syntagma; every combination of
    # str/unicode splitted input and str/unicode/absent unsplitted input
    # must yield an identical clause.
    for inp_syntagma_splitted, inp_syntagma_unsplitted in [
            ([u"😀"], u'😀'),
            (["😀"], '😀'),
            ([u"😀"], '😀'),
            (["😀"], u'😀'),
            (["😀"], False),
            ([u"😀"], False)]:
        where = list(stats._get_where_statement(inp_syntagma_splitted, inp_syntagma_unsplitted, 1, with_context=True))
        assert where == [[u"normalized_word='\U0001f600' "]]

    ### Case 1.1: one-token punctuation syntagma
    where = list(stats._get_where_statement([u'.'], u'.', 1, with_context=True))
    assert where == [[u"normalized_word='.' "]]

    ### Case 1.2: two-token syntagma -> one clause list per anchor token
    where = list(stats._get_where_statement(['klitze', 'kleine'], 'klitze++kleine', 2, with_context=True))
    assert where == [[u"normalized_word='klitze' ", u"contextR1='kleine'"], [u"contextL1='klitze'", u"normalized_word='kleine' "]]

    ### Case 1.3: three-token syntagma with a non-ascii token
    where = list(stats._get_where_statement(['klitze', 'kleine', "überaschung"], 'klitze++kleine++überaschung', 3, with_context=True))
    assert where == [[u"normalized_word='klitze' ", u"contextR1='kleine'", u"contextR2='\xfcberaschung'"], [u"contextL1='klitze'", u"normalized_word='kleine' ", u"contextR1='\xfcberaschung'"], [u"contextL2='klitze'", u"contextL1='kleine'", u"normalized_word='\xfcberaschung' "]]

    # Cases 1.3.1 - 1.3.5: numeric tokens in str/unicode/int form, with
    # present or absent unsplitted input, are all normalized identically.
    numeric_inputs = [
        (['1', 2, 3], '1++2++3'),
        ([u'1', 2, 3], '1++2++3'),
        ([u'1', 2, 3], False),
        (['1', 2, 3], False),
        ([1, 2, 3], False),
        ([1, 2, 3], "1++2++3"),
        ([1, 2, 3], u"1++2++3")]
    for inp_syntagma_splitted, inp_syntagma_unsplitted in numeric_inputs:
        where = list(stats._get_where_statement(inp_syntagma_splitted, inp_syntagma_unsplitted, 3, with_context=True))
        assert where == [[u"normalized_word='1' ", u"contextR1='2'", u"contextR2='3'"], [u"contextL1='1'", u"normalized_word='2' ", u"contextR1='3'"], [u"contextL2='1'", u"contextL1='2'", u"normalized_word='3' "]]

    #########################
    ##2. without_context####
    #########################
    # Cases 2.0.1 - 2.0.6: same emoji variants, now matched against the
    # pre-joined 'syntagma' column.
    for inp_syntagma_splitted, inp_syntagma_unsplitted in [
            ([u"😀"], u'😀'),
            (["😀"], '😀'),
            (["😀"], u'😀'),
            ([u"😀"], '😀'),
            ([u"😀"], False),
            (["😀"], False)]:
        where = list(stats._get_where_statement(inp_syntagma_splitted, inp_syntagma_unsplitted, 1, with_context=False))
        assert where == [u"syntagma= '\U0001f600'"]

    ### Case 2.1
    where = list(stats._get_where_statement(['klitze', 'kleine'], 'klitze++kleine', 2, with_context=False))
    assert where == [u"syntagma= 'klitze++kleine'"]

    ### Case 2.2
    where = list(stats._get_where_statement(['klitze', 'kleine', "überaschung"], 'klitze++kleine++überaschung', 3, with_context=False))
    assert where == [u"syntagma= 'klitze++kleine++\xfcberaschung'"]

    # Cases 1.3.1 - 1.3.5 (without context): the same numeric variants
    # must all collapse to one joined-syntagma clause.
    for inp_syntagma_splitted, inp_syntagma_unsplitted in numeric_inputs:
        where = list(stats._get_where_statement(inp_syntagma_splitted, inp_syntagma_unsplitted, 3, with_context=False))
        assert where == [u"syntagma= '1++2++3'"]

    ################################################################################
    ###########################OTHERS###############################################
    ###############################################################################
    #########################
    ####1. with_context#####
    ########################
    ### Case 1.0.1
    where = list(stats._get_where_statement(["JJ"], False, 1, with_context=True, syntagma_type="pos"))
    assert where == [[u"pos='JJ' "]]
    ### Case 1.0.2: pos context is matched via json_extract on context_info*
    where = list(stats._get_where_statement(["JJ", "JJ"], False, 2, with_context=True, syntagma_type="pos"))
    assert where == [[u"pos='JJ' ", u'json_extract("context_infoR1", "$[0]") = "JJ"'], [u'json_extract("context_infoL1", "$[0]") = "JJ"', u"pos='JJ' "]]
    ### Case 1.0.3
    where = list(stats._get_where_statement(["JJ", "JJ", "JJ"], False, 3, with_context=True, syntagma_type="pos"))
    assert where == [[u"pos='JJ' ", u'json_extract("context_infoR1", "$[0]") = "JJ"', u'json_extract("context_infoR2", "$[0]") = "JJ"'], [u'json_extract("context_infoL1", "$[0]") = "JJ"', u"pos='JJ' ", u'json_extract("context_infoR1", "$[0]") = "JJ"'], [u'json_extract("context_infoL2", "$[0]") = "JJ"', u'json_extract("context_infoL1", "$[0]") = "JJ"', u"pos='JJ' "]]
    ### Case 1.0.4: sentiment filter is appended to each clause list
    where = list(stats._get_where_statement(["JJ"], False, 1, with_context=True, syntagma_type="pos", sentiment="positive"))
    assert where == [[u"pos='JJ' ", u"polarity LIKE '%positive%'"]]
    ### Case 1.0.5
    where = list(stats._get_where_statement(["JJ", "JJ"], False, 2, with_context=True, syntagma_type="pos", sentiment="positive"))
    assert where == [[u"pos='JJ' ", u'json_extract("context_infoR1", "$[0]") = "JJ"', u"polarity LIKE '%positive%'"], [u'json_extract("context_infoL1", "$[0]") = "JJ"', u"pos='JJ' ", u"polarity LIKE '%positive%'"]]
    ### Case 1.0.6
    where = list(stats._get_where_statement(["like"], False, 1, with_context=True, syntagma_type="lexem", sentiment="positive"))
    assert where == [[u"normalized_word='like' ", u"polarity LIKE '%positive%'"]]
    ### Case 1.0.7
    where = list(stats._get_where_statement(["like", "you"], False, 2, with_context=True, syntagma_type="lexem", sentiment="positive"))
    assert where == [[u"normalized_word='like' ", u"contextR1='you'", u"polarity LIKE '%positive%'"], [u"contextL1='like'", u"normalized_word='you' ", u"polarity LIKE '%positive%'"]]

    #########################
    ##2. without_context###
    #######################
    ### Case 2.0.1: pos syntagma without context is expected to yield nothing
    stats = Stats(mode="free", use_cash=True, logger_usage=False)
    stats.open(os.path.join(self.tempdir_testdbs, self.db_blogger_plaintext_stats_de))
    where = list(stats._get_where_statement(["JJ"], False, 1, with_context=False, syntagma_type="pos"))
    assert not where
@attr(status='stable')
# @wipd
def test_clean_baseline_table_615(self):
    """``clean_baseline_table()`` must shrink the 'baseline' table and be
    safely repeatable: a second invocation after commit must also succeed.
    """
    self.prj_folder()
    self.test_dbs()
    stats = Stats(mode=self.mode, use_cash=True, status_bar=True)
    stats.open(os.path.join(self.tempdir_testdbs, self.db_blogger_plaintext_stats_de))
    rows_before = stats.statsdb.rownum("baseline")
    # Run the cleaning twice with a commit in between: both passes must
    # report success.
    assert stats.clean_baseline_table()
    stats.statsdb.commit()
    assert stats.clean_baseline_table()
    stats.statsdb.commit()
    rows_after = stats.statsdb.rownum("baseline")
    # Cleaning is expected to have removed at least one baseline row.
    assert rows_before > rows_after
@attr(status='stable')
#@wipd
def test_drop_indexes_616(self):
    """``_drop_created_indexes()`` must remove every additionally created
    index reported by ``_get_created_indexes()``.
    """
    self.prj_folder()
    self.test_dbs()
    stats = Stats(mode=self.mode, use_cash=True)
    stats.open(os.path.join(self.tempdir_testdbs, self.db_blogger_plaintext_stats_en))
    # Precondition: the opened English test DB already has created indexes.
    assert stats._get_created_indexes()
    stats._drop_created_indexes()
    assert not stats._get_created_indexes()
@attr(status='stable')
#@wipd
def test_create_indexes_617(self):
self.prj_folder()
#self.blogger_corpus()
self.test_dbs()
#stats = Stats(mode=self.mode)
stats = Stats(mode=self.mode,use_cash=True)#, )
stats.open(os.path.join(self.tempdir_testdbs,self.db_blogger_plaintext_stats_en))
##### Case 1
##### optimized_for_long_syntagmas=True #####
################################################
stats._drop_created_indexes()
#indexes_bevore = stats._get_created_indexes()
number_indexes_bevore = stats._get_number_created_indexes()
number_should_be_created = stats.create_additional_indexes(optimized_for_long_syntagmas=True)
#indexes_after = stats._get_created_indexes()
number_indexes_after = stats._get_number_created_indexes()
assert (number_indexes_after - number_indexes_bevore) == number_should_be_created
### Case 2
##### optimized_for_long_syntagmas=False #####
################################################
stats._drop_created_indexes()
#indexes_bevore = stats._get_created_indexes()
number_indexes_bevore = stats._get_number_created_indexes()
number_should_be_created = stats.create_additional_indexes(optimized_for_long_syntagmas=False)
#indexes_after = stats._get_created_indexes()
number_indexes_after = stats._get_number_created_indexes()
assert (number_indexes_after - number_indexes_bevore) == number_should_be_created
### Case 3
##### scope=3 #####
################################################
stats._drop_created_indexes()
#indexes_bevore = stats._get_created_indexes()
number_indexes_bevore = stats._get_number_created_indexes()
number_should_be_created = stats.create_additional_indexes(scope=3, optimized_for_long_syntagmas=False)
#indexes_after = stats._get_created_indexes()
number_indexes_after = stats._get_number_created_indexes()
assert (number_indexes_after - number_indexes_bevore) == number_should_be_created
### Case 4
##### indexes upgrade #####
################################################
stats._drop_created_indexes()
#indexes_bevore = stats._get_created_indexes()
number_indexes_bevore = stats._get_number_created_indexes()
number_should_be_created = stats.create_additional_indexes(scope=2, optimized_for_long_syntagmas=False)
stats._drop_created_indexes()
number_should_be_created = stats.create_additional_indexes(scope=3, optimized_for_long_syntagmas=False)
#indexes_after = stats._get_created_indexes()
# number_indexes_after = stats._get_number_created_indexes()
# assert (number_indexes_after - number_indexes_bevore) == number_should_be_created
## Case 5
################################################
##### creation of the same indexes #####
stats._drop_created_indexes()
#indexes_bevore = stats._get_created_indexes()
number_indexes_bevore = stats._get_number_created_indexes()
number_should_be_created1 = stats.create_additional_indexes()
number_indexes_after1 = stats._get_number_created_indexes()
### repetativ creation of the same indexes
number_should_be_created2 = stats.create_additional_indexes()
#indexes_after = stats._get_created_indexes()
number_indexes_after2 = stats._get_number_created_indexes()
assert (number_indexes_after2 - number_indexes_bevore) == number_should_be_created1
@attr(status='stable')
#@wipd
def test_clean_baseline_table_618(self):
self.prj_folder()
#self.blogger_corpus()
self.test_dbs()
#stats = Stats(mode=self.mode)
stats = Stats(mode=self.mode,use_cash=True)#, )
stats.open(os.path.join(self.tempdir_testdbs,self.db_blogger_plaintext_stats_en))
#stats.hhh()
assert stats.optimize_db()
@attr(status='stable')
#@wipd
def test_compute_baseline_sum_619(self):
self.prj_folder()
#self.blogger_corpus()
self.test_dbs()
name = self.configer.init_info_data["blogger"]["name"]
language = self.configer.init_info_data["blogger"]["language"]
visibility = self.configer.init_info_data["blogger"]["visibility"]
platform_name = self.configer.init_info_data["blogger"]["platform_name"]
license = self.configer.init_info_data["blogger"]["license"]
template_name = self.configer.init_info_data["blogger"]["template_name"]
version = self.configer.init_info_data["blogger"]["version"]
source = self.configer.init_info_data["blogger"]["source"]
encryption_key = self.configer.init_info_data["blogger"]["encryption_key"]["stats"]
corpus_id = self.configer.init_info_data["blogger"]["id"]["corpus"]
stats_id = self.configer.init_info_data["blogger"]["id"]["stats"]
typ= "stats"
################################################
############ full_repetativ_syntagma=True
#################################################
##### EN ######
#stats = Stats(mode=self.mode)
stats = Stats(mode=self.mode,use_cash=True, status_bar=True)#, )
stats.init(self.tempdir_project_folder, name, language, visibility, corpus_id=corpus_id, version= version, encryption_key=encryption_key, full_repetativ_syntagma=True, baseline_delimiter="++")
corp = Corpus(mode=self.mode)
corp.open(os.path.join(self.tempdir_testdbs,self.db_blogger_plaintext_corp_en))
stats.compute(corp,stream_number=1,adjust_to_cpu=False, freeze_db=False, optimized_for_long_syntagmas=True)
stats.optimize_db()
### RepCompution
stats._compute_baseline_sum()
stats.statsdb.commit()
baseline = stats.statsdb.getall("baseline")
right_baseline = [(u':-(++#shetlife++http://www.noooo.com', u':-(++#shetlif++http://www.noooo.com', 3, 1, None, None, None, None, None, None),
(u'tiny++model++,++which++we', u'tini++model++,++which++we', 5, 1, None, None, None, None, None, None),
(u'.++:-(++@real_trump++#shetlife', u'.++:-(++@real_trump++#shetlif', 4, 1, None, None, None, None, None, None),
(u'pity++for++me++.++:-(', u'piti++for++me++.++:-(', 5, 1, None, None, None, None, None, None),
(u'\U0001f308++\U0001f600++\U0001f308++\U0001f600', u'\U0001f308++\U0001f600++\U0001f308++\U0001f600', 4, 1, u'[2, 2, "IGNOR", "IGNOR"]', u'[2, 2, "IGNOR", "IGNOR"]', None, None, u'1', None),
(u'tiny++model++,++which', u'tini++model++,++which', 4, 1, None, None, None, None, None, None),
(u'a++bad++news++,', u'a++bad++news++,', 4, 1, None, None, None, None, None, None),
(u'.++but', u'.++but', 2, 3, None, None, None, None, None, None),
(u'.++:-(++@real_trump', u'.++:-(++@real_trump', 3, 1, None, None, None, None, None, None),
(u'about++it++?++1++\U0001f62b++1', u'about++it++?++1++\U0001f62b++1', 6, 1, None, None, None, None, None, None),
(u'explain++a++big', u'explain++a++big', 3, 1, None, None, None, None, None, None),
(u'me++\U0001f62b++,', u'me++\U0001f62b++,', 3, 1, None, None, None, None, None, None),
(u'liked++it++:p++=)++\U0001f600', u'like++it++:p++=)++\U0001f600', 5, 1, None, None, None, None, None, None),
(u'realy', u'reali', 1, 4, u'2', u'4', u'1', u'3', u'2', u'1'),
(u'to++se++you++-)', u'to++se++you++-)', 4, 1, None, None, None, None, None, None),
(u'about++it++?++1', u'about++it++?++1', 4, 1, None, None, None, None, None, None),
(u'tiny', u'tini', 1, 10, u'1', u'1', u'2', u'9', u'1', u'2'),
(u'but++it++was++also++very', u'but++it++was++also++veri', 5, 1, None, None, None, None, None, None),
(u'surprise++.++but++you', u'surpris++.++but++you', 4, 1, None, None, None, None, None, None),
(u'\U0001f308++\U0001f600++\U0001f308', u'\U0001f308++\U0001f600++\U0001f308', 3, 1, u'[2, 1, "IGNOR"]', u'[2, 1, "IGNOR"]', None, None, u'1', None),
(u'explanation++.++right++?++what++do', u'explan++.++right++?++what++do', 6, 1, None, None, None, None, None, None),
(u'=)++\U0001f600++\U0001f308++\U0001f600', u'=)++\U0001f600++\U0001f308++\U0001f600', 4, 1, None, None, None, None, None, None),
(u'what++do++you++think++about', u'what++do++you++think++about', 5, 1, None, None, None, None, None, None),
(u'\U0001f62b++,', u'\U0001f62b++,', 2, 1, None, None, None, None, None, None),
(u'surprise++.++but', u'surpris++.++but', 3, 1, None, None, None, None, None, None),
(u'can++not++acept++.++-(++\U0001f62b', u'can++not++acept++.++-(++\U0001f62b', 6, 1, None, None, None, None, None, None),
(u'\U0001f308', u'\U0001f308', 1, 3, u'3', u'3', None, None, u'3', None),
(u'but++it++was++also++very++pity', u'but++it++was++also++veri++piti', 6, 1, None, None, None, None, None, None),
(u'i++realy++liked', u'i++reali++like', 3, 1, None, None, None, None, None, None),
(u'but++you++but', u'but++you++but', 3, 2, u'[10, 4, "IGNOR"]', u'[15, 4, "IGNOR"]', u'[4, 2, "IGNOR"]', u'[10, 4, "IGNOR"]', u'2', u'2'),
(u':-(++#shetlife', u':-(++#shetlif', 2, 1, None, None, None, None, None, None),
(u'a++big++things', u'a++big++thing', 3, 1, None, None, None, None, None, None),
(u'?++what++do++you', u'?++what++do++you', 4, 1, None, None, None, None, None, None),
(u'se', u'se', 1, 1, u'1', u'1', None, None, u'1', None),
(u'.++but++you++but++you', u'.++but++you++but++you', 5, 2, None, None, None, None, None, None),
(u'very++pity++for++me', u'veri++piti++for++me', 4, 1, None, None, None, None, None, None),
(u'tiny++surprise++.', u'tini++surpris++.', 3, 1, None, None, None, None, None, None),
(u':-(++@real_trump', u':-(++@real_trump', 2, 1, None, None, None, None, None, None),
(u'-(++\U0001f62b++:-(++#shetlife', u'-(++\U0001f62b++:-(++#shetlif', 4, 1, None, None, None, None, None, None),
(u'.++but++you++but', u'.++but++you++but', 4, 2, None, None, None, None, None, None),
(u',++but++a++big++explanation', u',++but++a++big++explan', 5, 1, None, None, None, None, None, None),
(u'=)++\U0001f600', u'=)++\U0001f600', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None),
(u'for++me++.++:-(++@real_trump', u'for++me++.++:-(++@real_trump', 5, 1, None, None, None, None, None, None),
(u'tiny++model++,', u'tini++model++,', 3, 2, None, None, None, None, None, None),
(u'you++think++about++it++?++1', u'you++think++about++it++?++1', 6, 1, None, None, None, None, None, None),
(u'use++for++explain++a++big++things', u'use++for++explain++a++big++thing', 6, 1, None, None, None, None, None, None),
(u'use++for++explain++a++big', u'use++for++explain++a++big', 5, 1, None, None, None, None, None, None),
(u'model++,++which++we++can', u'model++,++which++we++can', 5, 1, None, None, None, None, None, None),
(u'it++:p++=)++\U0001f600++\U0001f308++\U0001f600', u'it++:p++=)++\U0001f600++\U0001f308++\U0001f600', 6, 1, None, None, None, None, None, None),
(u'?++what++do++you++think', u'?++what++do++you++think', 5, 1, None, None, None, None, None, None),
(u'bad++news++,', u'bad++news++,', 3, 1, None, None, None, None, None, None),
(u'but++you++but++you', u'but++you++but++you', 4, 2, u'[10, 6, "IGNOR", "IGNOR"]', u'[15, 8, "IGNOR", "IGNOR"]', None, None, u'2', None),
(u'bad', u'bad', 1, 6, u'4', u'7', u'1', u'5', u'4', u'1'),
(u'pity++for++me', u'piti++for++me', 3, 1, None, None, None, None, None, None),
(u'.++-(++\U0001f62b++:-(', u'.++-(++\U0001f62b++:-(', 4, 1, None, None, None, None, None, None),
(u'tiny++surprise++.++but++you', u'tini++surpris++.++but++you', 5, 1, None, None, None, None, None, None),
(u'but++i++realy++liked', u'but++i++reali++like', 4, 1, None, None, None, None, None, None),
(u',++but++i++realy++liked', u',++but++i++reali++like', 5, 1, None, None, None, None, None, None),
(u'.++right++?', u'.++right++?', 3, 1, None, None, None, None, None, None),
(u'1++\U0001f62b++1++.++but++you', u'1++\U0001f62b++1++.++but++you', 6, 1, None, None, None, None, None, None),
(u'a++bad++news++,++which++we', u'a++bad++news++,++which++we', 6, 1, None, None, None, None, None, None),
(u'\U0001f62b++:-(', u'\U0001f62b++:-(', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None),
(u'?++1++\U0001f62b++1', u'?++1++\U0001f62b++1', 4, 1, u'[1, 2, 1, "IGNOR"]', u'[1, 2, 1, "IGNOR"]', None, None, u'1', None),
(u'.++but++it++was++also++very', u'.++but++it++was++also++veri', 6, 1, None, None, None, None, None, None),
(u',++but++a++big', u',++but++a++big', 4, 1, None, None, None, None, None, None),
(u'\U0001f62b++:-(++#shetlife', u'\U0001f62b++:-(++#shetlif', 3, 1, None, None, None, None, None, None),
(u'also++very++pity++for++me', u'also++veri++piti++for++me', 5, 1, None, None, None, None, None, None),
(u'but++a++big++explanation++.++right', u'but++a++big++explan++.++right', 6, 1, None, None, None, None, None, None),
(u'it++was++also++very', u'it++was++also++veri', 4, 1, None, None, None, None, None, None),
(u'but++a++big++explanation++.', u'but++a++big++explan++.', 5, 1, None, None, None, None, None, None),
(u',++but++i++realy', u',++but++i++reali', 4, 1, None, None, None, None, None, None),
(u'it++?++1++\U0001f62b', u'it++?++1++\U0001f62b', 4, 1, None, None, None, None, None, None),
(u'a++big', u'a++big', 2, 2, None, None, None, None, None, None),
(u'acept++.++-(', u'acept++.++-(', 3, 1, None, None, None, None, None, None),
(u'but', u'but', 1, 13, u'11', u'16', u'4', u'10', u'11', u'4'),
(u'tiny++surprise', u'tini++surpris', 2, 1, None, None, None, None, None, None),
(u'realy++liked', u'reali++like', 2, 1, None, None, None, None, None, None),
(u'what++do++you++think++about++it', u'what++do++you++think++about++it', 6, 1, None, None, None, None, None, None),
(u':p++=)++\U0001f600++\U0001f308++\U0001f600', u':p++=)++\U0001f600++\U0001f308++\U0001f600', 5, 1, None, None, None, None, None, None),
(u'you++-)', u'you++-)', 2, 1, None, None, None, None, None, None),
(u'\U0001f600++\U0001f308++\U0001f600++\U0001f308', u'\U0001f600++\U0001f308++\U0001f600++\U0001f308', 4, 1, u'[2, 2, "IGNOR", "IGNOR"]', u'[2, 2, "IGNOR", "IGNOR"]', None, None, u'1', None),
(u'.++:-(++@real_trump++#shetlife++#readytogo', u'.++:-(++@real_trump++#shetlif++#readytogo', 5, 1, None, None, None, None, None, None),
(u'what++do++you', u'what++do++you', 3, 1, None, None, None, None, None, None),
(u'surprise++for++me++\U0001f62b', u'surpris++for++me++\U0001f62b', 4, 1, None, None, None, None, None, None),
(u'?++what++do++you++think++about', u'?++what++do++you++think++about', 6, 1, None, None, None, None, None, None),
(u'a++bad++news', u'a++bad++news', 3, 1, None, None, None, None, None, None),
(u'very++pity++for', u'veri++piti++for', 3, 1, None, None, None, None, None, None),
(u',++but++i', u',++but++i', 3, 1, None, None, None, None, None, None),
(u'glad++to', u'glad++to', 2, 1, None, None, None, None, None, None),
(u'big++things++.', u'big++thing++.', 3, 1, None, None, None, None, None, None),
(u'for++me++\U0001f62b++,', u'for++me++\U0001f62b++,', 4, 1, None, None, None, None, None, None),
(u':-(++@real_trump++#shetlife++#readytogo', u':-(++@real_trump++#shetlif++#readytogo', 4, 1, None, None, None, None, None, None),
(u'.++:-(++@real_trump++#shetlife++#readytogo++http://www.absurd.com', u'.++:-(++@real_trump++#shetlif++#readytogo++http://www.absurd.com', 6, 1, None, None, None, None, None, None),
(u'.', u'.', 1, 7, u'1', u'1', None, None, u'1', None),
(u'but++i++realy++liked++it', u'but++i++reali++like++it', 5, 1, None, None, None, None, None, None),
(u'pity', u'piti', 1, 4, u'2', u'4', u'1', u'4', u'2', u'1'),
(u'explanation++.++right++?', u'explan++.++right++?', 4, 1, None, None, None, None, None, None),
(u'do++you++think++about++it', u'do++you++think++about++it', 5, 1, None, None, None, None, None, None),
(u'think++about++it++?++1', u'think++about++it++?++1', 5, 1, None, None, None, None, None, None),
(u'also++very++pity++for++me++.', u'also++veri++piti++for++me++.', 6, 1, None, None, None, None, None, None),
(u'\U0001f600++\U0001f308++\U0001f600++\U0001f308++\U0001f600', u'\U0001f600++\U0001f308++\U0001f600++\U0001f308++\U0001f600', 5, 1, u'[3, 2, "IGNOR", "IGNOR", "IGNOR"]', u'[3, 2, "IGNOR", "IGNOR", "IGNOR"]', None, None, u'1', None),
(u'se++you', u'se++you', 2, 1, None, None, None, None, None, None),
(u'realy++liked++it', u'reali++like++it', 3, 1, None, None, None, None, None, None),
(u'me++\U0001f62b++,++but', u'me++\U0001f62b++,++but', 4, 1, None, None, None, None, None, None),
(u'for++me++.++:-(++@real_trump++#shetlife', u'for++me++.++:-(++@real_trump++#shetlif', 6, 1, None, None, None, None, None, None),
(u'\U0001f600++\U0001f308++\U0001f600', u'\U0001f600++\U0001f308++\U0001f600', 3, 3, u'[2, 1, "IGNOR"]', u'[3, 1, "IGNOR"]', None, None, u'1', None),
(u'big++explanation++.++right++?', u'big++explan++.++right++?', 5, 1, None, None, None, None, None, None),
(u'bad++news', u'bad++news', 2, 1, None, None, None, None, None, None),
(u'glad++to++se++you', u'glad++to++se++you', 4, 1, None, None, None, None, None, None),
(u'model++,++but++a++big', u'model++,++but++a++big', 5, 1, None, None, None, None, None, None),
(u'\U0001f62b++1++.', u'\U0001f62b++1++.', 3, 1, None, None, None, None, None, None),
(u'it++:p++=)++\U0001f600++\U0001f308', u'it++:p++=)++\U0001f600++\U0001f308', 5, 1, None, None, None, None, None, None),
(u'explain++a++big++things', u'explain++a++big++thing', 4, 1, None, None, None, None, None, None),
(u'also++very', u'also++veri', 2, 1, None, None, None, None, None, None),
(u'to++se', u'to++se', 2, 1, None, None, None, None, None, None),
(u'you++but++you++\U0001f600++\U0001f308', u'you++but++you++\U0001f600++\U0001f308', 5, 1, u'[3, 3, "IGNOR", 1, 1]', u'[4, 3, "IGNOR", 1, 1]', None, None, u'1', None),
(u'to++se++you', u'to++se++you', 3, 1, None, None, None, None, None, None),
(u'realy++bad++surprise++for++me++\U0001f62b', u'reali++bad++surpris++for++me++\U0001f62b', 6, 1, None, None, None, None, None, None),
(u'realy++liked++it++:p', u'reali++like++it++:p', 4, 1, None, None, None, None, None, None),
(u'you++but++you++\U0001f600', u'you++but++you++\U0001f600', 4, 1, u'[3, 3, "IGNOR", 1]', u'[4, 3, "IGNOR", 1]', None, None, u'1', None),
(u'not++acept++.++-(++\U0001f62b++:-(', u'not++acept++.++-(++\U0001f62b++:-(', 6, 1, None, None, None, None, None, None),
(u'very', u'veri', 1, 3, u'2', u'4', u'1', u'3', u'2', u'1'),
(u'1++.++but++you', u'1++.++but++you', 4, 1, None, None, None, None, None, None),
(u'surprise++for++me++\U0001f62b++,', u'surpris++for++me++\U0001f62b++,', 5, 1, None, None, None, None, None, None),
(u'.++right++?++what++do', u'.++right++?++what++do', 5, 1, None, None, None, None, None, None),
(u'was++also++very++pity', u'was++also++veri++piti', 4, 1, None, None, None, None, None, None),
(u'1++\U0001f62b++1', u'1++\U0001f62b++1', 3, 1, u'[2, 1, "IGNOR"]', u'[2, 1, "IGNOR"]', None, None, u'1', None),
(u'big++explanation++.++right', u'big++explan++.++right', 4, 1, None, None, None, None, None, None),
(u'for++explain++a++big', u'for++explain++a++big', 4, 1, None, None, None, None, None, None),
(u'for++me++\U0001f62b++,++but++i', u'for++me++\U0001f62b++,++but++i', 6, 1, None, None, None, None, None, None),
(u':-(++@real_trump++#shetlife', u':-(++@real_trump++#shetlif', 3, 1, None, None, None, None, None, None),
(u'?++1++\U0001f62b++1++.++but', u'?++1++\U0001f62b++1++.++but', 6, 1, None, None, None, None, None, None),
(u'1++\U0001f62b++1++.++but', u'1++\U0001f62b++1++.++but', 5, 1, None, None, None, None, None, None),
(u'think++about++it++?', u'think++about++it++?', 4, 1, None, None, None, None, None, None),
(u'big', u'big', 1, 5, u'2', u'2', u'2', u'5', u'2', u'2'),
(u'realy++liked++it++:p++=)', u'reali++like++it++:p++=)', 5, 1, None, None, None, None, None, None),
(u'we++can++not++acept++.++-(', u'we++can++not++acept++.++-(', 6, 1, None, None, None, None, None, None),
(u'a++big++explanation++.', u'a++big++explan++.', 4, 1, None, None, None, None, None, None),
(u'for++explain++a++big++things', u'for++explain++a++big++thing', 5, 1, None, None, None, None, None, None),
(u'model', u'model', 1, 2, u'1', u'2', None, None, u'1', None),
(u'bad++news++,++which', u'bad++news++,++which', 4, 1, None, None, None, None, None, None),
(u'you++\U0001f600++\U0001f308++\U0001f600', u'you++\U0001f600++\U0001f308++\U0001f600', 4, 1, u'[1, 2, 1, "IGNOR"]', u'[2, 2, 1, "IGNOR"]', None, None, u'1', None),
(u'i++realy++liked++it++:p', u'i++reali++like++it++:p', 5, 1, None, None, None, None, None, None),
(u'but++i++realy++liked++it++:p', u'but++i++reali++like++it++:p', 6, 1, None, None, None, None, None, None),
(u'glad++to++se++you++-)', u'glad++to++se++you++-)', 5, 1, None, None, None, None, None, None),
(u'1++\U0001f62b++1++.', u'1++\U0001f62b++1++.', 4, 1, None, None, None, None, None, None),
(u'you++think', u'you++think', 2, 1, None, None, None, None, None, None),
(u'not++acept++.++-(++\U0001f62b', u'not++acept++.++-(++\U0001f62b', 5, 1, None, None, None, None, None, None),
(u',++but++a++big++explanation++.', u',++but++a++big++explan++.', 6, 1, None, None, None, None, None, None),
(u'think++about++it++?++1++\U0001f62b', u'think++about++it++?++1++\U0001f62b', 6, 1, None, None, None, None, None, None),
(u'but++you', u'but++you', 2, 4, u'[10, 6]', u'[15, 8]', u'[2, 2]', u'[4, 4]', u'4', u'2'),
(u'-)', u'-)', 1, 1, u'1', u'1', None, None, u'1', None),
(u'-(++\U0001f62b', u'-(++\U0001f62b', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None),
(u'big++explanation++.++right++?++what', u'big++explan++.++right++?++what', 6, 1, None, None, None, None, None, None),
(u'me++.++:-(', u'me++.++:-(', 3, 1, None, None, None, None, None, None),
(u'tiny++surprise++.++but', u'tini++surpris++.++but', 4, 1, None, None, None, None, None, None),
(u'-(++\U0001f62b++:-(++#shetlife++http://www.noooo.com', u'-(++\U0001f62b++:-(++#shetlif++http://www.noooo.com', 5, 1, None, None, None, None, None, None),
(u'\U0001f62b++1++.++but++you', u'\U0001f62b++1++.++but++you', 5, 1, None, None, None, None, None, None),
(u'it++?', u'it++?', 2, 1, None, None, None, None, None, None),
(u'\U0001f62b++,++but', u'\U0001f62b++,++but', 3, 1, None, None, None, None, None, None),
(u'model++,', u'model++,', 2, 2, None, None, None, None, None, None),
(u'me++.++:-(++@real_trump++#shetlife++#readytogo', u'me++.++:-(++@real_trump++#shetlif++#readytogo', 6, 1, None, None, None, None, None, None),
(u'right++?++what++do', u'right++?++what++do', 4, 1, None, None, None, None, None, None),
(u'\U0001f308++\U0001f600', u'\U0001f308++\U0001f600', 2, 3, u'[2, 2]', u'[2, 2]', None, None, u'2', None),
(u'=)', u'=)', 1, 1, u'1', u'1', None, None, u'1', None),
(u'it++was++also++very++pity', u'it++was++also++veri++piti', 5, 1, None, None, None, None, None, None),
(u'i++realy++liked++it', u'i++reali++like++it', 4, 1, None, None, None, None, None, None),
(u'se++you++-)', u'se++you++-)', 3, 1, None, None, None, None, None, None),
(u'tiny++model', u'tini++model', 2, 2, None, None, None, None, None, None),
(u'it++:p++=)++\U0001f600', u'it++:p++=)++\U0001f600', 4, 1, None, None, None, None, None, None),
(u'a++bad++news++,++which', u'a++bad++news++,++which', 5, 1, None, None, None, None, None, None),
(u'it++?++1++\U0001f62b++1++.', u'it++?++1++\U0001f62b++1++.', 6, 1, None, None, None, None, None, None),
(u'1++.++but++you++but', u'1++.++but++you++but', 5, 1, None, None, None, None, None, None),
(u'right', u'right', 1, 1, u'1', u'1', None, None, u'1', None),
(u'it++:p++=)', u'it++:p++=)', 3, 1, None, None, None, None, None, None),
(u'model++,++which++we', u'model++,++which++we', 4, 1, None, None, None, None, None, None),
(u'but++you++but++you++\U0001f600++\U0001f308', u'but++you++but++you++\U0001f600++\U0001f308', 6, 1, u'[5, 3, "IGNOR", "IGNOR", 1, 1]', u'[5, 4, "IGNOR", "IGNOR", 1, 1]', None, None, u'1', None),
(u'#shetlife', u'#shetlif', 1, 3, None, None, u'1', u'2', None, u'1'),
(u'?', u'?', 1, 2, u'1', u'1', None, None, u'1', None),
(u'me++.++:-(++@real_trump', u'me++.++:-(++@real_trump', 4, 1, None, None, None, None, None, None),
(u'acept++.++-(++\U0001f62b++:-(', u'acept++.++-(++\U0001f62b++:-(', 5, 1, None, None, None, None, None, None),
(u'but++you++\U0001f600++\U0001f308', u'but++you++\U0001f600++\U0001f308', 4, 1, u'[3, 1, 1, 1]', u'[3, 2, 1, 1]', None, None, u'1', None),
(u'very++pity++for++me++.', u'veri++piti++for++me++.', 5, 1, None, None, None, None, None, None),
(u'\U0001f62b++:-(++#shetlife++http://www.noooo.com', u'\U0001f62b++:-(++#shetlif++http://www.noooo.com', 4, 1, None, None, None, None, None, None),
(u'explanation++.', u'explan++.', 2, 1, None, None, None, None, None, None),
(u'.++but++you++but++you++\U0001f600', u'.++but++you++but++you++\U0001f600', 6, 1, None, None, None, None, None, None),
(u'.++-(++\U0001f62b++:-(++#shetlife++http://www.noooo.com', u'.++-(++\U0001f62b++:-(++#shetlif++http://www.noooo.com', 6, 1, None, None, None, None, None, None),
(u'.++-(', u'.++-(', 2, 1, None, None, None, None, None, None),
(u'i++realy++liked++it++:p++=)', u'i++reali++like++it++:p++=)', 6, 1, None, None, None, None, None, None),
(u'\U0001f600++\U0001f308', u'\U0001f600++\U0001f308', 2, 3, u'[3, 3]', u'[3, 3]', None, None, u'3', None),
(u'explanation', u'explan', 1, 1, u'1', u'1', None, None, u'1', None),
(u'you++but++you++\U0001f600++\U0001f308++\U0001f600', u'you++but++you++\U0001f600++\U0001f308++\U0001f600', 6, 1, u'[3, 3, "IGNOR", 2, 1, "IGNOR"]', u'[4, 3, "IGNOR", 2, 1, "IGNOR"]', None, None, u'1', None),
(u'do++you++think', u'do++you++think', 3, 1, None, None, None, None, None, None),
(u'acept++.++-(++\U0001f62b++:-(++#shetlife', u'acept++.++-(++\U0001f62b++:-(++#shetlif', 6, 1, None, None, None, None, None, None),
(u'but++i', u'but++i', 2, 1, None, None, None, None, None, None),
(u'\U0001f62b++,++but++i++realy++liked', u'\U0001f62b++,++but++i++reali++like', 6, 1, None, None, None, None, None, None),
(u'me++\U0001f62b++,++but++i++realy', u'me++\U0001f62b++,++but++i++reali', 6, 1, None, None, None, None, None, None),
(u'but++you++but++you++\U0001f600', u'but++you++but++you++\U0001f600', 5, 1, u'[5, 3, "IGNOR", "IGNOR", 1]', u'[5, 4, "IGNOR", "IGNOR", 1]', None, None, u'1', None),
(u'acept++.++-(++\U0001f62b', u'acept++.++-(++\U0001f62b', 4, 1, None, None, None, None, None, None),
(u',++but', u',++but', 2, 2, None, None, None, None, None, None),
(u'was++also++very++pity++for', u'was++also++veri++piti++for', 5, 1, None, None, None, None, None, None),
(u'surprise++.++but++you++but', u'surpris++.++but++you++but', 5, 1, None, None, None, None, None, None),
(u'surprise++.++but++you++but++you', u'surpris++.++but++you++but++you', 6, 1, None, None, None, None, None, None),
(u'a++big++explanation++.++right', u'a++big++explan++.++right', 5, 1, None, None, None, None, None, None),
(u':p++=)', u':p++=)', 2, 1, None, None, None, None, None, None),
(u'\U0001f62b++,++but++i', u'\U0001f62b++,++but++i', 4, 1, None, None, None, None, None, None),
(u'tiny++model++,++which++we++can', u'tini++model++,++which++we++can', 6, 1, None, None, None, None, None, None),
(u'\U0001f62b++1', u'\U0001f62b++1', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None),
(u'?++1++\U0001f62b', u'?++1++\U0001f62b', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, u'1', None),
(u'was++also++very', u'was++also++veri', 3, 1, None, None, None, None, None, None),
(u':p++=)++\U0001f600++\U0001f308', u':p++=)++\U0001f600++\U0001f308', 4, 1, None, None, None, None, None, None),
(u'surprise++for++me++\U0001f62b++,++but', u'surpris++for++me++\U0001f62b++,++but', 6, 1, None, None, None, None, None, None),
(u'about++it++?++1++\U0001f62b', u'about++it++?++1++\U0001f62b', 5, 1, None, None, None, None, None, None),
(u'me++.++:-(++@real_trump++#shetlife', u'me++.++:-(++@real_trump++#shetlif', 5, 1, None, None, None, None, None, None),
(u'you++think++about++it', u'you++think++about++it', 4, 1, None, None, None, None, None, None),
(u'but++you++\U0001f600++\U0001f308++\U0001f600', u'but++you++\U0001f600++\U0001f308++\U0001f600', 5, 1, u'[3, 1, 2, 1, "IGNOR"]', u'[3, 2, 2, 1, "IGNOR"]', None, None, u'1', None),
(u'but++you++\U0001f600++\U0001f308++\U0001f600++\U0001f308', u'but++you++\U0001f600++\U0001f308++\U0001f600++\U0001f308', 6, 1, u'[3, 1, 2, 2, "IGNOR", "IGNOR"]', u'[3, 2, 2, 2, "IGNOR", "IGNOR"]', None, None, u'1', None),
(u'also++very++pity++for', u'also++veri++piti++for', 4, 1, None, None, None, None, None, None),
(u'you++\U0001f600', u'you++\U0001f600', 2, 1, u'[1, 1]', u'[2, 1]', None, None, u'1', None),
(u'glad++to++se', u'glad++to++se', 3, 1, None, None, None, None, None, None),
(u'you++\U0001f600++\U0001f308++\U0001f600++\U0001f308', u'you++\U0001f600++\U0001f308++\U0001f600++\U0001f308', 5, 1, u'[1, 2, 2, "IGNOR", "IGNOR"]', u'[2, 2, 2, "IGNOR", "IGNOR"]', None, None, u'1', None),
(u'#shetlife++http://www.noooo.com', u'#shetlif++http://www.noooo.com', 2, 1, None, None, None, None, None, None),
(u'1++.++but++you++but++you', u'1++.++but++you++but++you', 6, 1, None, None, None, None, None, None),
(u'was++also++very++pity++for++me', u'was++also++veri++piti++for++me', 6, 1, None, None, None, None, None, None),
(u'.++-(++\U0001f62b++:-(++#shetlife', u'.++-(++\U0001f62b++:-(++#shetlif', 5, 1, None, None, None, None, None, None),
(u'1++.', u'1++.', 2, 1, None, None, None, None, None, None),
(u'i++realy', u'i++reali', 2, 1, None, None, None, None, None, None),
(u'can++use++for++explain++a++big', u'can++use++for++explain++a++big', 6, 1, None, None, None, None, None, None),
(u'very++pity', u'veri++piti', 2, 1, u'[2, 2]', u'[4, 4]', u'[1, 1]', u'[3, 4]', u'1', u'1'),
(u'liked++it++:p++=)++\U0001f600++\U0001f308', u'like++it++:p++=)++\U0001f600++\U0001f308', 6, 1, None, None, None, None, None, None),
(u'do++you++think++about', u'do++you++think++about', 4, 1, None, None, None, None, None, None),
(u'bad++surprise++for++me++\U0001f62b++,', u'bad++surpris++for++me++\U0001f62b++,', 6, 1, None, None, None, None, None, None),
(u':-(++@real_trump++#shetlife++#readytogo++http://www.absurd.com', u':-(++@real_trump++#shetlif++#readytogo++http://www.absurd.com', 5, 1, None, None, None, None, None, None),
(u'me++.', u'me++.', 2, 1, None, None, None, None, None, None),
(u'me++\U0001f62b++,++but++i', u'me++\U0001f62b++,++but++i', 5, 1, None, None, None, None, None, None),
(u'you++think++about++it++?', u'you++think++about++it++?', 5, 1, None, None, None, None, None, None),
(u'right++?++what++do++you', u'right++?++what++do++you', 5, 1, None, None, None, None, None, None),
(u'1', u'1', 1, 2, u'2', u'2', None, None, u'2', None),
(u'pity++for++me++.', u'piti++for++me++.', 4, 1, None, None, None, None, None, None),
(u'explain++a++big++things++.', u'explain++a++big++thing++.', 5, 1, None, None, None, None, None, None),
(u'what++do++you++think', u'what++do++you++think', 4, 1, None, None, None, None, None, None),
(u'for++me++.++:-(', u'for++me++.++:-(', 4, 1, None, None, None, None, None, None),
(u'\U0001f600', u'\U0001f600', 1, 5, u'4', u'4', None, None, u'4', None),
(u'you++but', u'you++but', 2, 2, u'[4, 6]', u'[4, 8]', u'[2, 2]', u'[4, 6]', u'2', u'2'),
(u'bad++news++,++which++we', u'bad++news++,++which++we', 5, 1, None, None, None, None, None, None),
(u'very++pity++for++me++.++:-(', u'veri++piti++for++me++.++:-(', 6, 1, None, None, None, None, None, None),
(u'.++right++?++what', u'.++right++?++what', 4, 1, None, None, None, None, None, None),
(u'.++but++you', u'.++but++you', 3, 2, None, None, None, None, None, None),
(u'but++a++big', u'but++a++big', 3, 1, None, None, None, None, None, None),
(u'it++was++also++very++pity++for', u'it++was++also++veri++piti++for', 6, 1, None, None, None, None, None, None),
(u'bad++news++,++which++we++can', u'bad++news++,++which++we++can', 6, 1, None, None, None, None, None, None),
(u'\U0001f62b++,++but++i++realy', u'\U0001f62b++,++but++i++reali', 5, 1, None, None, None, None, None, None),
(u'=)++\U0001f600++\U0001f308', u'=)++\U0001f600++\U0001f308', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, u'1', None),
(u'for++me++.', u'for++me++.', 3, 1, None, None, None, None, None, None),
(u'realy++liked++it++:p++=)++\U0001f600', u'reali++like++it++:p++=)++\U0001f600', 6, 1, None, None, None, None, None, None),
(u'explanation++.++right++?++what', u'explan++.++right++?++what', 5, 1, None, None, None, None, None, None),
(u'model++,++but++a++big++explanation', u'model++,++but++a++big++explan', 6, 1, None, None, None, None, None, None),
(u'a++big++explanation', u'a++big++explan', 3, 1, None, None, None, None, None, None),
(u'you++but++you', u'you++but++you', 3, 2, u'[6, 6, "IGNOR"]', u'[8, 8, "IGNOR"]', None, None, u'2', None),
(u'-(++\U0001f62b++:-(', u'-(++\U0001f62b++:-(', 3, 1, u'[1, 1, 1]', u'[1, 1, 1]', None, None, u'1', None),
(u'explanation++.++right', u'explan++.++right', 3, 1, None, None, None, None, None, None),
(u'you', u'you', 1, 8, u'7', u'9', u'2', u'4', u'7', u'2'),
(u'big++things', u'big++thing', 2, 1, None, None, None, None, None, None),
(u'it++?++1', u'it++?++1', 3, 1, None, None, None, None, None, None),
(u'for++me++\U0001f62b', u'for++me++\U0001f62b', 3, 1, None, None, None, None, None, None),
(u'-(', u'-(', 1, 1, u'1', u'1', None, None, u'1', None),
(u'tiny++surprise++.++but++you++but', u'tini++surpris++.++but++you++but', 6, 1, None, None, None, None, None, None),
(u'right++?++what++do++you++think', u'right++?++what++do++you++think', 6, 1, None, None, None, None, None, None),
(u'big++explanation++.', u'big++explan++.', 3, 1, None, None, None, None, None, None),
(u'for++explain++a++big++things++.', u'for++explain++a++big++thing++.', 6, 1, None, None, None, None, None, None),
(u'.++right', u'.++right', 2, 1, None, None, None, None, None, None),
(u'\U0001f62b', u'\U0001f62b', 1, 3, u'3', u'3', None, None, u'3', None),
(u'not++acept++.++-(', u'not++acept++.++-(', 4, 1, None, None, None, None, None, None),
(u'for++me++\U0001f62b++,++but', u'for++me++\U0001f62b++,++but', 5, 1, None, None, None, None, None, None),
(u'you++think++about', u'you++think++about', 3, 1, None, None, None, None, None, None),
(u'a++bad', u'a++bad', 2, 1, None, None, None, None, None, None),
(u'?++1', u'?++1', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None),
(u'do++you++think++about++it++?', u'do++you++think++about++it++?', 6, 1, None, None, None, None, None, None),
(u'can++not++acept++.++-(', u'can++not++acept++.++-(', 5, 1, None, None, None, None, None, None),
(u'a++big++things++.', u'a++big++thing++.', 4, 1, None, None, None, None, None, None),
(u'but++you++\U0001f600', u'but++you++\U0001f600', 3, 1, u'[3, 1, 1]', u'[3, 2, 1]', None, None, u'1', None),
(u'\U0001f62b++1++.++but', u'\U0001f62b++1++.++but', 4, 1, None, None, None, None, None, None),
(u'right++?', u'right++?', 2, 1, None, None, None, None, None, None),
(u'\U0001f62b++1++.++but++you++but', u'\U0001f62b++1++.++but++you++but', 6, 1, None, None, None, None, None, None),
(u'pity++for++me++.++:-(++@real_trump', u'piti++for++me++.++:-(++@real_trump', 6, 1, None, None, None, None, None, None),
(u'it++?++1++\U0001f62b++1', u'it++?++1++\U0001f62b++1', 5, 1, None, None, None, None, None, None),
(u'tiny++model++,++but++a++big', u'tini++model++,++but++a++big', 6, 1, None, None, None, None, None, None),
(u'right++?++what', u'right++?++what', 3, 1, None, None, None, None, None, None),
(u'bad++surprise++for++me++\U0001f62b', u'bad++surpris++for++me++\U0001f62b', 5, 1, None, None, None, None, None, None),
(u'model++,++which', u'model++,++which', 3, 1, None, None, None, None, None, None),
(u'1++.++but', u'1++.++but', 3, 1, None, None, None, None, None, None),
(u'pity++for', u'piti++for', 2, 1, None, None, None, None, None, None),
(u':p++=)++\U0001f600', u':p++=)++\U0001f600', 3, 1, None, None, None, None, None, None),
(u'me++\U0001f62b', u'me++\U0001f62b', 2, 1, None, None, None, None, None, None),
(u'also++very++pity', u'also++veri++piti', 3, 1, None, None, None, None, None, None),
(u'model++,++which++we++can++use', u'model++,++which++we++can++use', 6, 1, None, None, None, None, None, None),
(u'you++\U0001f600++\U0001f308++\U0001f600++\U0001f308++\U0001f600', u'you++\U0001f600++\U0001f308++\U0001f600++\U0001f308++\U0001f600', 6, 1, u'[1, 3, 2, "IGNOR", "IGNOR", "IGNOR"]', u'[2, 3, 2, "IGNOR", "IGNOR", "IGNOR"]', None, None, u'1', None),
(u'about++it++?', u'about++it++?', 3, 1, None, None, None, None, None, None),
(u'but++i++realy', u'but++i++reali', 3, 1, None, None, None, None, None, None),
(u'liked++it++:p++=)', u'like++it++:p++=)', 4, 1, None, None, None, None, None, None),
(u'do++you', u'do++you', 2, 1, None, None, None, None, None, None),
(u'1++\U0001f62b', u'1++\U0001f62b', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None),
(u':-(', u':-(', 1, 2, u'2', u'2', None, None, u'2', None),
(u'?++1++\U0001f62b++1++.', u'?++1++\U0001f62b++1++.', 5, 1, None, None, None, None, None, None),
(u'.++right++?++what++do++you', u'.++right++?++what++do++you', 6, 1, None, None, None, None, None, None),
(u'big++explanation', u'big++explan', 2, 1, None, None, None, None, None, None),
(u'.++-(++\U0001f62b', u'.++-(++\U0001f62b', 3, 1, None, None, None, None, None, None),
(u'but++a++big++explanation', u'but++a++big++explan', 4, 1, None, None, None, None, None, None),
(u'.++:-(', u'.++:-(', 2, 1, u'[1, 1]', u'[1, 1]', None, None, u'1', None),
(u'glad', u'glad', 1, 1, u'1', u'1', None, None, u'1', None),
(u'a++big++explanation++.++right++?', u'a++big++explan++.++right++?', 6, 1, None, None, None, None, None, None),
(u'you++\U0001f600++\U0001f308', u'you++\U0001f600++\U0001f308', 3, 1, u'[1, 1, 1]', u'[2, 1, 1]', None, None, u'1', None),
(u',++but++i++realy++liked++it', u',++but++i++reali++like++it', 6, 1, None, None, None, None, None, None)]
#p(baseline ,"baseline")
set([ tuple(unicode(item) for item in b ) for b in baseline]).should.be.equal(set([ tuple(unicode(item) for item in b ) for b in right_baseline]))
assert stats._compute_baseline_sum() > 0
@attr(status='stable')
#@wipd
def test_recompute_syntagma_repetativity_scope_621(self):
    """recompute_syntagma_repetativity_scope() must toggle the internal
    _full_repetativ_syntagma flag in both directions, regardless of the
    value the Stats instance was initialised with.

    The scenario is executed twice — once starting from
    full_repetativ_syntagma=True and once from False — flipping the scope
    away from and then back to its initial value each time.
    """
    self.prj_folder()
    #self.blogger_corpus()
    self.test_dbs()
    # Only the config values actually consumed below are extracted;
    # the original also pulled platform_name/license/template_name/
    # source/stats_id, which were never used (and `license` shadowed
    # the builtin of the same name).
    info = self.configer.init_info_data["blogger"]
    name = info["name"]
    language = info["language"]
    visibility = info["visibility"]
    version = info["version"]
    encryption_key = info["encryption_key"]["stats"]
    corpus_id = info["id"]["corpus"]
    for initial_scope in (True, False):
        # Build a fresh Stats DB from the blogger plaintext corpus with
        # the given initial repetativity scope.
        stats = Stats(mode=self.mode, use_cash=True, status_bar=True)
        stats.init(self.tempdir_project_folder, name, language, visibility,
                   corpus_id=corpus_id, version=version,
                   encryption_key=encryption_key,
                   full_repetativ_syntagma=initial_scope,
                   baseline_delimiter="++")
        corp = Corpus(mode=self.mode)
        corp.open(os.path.join(self.tempdir_testdbs, self.db_blogger_plaintext_corp_de))
        stats.compute(corp, stream_number=1, adjust_to_cpu=False, freeze_db=False)
        stats.optimize_db()
        ### RepCompution: flip the scope away from the initial value and back.
        assert stats._full_repetativ_syntagma == initial_scope
        assert stats.recompute_syntagma_repetativity_scope(not initial_scope)
        assert stats._full_repetativ_syntagma == (not initial_scope)
        assert stats.recompute_syntagma_repetativity_scope(initial_scope)
        assert stats._full_repetativ_syntagma == initial_scope
@attr(status='stable')
#@wipd
def test_reconstruct_syntagma_630(self):
    """Exercise Stats._reconstruct_syntagma() on hand-built DB rows.

    Covers both repetition types ('repl' and 'redu') and both input
    layouts (order_output_by_syntagma_order False = flat row list,
    True = rows pre-grouped per syntagma token), plus one POS-based
    case (syntagma_type="pos").  For each case the raw nested output is
    flattened into plain dicts and compared against an expected
    structure of the form
        {doc_id: {sentence_index: {token_index: [word, (row_ids,)]}}}
    together with the per-document length lists.
    """
    self.prj_folder()
    #self.blogger_corpus()
    self.test_dbs()
    # Open a prebuilt stats DB; _reconstruct_syntagma is read-only here.
    stats = Stats(mode=self.mode,use_cash=True)#, )
    stats.open(os.path.join(self.tempdir_testdbs,self.db_blogger_plaintext_stats_de))
    # Column-name -> tuple-index maps for the 'repl', 'redu' and
    # 'baseline' tables; _reconstruct_syntagma uses these to address
    # fields inside the raw row tuples below.
    indexes = {'repl': {u'pos': 10, u'index_of_repl': 9, u'id': 0, u'polarity': 11, u'in_redu': 12, u'context_infoL2': 20, u'context_infoL3': 18, u'context_infoL1': 22, u'context_infoL4': 16, u'context_infoL5': 14, u'contextR2': 25, u'contextR3': 27, u'contextR1': 23, u'contextR4': 29, u'contextR5': 31, u'rle_word': 6, u'index_in_redufree': 4, u'contextL1': 21, u'contextL4': 15, u'contextL5': 13, u'redufree_len': 2, u'repl_length': 8, u'contextL2': 19, u'contextL3': 17, u'context_infoR1': 24, u'context_infoR2': 26, u'context_infoR3': 28, u'context_infoR4': 30, u'context_infoR5': 32, u'index_in_corpus': 3, u'normalized_word': 5, u'doc_id': 1, u'repl_letter': 7}, 'baseline': {u'occur_repl_uniq': 3, u'syntagma': 0, u'hight_scope_uniq_occur_redu': 8, u'occur_redu_exhausted': 6, u'occur_redu_uniq': 5, u'hight_scope_uniq_occur_repl': 7, u'occur_syntagma_all': 1, u'scope': 2, u'occur_repl_exhausted': 4}, 'redu': {u'orig_words': 6, u'pos': 8, u'id': 0, u'polarity': 9, u'redu_length': 7, u'context_infoL2': 17, u'context_infoL3': 15, u'context_infoL1': 19, u'context_infoL4': 13, u'context_infoL5': 11, u'contextR2': 22, u'contextR3': 24, u'contextR1': 20, u'contextR4': 26, u'contextR5': 28, u'index_in_redufree': 4, u'contextL4': 12, u'contextL5': 10, u'redufree_len': 2, u'contextL1': 18, u'contextL2': 16, u'contextL3': 14, u'context_infoR1': 21, u'context_infoR2': 23, u'context_infoR3': 25, u'context_infoR4': 27, u'context_infoR5': 29, u'index_in_corpus': 3, u'normalized_word': 5, u'doc_id': 1}}
    ######## rep_type = 'repl'##########
    ########################
    ##### order_output_by_syntagma_order = False ######
    #######################
    ######## rep_type = 'repl'##########
    ### Case 1.1
    # Flat list of raw 'repl' rows (tuple layout per indexes['repl']);
    # fields are: id, doc_id, redufree_len, index_in_corpus,
    # index_in_redufree, normalized_word, rle_word, ... plus context columns.
    rep_type = 'repl'
    reps = [
        (1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
        (15, 10000, u'[8]', u'[0, 1]', u'[0, 1]', u'klitze', u'klitze^4', u'e', 4, 5, u'VAPPER', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, u'eine', u'["ART"]', u'kleine', u'["ADJA"]', u'\xfcberaschung', u'["NN"]', u'@sch\xf6nesleben', u'["mention"]', u'#machwasdaraus', u'["hashtag"]', u'#bewegedeinarsch', u'["hashtag"]'),
        (17, 11111, u'[5, 12]', u'[0, 1]', u'[0, 1]', u'klitze', u'klitze^4', u'e', 4, 5, u'VAPPER', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, u'eine', u'["ART"]', u'kleine', u'["ADJA"]', u'sache', u'["NN"]', u'.', u'["symbol"]', u'die', u'["PDS"]', u'aber', u'["ADV"]'),
        (2, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5ine', u'e', 5, 2, u'NE', u'["neutral", 0.0]', u'[0, 2]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]'),
        (3, 8888, u'[4, 9]', u'[0, 3]', u'[0, 1]', u'kleine', u'klein^3e', u'n', 3, 4, u'NE', u'["neutral", 0.0]', u'[0, 2]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]')
        ]
    inp_syntagma_splitted = [u'klitze', u'kleine']
    scope = 2
    minimum_columns = False
    #indexes = self.col_index_min if minimum_columns else self.col_index_orig
    order_output_by_syntagma_order = False
    # Expected: {doc_id: {sentence: {token_index: [word, (row_ids,)]}}}.
    right_output = {
        8888: {
            0: {
                0: [u'klitze', (1,)],
                1: [u'kleine', (2, 3)]
            }},
        10000: {
            0: {
                1: [u'klitze', (15,)]}},
        11111: {0: {1: [u'klitze', (17,)]
            }}}
    right_length = {8888: [4, 9], 10000: [8], 11111: [5, 12]}
    output_raw,length = stats._reconstruct_syntagma(rep_type, reps,order_output_by_syntagma_order,indexes)
    #p((output_raw,length))
    # Flatten the raw output containers into plain nested dicts so they
    # compare equal to the literal right_output above.
    preparated_output = {d:{s:{t:ids for t, ids in s_data.iteritems()} for s, s_data in doc_data.iteritems()} for d, doc_data in output_raw.iteritems()}
    #p(preparated_output, "preparated_output")
    preparated_output.should.be.equal(right_output)
    length.should.be.equal(right_length)
    ######## rep_type = 'redu'##########
    ### Case 1.1
    # Same flat-input scenario, but with 'redu' rows (shorter tuple
    # layout per indexes['redu']: no repl_letter/repl_length columns).
    rep_type = 'redu'
    reps = [
        (1, 8888, u'[4, 9]', u'[0, 0]', u'[0, 0]', u'klitze', u'{"klitze": 1, "kli^4tze": 1}', 2, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
        (4, 12222, u'[11]', u'[0, 1]', u'[0, 1]', u'klitze', u'{"klitze": 4}', 4, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'eine', u'["ART"]', u'kleine', u'["ADJA"]', u'\xfcberaschung', u'["NN"]', u',', u'["symbol"]', u'die', u'["PRELS"]', u'ich', u'["PPER"]'),
        (2, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'{"kle^5ine": 1, "klein^3e": 1}', 2, u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]')
        ]
    inp_syntagma_splitted = [u'klitze', u'kleine']
    scope = 2
    minimum_columns = False
    order_output_by_syntagma_order = False
    right_output = {
        8888: {0:
            {
                0: [u'klitze', (1,)],
                1: [u'kleine', (2,)]}},
        12222: {0:
            {
                1: [u'klitze', (4,)]}}}
    right_length ={8888: [4, 9], 12222: [11]}
    output_raw,length = stats._reconstruct_syntagma(rep_type, reps, order_output_by_syntagma_order,indexes)
    #p((output_raw,length))
    preparated_output = {d:{s:{t:ids for t, ids in s_data.iteritems()} for s, s_data in doc_data.iteritems()} for d, doc_data in output_raw.iteritems()}
    #p(preparated_output, "preparated_output")
    preparated_output.should.be.equal(right_output)
    length.should.be.equal(right_length)
    # #####################################################################################################################
    ########################
    ##### order_output_by_syntagma_order = True ######
    #######################
    ######## rep_type = 'repl'##########
    ### Case 1.1
    # Here the rows arrive pre-grouped: a sequence of
    # (syntagma_token, [rows...]) pairs instead of one flat list.
    rep_type = 'repl'
    reps = [
        (u'klitze',
            [
                (1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
                (15, 10000, u'[8]', u'[0, 1]', u'[0, 1]', u'klitze', u'klitze^4', u'e', 4, 5, u'VAPPER', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, u'eine', u'["ART"]', u'kleine', u'["ADJA"]', u'\xfcberaschung', u'["NN"]', u'@sch\xf6nesleben', u'["mention"]', u'#machwasdaraus', u'["hashtag"]', u'#bewegedeinarsch', u'["hashtag"]'),
                (17, 11111, u'[5, 12]', u'[0, 1]', u'[0, 1]', u'klitze', u'klitze^4', u'e', 4, 5, u'VAPPER', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, u'eine', u'["ART"]', u'kleine', u'["ADJA"]', u'sache', u'["NN"]', u'.', u'["symbol"]', u'die', u'["PDS"]', u'aber', u'["ADV"]')]),
        (u'kleine',
            [
                (2, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5ine', u'e', 5, 2, u'NE', u'["neutral", 0.0]', u'[0, 2]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]'),
                (3, 8888, u'[4, 9]', u'[0, 3]', u'[0, 1]', u'kleine', u'klein^3e', u'n', 3, 4, u'NE', u'["neutral", 0.0]', u'[0, 2]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]')])]
    inp_syntagma_splitted = [u'klitze', u'kleine']
    scope = 2
    minimum_columns = False
    order_output_by_syntagma_order = True
    # Same expected result as the flat-input Case 1.1 above: grouping of
    # the input must not change the reconstructed structure.
    right_output = {
        8888:
            {0:
                {
                    0: [u'klitze', (1,)],
                    1: [u'kleine', (2, 3)]}},
        10000: {0: {
                    1: [u'klitze', (15,)]}},
        11111: {0: {
                    1: [u'klitze', (17,)]}}}
    right_length = {8888: [4, 9], 10000: [8], 11111: [5, 12]}
    output_raw,length = stats._reconstruct_syntagma(rep_type, reps, order_output_by_syntagma_order,indexes)
    #p((output_raw,length))
    preparated_output = {d:{s:{t:ids for t, ids in s_data.iteritems()} for s, s_data in doc_data.iteritems()} for d, doc_data in output_raw.iteritems()}
    #p(preparated_output, "preparated_output")
    preparated_output.should.be.equal(right_output)
    length.should.be.equal(right_length)
    ### Case 1.2
    # POS-based syntagma (syntagma_type="pos"): every group is keyed by
    # the POS tag 'number'; the same rows may occur in several groups, so
    # the expected row-id tuples contain repeats (e.g. (22, 22, 22)).
    rep_type = 'repl'
    reps = (
        ('number',
            (
                (20, 11111, u'[5, 12]', u'[1, 6]', u'[1, 6]', u'1', u'1^5', u'1', 5, 0, u'number', u'["neutral", 0.0]', None, u'aber', u'["ADV"]', u'trotzdem', u'["PAV"]', u'wichtig', u'["ADJA"]', u'ist', u'["NN"]', u'!', u'["symbol"]', 2, u'["number"]', 3, u'["number"]', 4, u'["number"]', 5, u'["number"]', 6, u'["number"]'),
                (21, 11111, u'[5, 12]', u'[1, 7]', u'[1, 7]', u'2', u'2^4', u'2', 4, 0, u'number', u'["neutral", 0.0]', None, u'trotzdem', u'["PAV"]', u'wichtig', u'["ADJA"]', u'ist', u'["NN"]', u'!', u'["symbol"]', 1, u'["number"]', 3, u'["number"]', 4, u'["number"]', 5, u'["number"]', 6, u'["number"]', None, None),
                (22, 11111, u'[5, 12]', u'[1, 8]', u'[1, 8]', u'3', u'3^5', u'3', 5, 0, u'number', u'["neutral", 0.0]', None, u'wichtig', u'["ADJA"]', u'ist', u'["NN"]', u'!', u'["symbol"]', 1, u'["number"]', 2, u'["number"]', 4, u'["number"]', 5, u'["number"]', 6, u'["number"]', None, None, None, None),
                (23, 11111, u'[5, 12]', u'[1, 9]', u'[1, 9]', u'4', u'4^4', u'4', 4, 0, u'number', u'["neutral", 0.0]', None, u'ist', u'["NN"]', u'!', u'["symbol"]', 1, u'["number"]', 2, u'["number"]', 3, u'["number"]', 5, u'["number"]', 6, u'["number"]', None, None, None, None, None, None))),
        ('number',
            (
                (21, 11111, u'[5, 12]', u'[1, 7]', u'[1, 7]', u'2', u'2^4', u'2', 4, 0, u'number', u'["neutral", 0.0]', None, u'trotzdem', u'["PAV"]', u'wichtig', u'["ADJA"]', u'ist', u'["NN"]', u'!', u'["symbol"]', 1, u'["number"]', 3, u'["number"]', 4, u'["number"]', 5, u'["number"]', 6, u'["number"]', None, None),
                (22, 11111, u'[5, 12]', u'[1, 8]', u'[1, 8]', u'3', u'3^5', u'3', 5, 0, u'number', u'["neutral", 0.0]', None, u'wichtig', u'["ADJA"]', u'ist', u'["NN"]', u'!', u'["symbol"]', 1, u'["number"]', 2, u'["number"]', 4, u'["number"]', 5, u'["number"]', 6, u'["number"]', None, None, None, None),
                (23, 11111, u'[5, 12]', u'[1, 9]', u'[1, 9]', u'4', u'4^4', u'4', 4, 0, u'number', u'["neutral", 0.0]', None, u'ist', u'["NN"]', u'!', u'["symbol"]', 1, u'["number"]', 2, u'["number"]', 3, u'["number"]', 5, u'["number"]', 6, u'["number"]', None, None, None, None, None, None),
                (24, 11111, u'[5, 12]', u'[1, 10]', u'[1, 10]', u'5', u'5^5', u'5', 5, 0, u'number', u'["neutral", 0.0]', None, u'!', u'["symbol"]', 1, u'["number"]', 2, u'["number"]', 3, u'["number"]', 4, u'["number"]', 6, u'["number"]', None, None, None, None, None, None, None, None))),
        ('number',
            (
                (22, 11111, u'[5, 12]', u'[1, 8]', u'[1, 8]', u'3', u'3^5', u'3', 5, 0, u'number', u'["neutral", 0.0]', None, u'wichtig', u'["ADJA"]', u'ist', u'["NN"]', u'!', u'["symbol"]', 1, u'["number"]', 2, u'["number"]', 4, u'["number"]', 5, u'["number"]', 6, u'["number"]', None, None, None, None),
                (23, 11111, u'[5, 12]', u'[1, 9]', u'[1, 9]', u'4', u'4^4', u'4', 4, 0, u'number', u'["neutral", 0.0]', None, u'ist', u'["NN"]', u'!', u'["symbol"]', 1, u'["number"]', 2, u'["number"]', 3, u'["number"]', 5, u'["number"]', 6, u'["number"]', None, None, None, None, None, None),
                (24, 11111, u'[5, 12]', u'[1, 10]', u'[1, 10]', u'5', u'5^5', u'5', 5, 0, u'number', u'["neutral", 0.0]', None, u'!', u'["symbol"]', 1, u'["number"]', 2, u'["number"]', 3, u'["number"]', 4, u'["number"]', 6, u'["number"]', None, None, None, None, None, None, None, None))))
    scope = 3
    minimum_columns = False
    order_output_by_syntagma_order = True
    right_output = {
        11111: {
            1: {
                8: [u'number', (22, 22, 22)],
                9: [u'number', (23, 23, 23)],
                10: [u'number', (24, 24)],
                6: [u'number', (20,)],
                7: [u'number', (21, 21)]
            }}}
    right_length = {11111: [5, 12]}
    output_raw,length = stats._reconstruct_syntagma(rep_type, reps, order_output_by_syntagma_order,indexes,syntagma_type="pos")
    #p((output_raw,length))
    preparated_output = {d:{s:{t:ids for t, ids in s_data.iteritems()} for s, s_data in doc_data.iteritems()} for d, doc_data in output_raw.iteritems()}
    #p(preparated_output, "preparated_output")
    preparated_output.should.be.equal(right_output)
    length.should.be.equal(right_length)
@attr(status='stable')
#@wipd
def test_exctract_full_syntagmas_631(self):
stats = Stats(mode=self.mode)
########################
##### scope = 1 ######
#######################
#####################
## scope = 1 ######
###################
### Case 1.1
inp_syntagma_splitted = ("klitze",)
redu_free_elem_length = { 10000:[5,1,1], 11111:[2,5,6]}
scope = 1
reconstructed_syntagmas = {
11111:{
0:{
1:["klitze",(18,)],
},
1:{
1:["klitze",(20,)],
2:["kleine",(22,)],
},
2:{
0:["klitze",(29,)],
2:["klitze",(26,)],
4:["klitze",(27,)],
}
}
}
right_full_syntagmas = (((0, 1),), ((1, 1),), ((2, 0),), ((2, 2),), ((2, 4),))
right_allow_ids = (18, 27, 20, 26, 29)
full_syntagmas, allow_ids = stats._exctract_full_syntagmas(reconstructed_syntagmas, scope,redu_free_elem_length,inp_syntagma_splitted)
#p((full_syntagmas, allow_ids))
full_syntagmas.should.be.equal(right_full_syntagmas)
set(allow_ids).should.be.equal(set(right_allow_ids))
#p(set([ True if len(syn)== scope else False for syn in full_syntagmas ,),)
assert False not in set([ True if len(syn)>= scope else False for syn in full_syntagmas ])
########################
##### scope = 2 ######
########################
### Case 2.0
inp_syntagma_splitted = ("klitze","kleine")
redu_free_elem_length = { 10000:[5,1,1], 11111:[2,5,4]}
scope = 2
reconstructed_syntagmas = {
11111:{
0:{
1:["klitze",(18,)],
2:["kleine",(19,)],
},
}
}
right_full_syntagmas = (((0, 1), (0, 2)),)
right_allow_ids = (18, 19)
full_syntagmas, allow_ids = stats._exctract_full_syntagmas(reconstructed_syntagmas, scope,redu_free_elem_length,inp_syntagma_splitted)
#p(full_syntagmas, allow_ids)
full_syntagmas.should.be.equal(right_full_syntagmas)
set(allow_ids).should.be.equal(set(right_allow_ids))
#p(set([ True if len(syn)== scope else False for syn in full_syntagmas ,),)
assert False not in set([ True if len(syn)== scope else False for syn in full_syntagmas ])
### Case 2.1
inp_syntagma_splitted = ("klitze","kleine")
redu_free_elem_length = { 10000:[5,1,1], 11111:[2,5,4]}
scope = 2
reconstructed_syntagmas = {
11111:{
0:{
1:["klitze",(18,)],
2:["kleine",(19,)],
},
1:{
1:["klitze",(20,)],
2:["kleine",(22,)],
}
}
}
right_full_syntagmas = (((0, 1), (0, 2)), ((1, 1), (1, 2)))
right_allow_ids = (18, 19, 20, 22)
full_syntagmas, allow_ids = stats._exctract_full_syntagmas(reconstructed_syntagmas, scope,redu_free_elem_length,inp_syntagma_splitted)
full_syntagmas.should.be.equal(right_full_syntagmas)
set(allow_ids).should.be.equal(set(right_allow_ids))
#p(set([ True if len(syn)== scope else False for syn in full_syntagmas ,),)
assert False not in set([ True if len(syn)== scope else False for syn in full_syntagmas ])
### Case 2.2
inp_syntagma_splitted = ("klitze","kleine")
redu_free_elem_length = { 11111:[2,5,4]}
scope = 2
reconstructed_syntagmas = {
11111:{
0:{
1:["klitze",(18,)],
2:["kleine",(19,)],
},
1:{
1:["klitze",(20,)],
4:["klitze",(22,)],
},
2:{
0:["kleine",(24,554,)],
2:["kleine",(29,56,)],
}
}
}
right_full_syntagmas = (((0, 1), (0, 2)), ((1, 4), (2, 0)))
right_allow_ids = (18, 19, 22, 24, 554)
full_syntagmas, allow_ids = stats._exctract_full_syntagmas(reconstructed_syntagmas, scope,redu_free_elem_length,inp_syntagma_splitted)
#p((full_syntagmas, allow_ids))
full_syntagmas.should.be.equal(right_full_syntagmas)
set(allow_ids).should.be.equal(set(right_allow_ids))
#p(set([ True if len(syn)== scope else False for syn in full_syntagmas ,),)
assert False not in set([ True if len(syn)== scope else False for syn in full_syntagmas ])
### Case 2.3
# inp_syntagma_splitted = [u'klitze', u'kleine']
# _rep = [
# (u'klitze',
# [
# (1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
# (15, 10000, u'[8]', u'[0, 1]', u'[0, 1]', u'klitze', u'klitze^4', u'e', 4, 5, u'VAPPER', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, u'eine', u'["ART"]', u'kleine', u'["ADJA"]', u'\xfcberaschung', u'["NN"]', u'@sch\xf6nesleben', u'["mention"]', u'#machwasdaraus', u'["hashtag"]', u'#bewegedeinarsch', u'["hashtag"]'),
# (17, 11111, u'[5, 12]', u'[0, 1]', u'[0, 1]', u'klitze', u'klitze^4', u'e', 4, 5, u'VAPPER', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, u'eine', u'["ART"]', u'kleine', u'["ADJA"]', u'sache', u'["NN"]', u'.', u'["symbol"]', u'die', u'["PDS"]', u'aber', u'["ADV"]'),),
# (u'kleine',
# [
# (2, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5ine', u'e', 5, 2, u'NE', u'["neutral", 0.0]', u'[0, 2]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]'),
# (3, 8888, u'[4, 9]', u'[0, 3]', u'[0, 1]', u'kleine', u'klein^3e', u'n', 3, 4, u'NE', u'["neutral", 0.0]', u'[0, 2]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]')
# ,)]
inp_syntagma_splitted = [u'klitze', u'kleine']
redu_free_elem_length = {8888: [4, 9], 10000: [8], 11111: [5, 12]}
scope = 2
reconstructed_syntagmas = {
8888: {0: {
0:["klitze",(1,)],
1:["kleine",(2, 3,)]
}
},
10000: {0: {
1:["klitze",(15,)]
}
},
11111: {0: {
1:["klitze",(17,)]
}
}
}
right_full_syntagmas = (((0, 0), (0, 1)),)
right_allow_ids = (1, 2, 3)
full_syntagmas, allow_ids = stats._exctract_full_syntagmas(reconstructed_syntagmas, scope,redu_free_elem_length,inp_syntagma_splitted)
#p((full_syntagmas, allow_ids))
full_syntagmas.should.be.equal(right_full_syntagmas)
set(allow_ids).should.be.equal(set(right_allow_ids))
#p(set([ True if len(syn)== scope else False for syn in full_syntagmas ,),)
assert False not in set([ True if len(syn)== scope else False for syn in full_syntagmas ])
### Case 2.4
#
# inp_syntagma_splitted = [u'klitze', u'kleine']
# _rep = [
# (u'klitze',
# [
# (1, 8888, u'[4, 9]', u'[0, 0]', u'[0, 0]', u'klitze', u'{"klitze": 1, "kli^4tze": 1}', 2, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
# (4, 12222, u'[11]', u'[0, 1]', u'[0, 1]', u'klitze', u'{"klitze": 4}', 4, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'eine', u'["ART"]', u'kleine', u'["ADJA"]', u'\xfcberaschung', u'["NN"]', u',', u'["symbol"]', u'die', u'["PRELS"]', u'ich', u'["PPER"]'),),
# (u'kleine',
# [
# (2, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'{"kle^5ine": 1, "klein^3e": 1}', 2, u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]')
# ,)]
inp_syntagma_splitted = [u'klitze', u'kleine']
redu_free_elem_length = {8888: [4, 9], 12222: [11]}
scope = 2
reconstructed_syntagmas = {
8888: {0: {
0:["klitze",(1,)],
1:["kleine",(2,)]
}
},
12222: {0: {
1:["klitze",(4,)]
}
}
}
right_full_syntagmas = (((0, 0), (0, 1)),)
right_allow_ids = (1, 2)
full_syntagmas, allow_ids = stats._exctract_full_syntagmas(reconstructed_syntagmas, scope,redu_free_elem_length,inp_syntagma_splitted)
#p((full_syntagmas, allow_ids))
full_syntagmas.should.be.equal(right_full_syntagmas)
set(allow_ids).should.be.equal(set(right_allow_ids))
#p(set([ True if len(syn)== scope else False for syn in full_syntagmas ,),)
assert False not in set([ True if len(syn)== scope else False for syn in full_syntagmas ])
### Case 2.5
# inp_syntagma_splitted = [u'klitze', u'kleine']
# _rep = [
# (u'klitze',
# [
# (1, 8888, u'[4, 9]', u'[0, 0]', u'[0, 0]', u'klitze', u'{"klitze": 1, "kli^4tze": 1}', 2, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
# (4, 12222, u'[11]', u'[0, 1]', u'[0, 1]', u'klitze', u'{"klitze": 4}', 4, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'eine', u'["ART"]', u'kleine', u'["ADJA"]', u'\xfcberaschung', u'["NN"]', u',', u'["symbol"]', u'die', u'["PRELS"]', u'ich', u'["PPER"]'),),
# (u'kleine',
# [
# (2, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'{"kle^5ine": 1, "klein^3e": 1}', 2, u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]')
# ,)]
inp_syntagma_splitted = [u'klitze', u'kleine']
redu_free_elem_length = {8888: [4, 9], 12222: [11]}
scope = 2
reconstructed_syntagmas = {
8888: {0: {
0:["klitze",(1,)],
1:["kleine",(2,)],
2:["klitze",(2,)],
3:["kleine",(3,)],
}
},
12222: {0: {
1:["klitze",(4,)]
}
}
}
right_full_syntagmas = (((0, 0), (0, 1)), ((0, 2), (0, 3)))
right_allow_ids = (1, 2, 3)
full_syntagmas, allow_ids = stats._exctract_full_syntagmas(reconstructed_syntagmas, scope,redu_free_elem_length,inp_syntagma_splitted)
#p((full_syntagmas, allow_ids))
full_syntagmas.should.be.equal(right_full_syntagmas)
set(allow_ids).should.be.equal(set(right_allow_ids))
#p(set([ True if len(syn)== scope else False for syn in full_syntagmas ,),)
assert False not in set([ True if len(syn)== scope else False for syn in full_syntagmas ])
########################
##### scope = 3 ######
########################
### Case 3.1
inp_syntagma_splitted = ("klitze","kleine","iii")
redu_free_elem_length = {8888:[2, 3], 10000:[3,4], 11111:[2,5,4]}
scope = 3
reconstructed_syntagmas = {
8888:{
0:{
0:["klitze",(1,)],
1:["kleine",(2, 3,)]
},
1:{
0:["iii",(10,)]
},
},
10000:{
0: {
1:["kleine",(15,)]
}
},
11111:{
0:{
1:["kleine",(17,)]
}
}
}
#
right_full_syntagmas = (((0, 0), (0, 1), (1, 0)),)
right_allow_ids = (1, 2, 3, 10)
full_syntagmas, allow_ids = stats._exctract_full_syntagmas(reconstructed_syntagmas, scope,redu_free_elem_length,inp_syntagma_splitted)
#p((full_syntagmas, allow_ids))
full_syntagmas.should.be.equal(right_full_syntagmas)
set(allow_ids).should.be.equal(set(right_allow_ids))
assert False not in set([ True if len(syn)== scope else False for syn in full_syntagmas ])
########################
##### scope = 3 ######
########################
### Case 3.2
inp_syntagma_splitted = ("klitze","kleine","iii")
redu_free_elem_length = {8888:[4, 5], 10000:[5,1,1], 11111:[2,5,4]}
scope = 3
reconstructed_syntagmas = {
8888:{
0:{
0:["klitze",(1,)],
1:["kleine",(2, 3,)],
2:["iii",(4, 5,)],
},
1:{
1:["klitze",(10,)],
2:["kleine",(11,)],
3:["i",(12,)],
},
},
}
#
right_full_syntagmas = (((0, 0), (0, 1), (0, 2)), ((1, 1), (1, 2), (1, 3)))
right_allow_ids = (1, 2, 3, 4, 5, 10, 11, 12)
full_syntagmas, allow_ids = stats._exctract_full_syntagmas(reconstructed_syntagmas, scope,redu_free_elem_length,inp_syntagma_splitted)
#p((full_syntagmas, allow_ids))
full_syntagmas.should.be.equal(right_full_syntagmas)
set(allow_ids).should.be.equal(set(right_allow_ids))
assert False not in set([ True if len(syn)== scope else False for syn in full_syntagmas ])
########################
##### scope = 3 ######
########################
### Case 3.3
inp_syntagma_splitted = ("klitze","kleine","iii")
redu_free_elem_length = { 10000:[5,1,1], 11111:[2,5,4]}
scope = 3
reconstructed_syntagmas = {
10000:{
0: {
4:["klitze",(15,)],
},
1: {
0:["kleine",(16,)],
},
2: {
0:["iii",(17,)],
},
}
}
right_full_syntagmas = (((0, 4), (1, 0), (2, 0)),)
right_allow_ids = (15, 16, 17)
full_syntagmas, allow_ids = stats._exctract_full_syntagmas(reconstructed_syntagmas, scope,redu_free_elem_length,inp_syntagma_splitted)
full_syntagmas.should.be.equal(right_full_syntagmas)
set(allow_ids).should.be.equal(set(right_allow_ids))
assert False not in set([ True if len(syn)== scope else False for syn in full_syntagmas ])
########################
##### scope = 6 ######
########################
### Case 6.1
inp_syntagma_splitted = ("klitze","kleine","iii",",","oder","wie")
redu_free_elem_length = { 10000:[5,2,2,1], 11111:[2,5,4]}
scope = 6
reconstructed_syntagmas = {
10000:{
0: {
4:["klitze",(15,)],
},
1: {
0:["kleine",(16,)],
1:["iii",(19,)],
},
2: {
0:[",",(17,)],
1:["oder",(34,)],
},
3: {
0:["wie",(17,)],
},
}
}
right_full_syntagmas = (((0, 4), (1, 0), (1, 1), (2, 0), (2, 1), (3, 0)),)
right_allow_ids = (15, 16, 19, 17, 34, 17)
full_syntagmas, allow_ids = stats._exctract_full_syntagmas(reconstructed_syntagmas, scope,redu_free_elem_length,inp_syntagma_splitted)
#p((full_syntagmas, allow_ids))
full_syntagmas.should.be.equal(right_full_syntagmas)
set(allow_ids).should.be.equal(set(right_allow_ids))
assert False not in set([ True if len(syn)== scope else False for syn in full_syntagmas ])
### Case 6.2
inp_syntagma_splitted = ("klitze","kleine","iii",",","oder","wie")
redu_free_elem_length = { 10000:[10,3,4,5], 11111:[7,4,2,1]}
scope = 6
reconstructed_syntagmas = {
10000:{
0: {
0:["klitze",(15,)],
1:["kleine",(16,)],
2:["iii",(17,)],
3:[",",(18,)],
4:["oder",(19,)],
5:["wie",(20,)],
},
1: {
0:["klitze",(164,)],
1:["kleine",(193,)],
},
2: {
0:["klitze",(172,)],
1:["kleine",(343,)],
},
},
11111:{
0: {
4:["klitze",(23,)],
5:["kleine",(25,)],
6:["iii",(64,)],
},
1: {
0:[",",(152,)],
1:["oder",(114,)],
2:["wie",(1,)],
},
2: {
0:["klitze",(147,)],
1:["kleine",(344,)],
},
3: {
0:["iii",(178,)],
},
}
}
right_full_syntagmas = (((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5)), ((0, 4), (0, 5), (0, 6), (1, 0), (1, 1), (1, 2)))
right_allow_ids = (15, 16, 17, 18, 19, 20, 23, 25, 64, 152, 114, 1)
full_syntagmas, allow_ids = stats._exctract_full_syntagmas(reconstructed_syntagmas, scope,redu_free_elem_length,inp_syntagma_splitted)
#p((full_syntagmas, allow_ids))
full_syntagmas.should.be.equal(right_full_syntagmas)
set(allow_ids).should.be.equal(set(right_allow_ids))
assert False not in set([ True if len(syn)== scope else False for syn in full_syntagmas ])
# ### Case 6.3
inp_syntagma_splitted = ("klitze","kleine","iii",",","oder","wie")
redu_free_elem_length = { 10000:[10,10,10,10], 11111:[7,4,2,1]}
scope = 6
reconstructed_syntagmas = {
10000:{
0: {
2:["klitze",(15,)],
3:["kleine",(16,)],
4:["iii",(17,)],
6:[",",(18,)],
7:["oder",(19,)],
8:["wie",(20,)],
},
2: {
4:["klitze",(152,)],
5:["kleine",(163,)],
6:["iii",(174,)],
7:[",",(185,)],
8:["oder",(196,)],
9:["wie",(207,)],
},
3: {
1:["klitze",(150,)],
2:["kleine",(160,)],
3:["iii",(170,)],
4:[",",(180,)],
5:["oder",(190,)],
6:["wie",(200,)],
},
},
}
right_full_syntagmas = (((2, 4), (2, 5), (2, 6), (2, 7), (2, 8), (2, 9)), ((3, 1), (3, 2), (3, 3), (3, 4), (3, 5), (3, 6)))
right_allow_ids = (160, 163, 196, 200, 170, 174, 207, 180, 150, 152, 185, 190)
full_syntagmas, allow_ids = stats._exctract_full_syntagmas(reconstructed_syntagmas, scope,redu_free_elem_length,inp_syntagma_splitted)
#p((full_syntagmas, allow_ids))
full_syntagmas.should.be.equal(right_full_syntagmas)
set(allow_ids).should.be.equal(set(right_allow_ids))
assert False not in set([ True if len(syn)== scope else False for syn in full_syntagmas ])
###############################
##############################
### mixes scope ############
### Case __.1
syntagma_type = "pos"
inp_syntagma_splitted = [u'number', u'number', u'number']
redu_free_elem_length = {11111: [5, 6, 14]}
scope = 3
reconstructed_syntagmas = {
11111: {
2: {
8: [u'number', (33,)],
9: [u'number', (34, 34)],
10: [u'number', (35, 35, 35)],
11: [u'number', (36, 36, 36)],
12: [u'number', (37, 37)]
}
}
}
right_full_syntagmas = (((2, 8), (2, 9), (2, 10)),)
right_allow_ids = (33, 34, 35)
full_syntagmas, allow_ids = stats._exctract_full_syntagmas(reconstructed_syntagmas, scope,redu_free_elem_length,inp_syntagma_splitted,syntagma_type=syntagma_type)
#p((full_syntagmas, allow_ids))
full_syntagmas.should.be.equal(right_full_syntagmas)
set(allow_ids).should.be.equal(set(right_allow_ids))
assert False not in set([ True if len(syn)>= scope else False for syn in full_syntagmas ])
### Case __.2
syntagma_type = "pos"
inp_syntagma_splitted = ("NN","NP","NN")
redu_free_elem_length = {11111: [5, 12]}
scope = 3
reconstructed_syntagmas = {
11111: {
1: {
6: ["NN",(22, 22, 22)],
7: ["NP",(23, 23, 23)],
8: ["NN",(24, 24)],
9: ["NN",(221, 221, 221)],
10: ["NP",(232, 233, 236)],
11: ["NN",(240, 248)],
}
}
}
right_full_syntagmas = (((1, 6), (1, 7), (1, 8)), ((1, 9), (1, 10), (1, 11)))
right_allow_ids = (232, 233, 236, 240, 24, 22, 23, 248, 221)
full_syntagmas, allow_ids = stats._exctract_full_syntagmas(reconstructed_syntagmas, scope,redu_free_elem_length,inp_syntagma_splitted,syntagma_type=syntagma_type)
#p((full_syntagmas, allow_ids))
full_syntagmas.should.be.equal(right_full_syntagmas)
set(allow_ids).should.be.equal(set(right_allow_ids))
assert False not in set([ True if len(syn)>= scope else False for syn in full_syntagmas ])
### Case __.3
syntagma_type = "pos"
inp_syntagma_splitted = ("NN","NP","NN","NP")
redu_free_elem_length = {11111: [5, 12]}
scope = 4
reconstructed_syntagmas = {
11111: {
1: {
6: ["NN",(22, 22, 22)],
7: ["NP",(23, 23, 23)],
8: ["NN",(24, 24)],
9: ["NP",(221, 221, 221)],
10: ["NN",(232, 233, 236)],
11: ["NP",(240, 248)],
}
}
}
right_full_syntagmas = (((1, 6), (1, 7), (1, 8), (1, 9)),)
right_allow_ids = (24, 221, 22, 23)
full_syntagmas, allow_ids = stats._exctract_full_syntagmas(reconstructed_syntagmas, scope,redu_free_elem_length,inp_syntagma_splitted,syntagma_type=syntagma_type)
#p((full_syntagmas, allow_ids))
full_syntagmas.should.be.equal(right_full_syntagmas)
set(allow_ids).should.be.equal(set(right_allow_ids))
assert False not in set([ True if len(syn)>= scope else False for syn in full_syntagmas ])
### Case __.4
syntagma_type = "lexem"
inp_syntagma_splitted = ("klitze","kleine", "klitze")
redu_free_elem_length = {11111: [5, 15]}
scope = 3
reconstructed_syntagmas = {
11111: {
1: {
6: ["klitze",(22, 22, 22)],
7: ["kleine",(23, 23, 23)],
8: ["klitze",(24, 24)],
9: ["klitze",(22, 22, 22)],
10: ["kleine",(23, 23, 23)],
11: ["klitze",(24, 24)],
}
}
}
right_full_syntagmas = (((1, 6), (1, 7), (1, 8)), ((1, 9), (1, 10), (1, 11)))
right_allow_ids = (24, 22, 23)
full_syntagmas, allow_ids = stats._exctract_full_syntagmas(reconstructed_syntagmas, scope,redu_free_elem_length,inp_syntagma_splitted,syntagma_type=syntagma_type)
#p((full_syntagmas, allow_ids))
full_syntagmas.should.be.equal(right_full_syntagmas)
set(allow_ids).should.be.equal(set(right_allow_ids))
assert False not in set([ True if len(syn)>= scope else False for syn in full_syntagmas ])
### Case __.4
syntagma_type = "lexem"
inp_syntagma_splitted = ("klitze","kleine", "klitze", "kleine")
redu_free_elem_length = {11111: [5, 15]}
scope = 4
reconstructed_syntagmas = {
11111: {
1: {
6: ["klitze",(22, 22, 22)],
7: ["kleine",(23, 23, 23)],
8: ["klitze",(22, 22, 22)],
9: ["kleine",(23, 23, 23)],
10: ["klitze",(22, 22, 22)],
11: ["kleine",(23, 23, 23)],
}
}
}
right_full_syntagmas = (((1, 6), (1, 7), (1, 8), (1, 9)),)
right_allow_ids = (22, 23)
full_syntagmas, allow_ids = stats._exctract_full_syntagmas(reconstructed_syntagmas, scope,redu_free_elem_length,inp_syntagma_splitted,syntagma_type=syntagma_type)
#p((full_syntagmas, allow_ids))
full_syntagmas.should.be.equal(right_full_syntagmas)
set(allow_ids).should.be.equal(set(right_allow_ids))
assert False not in set([ True if len(syn)>= scope else False for syn in full_syntagmas ])
### Case __.5
syntagma_type = "lexem"
inp_syntagma_splitted = ("klitze","kleine",)
redu_free_elem_length = {11111: [5, 15]}
scope = 2
reconstructed_syntagmas = {
11111: {
1: {
6: ["klitze",(22, 22, 22)],
7: ["kleine",(23, 23, 23)],
8: ["klitze",(22, 22, 22)],
9: ["kleine",(23, 23, 23)],
10: ["klitze",(22, 22, 22)],
11: ["kleine",(23, 23, 23)],
}
}
}
right_full_syntagmas = (((1, 6), (1, 7)), ((1, 8), (1, 9)), ((1, 10), (1, 11)))
right_allow_ids = (22, 23)
full_syntagmas, allow_ids = stats._exctract_full_syntagmas(reconstructed_syntagmas, scope,redu_free_elem_length,inp_syntagma_splitted,syntagma_type=syntagma_type)
#p((full_syntagmas, allow_ids))
full_syntagmas.should.be.equal(right_full_syntagmas)
set(allow_ids).should.be.equal(set(right_allow_ids))
assert False not in set([ True if len(syn)>= scope else False for syn in full_syntagmas ])
@attr(status='stable')
#@wipd
def test_filter_full_rep_syn_632(self):
# Test for Stats._filter_full_rep_syn: given repetition records and a set of
# allowed record ids, only records whose leading element (the id) is in
# `allowed_ids` should survive the filter.
# NOTE(review): leading indentation appears to have been stripped in this copy
# of the file; code lines are left byte-identical on purpose.
stats = Stats(mode=self.mode)
#############################################################
####### order_output_by_syntagma_order = True ####
###############################################################
#####rep_type = 'repl'######
### Case 1.1
# Case 1.1 ('repl', syntagma-ordered): input is grouped per word as
# (word, (record, ...)). Records 15 and 17 in the 'klitze' group must be
# dropped because allowed_ids is {1, 2, 3}; the 'kleine' group (ids 2, 3)
# survives unchanged.
rep_type = 'repl'
_rep = (
(u'klitze',
(
(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
(15, 10000, u'[8]', u'[0, 1]', u'[0, 1]', u'klitze', u'klitze^4', u'e', 4, 5, u'VAPPER', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, u'eine', u'["ART"]', u'kleine', u'["ADJA"]', u'\xfcberaschung', u'["NN"]', u'@sch\xf6nesleben', u'["mention"]', u'#machwasdaraus', u'["hashtag"]', u'#bewegedeinarsch', u'["hashtag"]'),
(17, 11111, u'[5, 12]', u'[0, 1]', u'[0, 1]', u'klitze', u'klitze^4', u'e', 4, 5, u'VAPPER', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, u'eine', u'["ART"]', u'kleine', u'["ADJA"]', u'sache', u'["NN"]', u'.', u'["symbol"]', u'die', u'["PDS"]', u'aber', u'["ADV"]'))),
(u'kleine',
(
(2, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5ine', u'e', 5, 2, u'NE', u'["neutral", 0.0]', u'[0, 2]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]'),
(3, 8888, u'[4, 9]', u'[0, 3]', u'[0, 1]', u'kleine', u'klein^3e', u'n', 3, 4, u'NE', u'["neutral", 0.0]', u'[0, 2]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]')
)))
allowed_ids = set((1, 2, 3))
order_output_by_syntagma_order = True
# Expected: same grouped structure, with ids 15 and 17 removed.
right_filtered_reps = (
(u'klitze',
(
(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),)),
(u'kleine',
(
(2, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5ine', u'e', 5, 2, u'NE', u'["neutral", 0.0]', u'[0, 2]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]'),
(3, 8888, u'[4, 9]', u'[0, 3]', u'[0, 1]', u'kleine', u'klein^3e', u'n', 3, 4, u'NE', u'["neutral", 0.0]', u'[0, 2]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]')
)))
filtered_reps = stats._filter_full_rep_syn(rep_type,_rep, allowed_ids,order_output_by_syntagma_order, 0)
#p((filtered_reps))
tuple(filtered_reps).should.be.equal(tuple(right_filtered_reps))
#####rep_type = 'redu'######
### Case 2.1
# Case 2.1 ('redu', syntagma-ordered): same grouped layout but with
# reduplication-shaped records; id 4 in the 'klitze' group is filtered out
# because allowed_ids is {1, 2}.
rep_type = 'redu'
_rep = (
(u'klitze',
(
(1, 8888, u'[4, 9]', u'[0, 0]', u'[0, 0]', u'klitze', u'{"klitze": 1, "kli^4tze": 1}', 2, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
(4, 12222, u'[11]', u'[0, 1]', u'[0, 1]', u'klitze', u'{"klitze": 4}', 4, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'eine', u'["ART"]', u'kleine', u'["ADJA"]', u'\xfcberaschung', u'["NN"]', u',', u'["symbol"]', u'die', u'["PRELS"]', u'ich', u'["PPER"]'))),
(u'kleine',
(
(2, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'{"kle^5ine": 1, "klein^3e": 1}', 2, u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]'),
)))
allowed_ids = set((1, 2))
order_output_by_syntagma_order = True
# Expected: id 4 dropped; ids 1 and 2 preserved in their groups.
right_filtered_reps = (
(u'klitze',
(
(1, 8888, u'[4, 9]', u'[0, 0]', u'[0, 0]', u'klitze', u'{"klitze": 1, "kli^4tze": 1}', 2, u'NN', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),)),
(u'kleine',
(
(2, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'{"kle^5ine": 1, "klein^3e": 1}', 2, u'NE', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]'),
)))
filtered_reps = stats._filter_full_rep_syn(rep_type,_rep, allowed_ids,order_output_by_syntagma_order, 0)
#p((filtered_reps))
tuple(filtered_reps).should.be.equal(tuple(right_filtered_reps))
# ########################################################################################################################
# ########################################################################################################################
# ########################################################################################################################
# #############################################################
# ####### order_output_by_syntagma_order = False ####
# ############################################################
# #####rep_type = 'redu'######
### Case 1.1
# Flat-input case (order_output_by_syntagma_order = False): records are a flat
# tuple rather than grouped by word. Only ids 1 and 2 are allowed; ids 15, 17
# and 3 must be removed from the flat sequence.
rep_type = 'redu'
_rep = (
(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
(15, 10000, u'[8]', u'[0, 1]', u'[0, 1]', u'klitze', u'klitze^4', u'e', 4, 5, u'VAPPER', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, u'eine', u'["ART"]', u'kleine', u'["ADJA"]', u'\xfcberaschung', u'["NN"]', u'@sch\xf6nesleben', u'["mention"]', u'#machwasdaraus', u'["hashtag"]', u'#bewegedeinarsch', u'["hashtag"]'),
(17, 11111, u'[5, 12]', u'[0, 1]', u'[0, 1]', u'klitze', u'klitze^4', u'e', 4, 5, u'VAPPER', u'["neutral", 0.0]', None, None, None, None, None, None, None, None, None, u'eine', u'["ART"]', u'kleine', u'["ADJA"]', u'sache', u'["NN"]', u'.', u'["symbol"]', u'die', u'["PDS"]', u'aber', u'["ADV"]'),
(2, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5ine', u'e', 5, 2, u'NE', u'["neutral", 0.0]', u'[0, 2]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]'),
(3, 8888, u'[4, 9]', u'[0, 3]', u'[0, 1]', u'kleine', u'klein^3e', u'n', 3, 4, u'NE', u'["neutral", 0.0]', u'[0, 2]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]')
)
allowed_ids = set((1, 2))
order_output_by_syntagma_order = False
right_filtered_reps = (
(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
(2, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5ine', u'e', 5, 2, u'NE', u'["neutral", 0.0]', u'[0, 2]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]')
)
filtered_reps = stats._filter_full_rep_syn(rep_type,_rep, allowed_ids,order_output_by_syntagma_order, 0)
#p((filtered_reps))
tuple(filtered_reps).should.be.equal(tuple(right_filtered_reps))
@attr(status='stable')
#@wipd
def test_delete_dublicats_in_reps_633(self):
stats = Stats(mode=self.mode)
#############################################################
####### order_output_by_syntagma_order = True ####
###############################################################
### Case 1.1
order_output_by_syntagma_order = True
_rep = (
(u'klitze',
(
(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
)),
(u'kleine',
(
(2, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5ine', u'e', 5, 2, u'NE', u'["neutral", 0.0]', u'[0, 2]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]'),
(3, 8888, u'[4, 9]', u'[0, 3]', u'[0, 1]', u'kleine', u'klein^3e', u'n', 3, 4, u'NE', u'["neutral", 0.0]', u'[0, 2]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]'),
(3, 8888, u'[4, 9]', u'[0, 3]', u'[0, 1]', u'kleine', u'klein^3e', u'n', 3, 4, u'NE', u'["neutral", 0.0]', u'[0, 2]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]')
)))
right_dublicates_free = (
(u'klitze',
(
(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
)),
(u'kleine',
(
(2, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5ine', u'e', 5, 2, u'NE', u'["neutral", 0.0]', u'[0, 2]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]'),
(3, 8888, u'[4, 9]', u'[0, 3]', u'[0, 1]', u'kleine', u'klein^3e', u'n', 3, 4, u'NE', u'["neutral", 0.0]', u'[0, 2]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]')
)))
dublicats_free = stats._delete_dublicats_in_reps(_rep, order_output_by_syntagma_order, 0)
#p((dublicats_free))
tuple(right_dublicates_free).should.be.equal(tuple(dublicats_free))
### Case 1.2
stats._full_repetativ_syntagma = True
order_output_by_syntagma_order = True
_rep = (
(u'klitze',
(
(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
)),
(u'kleine',
(
(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
)))
right_dublicates_free = ()
dublicats_free = stats._delete_dublicats_in_reps(_rep, order_output_by_syntagma_order, 0)
#p((dublicats_free))
tuple(right_dublicates_free).should.be.equal(tuple(dublicats_free))
### Case 1.3
stats._full_repetativ_syntagma = False
order_output_by_syntagma_order = True
_rep = (
(u'klitze',
(
(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
)),
(u'kleine',
(
(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
)))
right_dublicates_free = (
(u'klitze', ()),
(u'kleine',
(
(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
)))
dublicats_free = stats._delete_dublicats_in_reps(_rep, order_output_by_syntagma_order, 0)
#p((dublicats_free))
tuple(right_dublicates_free).should.be.equal(tuple(dublicats_free))
### Case 1.4
stats._full_repetativ_syntagma = True
order_output_by_syntagma_order = True
_rep = (
('number',
(
(20, 11111, u'[5, 12]', u'[1, 6]', u'[1, 6]', u'1', u'1^5', u'1', 5, 0, u'number', u'["neutral", 0.0]', None, u'aber', u'["ADV"]', u'trotzdem', u'["PAV"]', u'wichtig', u'["ADJA"]', u'ist', u'["NN"]', u'!', u'["symbol"]', 2, u'["number"]', 3, u'["number"]', 4, u'["number"]', 5, u'["number"]', 6, u'["number"]'),
(21, 11111, u'[5, 12]', u'[1, 7]', u'[1, 7]', u'2', u'2^4', u'2', 4, 0, u'number', u'["neutral", 0.0]', None, u'trotzdem', u'["PAV"]', u'wichtig', u'["ADJA"]', u'ist', u'["NN"]', u'!', u'["symbol"]', 1, u'["number"]', 3, u'["number"]', 4, u'["number"]', 5, u'["number"]', 6, u'["number"]', None, None),
(22, 11111, u'[5, 12]', u'[1, 8]', u'[1, 8]', u'3', u'3^5', u'3', 5, 0, u'number', u'["neutral", 0.0]', None, u'wichtig', u'["ADJA"]', u'ist', u'["NN"]', u'!', u'["symbol"]', 1, u'["number"]', 2, u'["number"]', 4, u'["number"]', 5, u'["number"]', 6, u'["number"]', None, None, None, None),
(23, 11111, u'[5, 12]', u'[1, 9]', u'[1, 9]', u'4', u'4^4', u'4', 4, 0, u'number', u'["neutral", 0.0]', None, u'ist', u'["NN"]', u'!', u'["symbol"]', 1, u'["number"]', 2, u'["number"]', 3, u'["number"]', 5, u'["number"]', 6, u'["number"]', None, None, None, None, None, None))),
('number',
(
(21, 11111, u'[5, 12]', u'[1, 7]', u'[1, 7]', u'2', u'2^4', u'2', 4, 0, u'number', u'["neutral", 0.0]', None, u'trotzdem', u'["PAV"]', u'wichtig', u'["ADJA"]', u'ist', u'["NN"]', u'!', u'["symbol"]', 1, u'["number"]', 3, u'["number"]', 4, u'["number"]', 5, u'["number"]', 6, u'["number"]', None, None),
(22, 11111, u'[5, 12]', u'[1, 8]', u'[1, 8]', u'3', u'3^5', u'3', 5, 0, u'number', u'["neutral", 0.0]', None, u'wichtig', u'["ADJA"]', u'ist', u'["NN"]', u'!', u'["symbol"]', 1, u'["number"]', 2, u'["number"]', 4, u'["number"]', 5, u'["number"]', 6, u'["number"]', None, None, None, None),
(23, 11111, u'[5, 12]', u'[1, 9]', u'[1, 9]', u'4', u'4^4', u'4', 4, 0, u'number', u'["neutral", 0.0]', None, u'ist', u'["NN"]', u'!', u'["symbol"]', 1, u'["number"]', 2, u'["number"]', 3, u'["number"]', 5, u'["number"]', 6, u'["number"]', None, None, None, None, None, None),
(24, 11111, u'[5, 12]', u'[1, 10]', u'[1, 10]', u'5', u'5^5', u'5', 5, 0, u'number', u'["neutral", 0.0]', None, u'!', u'["symbol"]', 1, u'["number"]', 2, u'["number"]', 3, u'["number"]', 4, u'["number"]', 6, u'["number"]', None, None, None, None, None, None, None, None))),
('number',
(
(22, 11111, u'[5, 12]', u'[1, 8]', u'[1, 8]', u'3', u'3^5', u'3', 5, 0, u'number', u'["neutral", 0.0]', None, u'wichtig', u'["ADJA"]', u'ist', u'["NN"]', u'!', u'["symbol"]', 1, u'["number"]', 2, u'["number"]', 4, u'["number"]', 5, u'["number"]', 6, u'["number"]', None, None, None, None),
(23, 11111, u'[5, 12]', u'[1, 9]', u'[1, 9]', u'4', u'4^4', u'4', 4, 0, u'number', u'["neutral", 0.0]', None, u'ist', u'["NN"]', u'!', u'["symbol"]', 1, u'["number"]', 2, u'["number"]', 3, u'["number"]', 5, u'["number"]', 6, u'["number"]', None, None, None, None, None, None),
(24, 11111, u'[5, 12]', u'[1, 10]', u'[1, 10]', u'5', u'5^5', u'5', 5, 0, u'number', u'["neutral", 0.0]', None, u'!', u'["symbol"]', 1, u'["number"]', 2, u'["number"]', 3, u'["number"]', 4, u'["number"]', 6, u'["number"]', None, None, None, None, None, None, None, None))))
right_dublicates_free = (
('number',
(
(20, 11111, u'[5, 12]', u'[1, 6]', u'[1, 6]', u'1', u'1^5', u'1', 5, 0, u'number', u'["neutral", 0.0]', None, u'aber', u'["ADV"]', u'trotzdem', u'["PAV"]', u'wichtig', u'["ADJA"]', u'ist', u'["NN"]', u'!', u'["symbol"]', 2, u'["number"]', 3, u'["number"]', 4, u'["number"]', 5, u'["number"]', 6, u'["number"]'),)),
('number',
(
(21, 11111, u'[5, 12]', u'[1, 7]', u'[1, 7]', u'2', u'2^4', u'2', 4, 0, u'number', u'["neutral", 0.0]', None, u'trotzdem', u'["PAV"]', u'wichtig', u'["ADJA"]', u'ist', u'["NN"]', u'!', u'["symbol"]', 1, u'["number"]', 3, u'["number"]', 4, u'["number"]', 5, u'["number"]', 6, u'["number"]', None, None),)),
('number',
(
(22, 11111, u'[5, 12]', u'[1, 8]', u'[1, 8]', u'3', u'3^5', u'3', 5, 0, u'number', u'["neutral", 0.0]', None, u'wichtig', u'["ADJA"]', u'ist', u'["NN"]', u'!', u'["symbol"]', 1, u'["number"]', 2, u'["number"]', 4, u'["number"]', 5, u'["number"]', 6, u'["number"]', None, None, None, None),
(23, 11111, u'[5, 12]', u'[1, 9]', u'[1, 9]', u'4', u'4^4', u'4', 4, 0, u'number', u'["neutral", 0.0]', None, u'ist', u'["NN"]', u'!', u'["symbol"]', 1, u'["number"]', 2, u'["number"]', 3, u'["number"]', 5, u'["number"]', 6, u'["number"]', None, None, None, None, None, None),
(24, 11111, u'[5, 12]', u'[1, 10]', u'[1, 10]', u'5', u'5^5', u'5', 5, 0, u'number', u'["neutral", 0.0]', None, u'!', u'["symbol"]', 1, u'["number"]', 2, u'["number"]', 3, u'["number"]', 4, u'["number"]', 6, u'["number"]', None, None, None, None, None, None, None, None)
)))
dublicats_free = stats._delete_dublicats_in_reps(_rep, order_output_by_syntagma_order, 0)
#p((dublicats_free))
tuple(right_dublicates_free).should.be.equal(tuple(dublicats_free))
# #############################################################
# ####### order_output_by_syntagma_order = False ####
# ############################################################
# #####rep_type = 'redu'######
### Case 2.1
order_output_by_syntagma_order = False
_rep = (
(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
(2, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5ine', u'e', 5, 2, u'NE', u'["neutral", 0.0]', u'[0, 2]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]'),
(2, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5ine', u'e', 5, 2, u'NE', u'["neutral", 0.0]', u'[0, 2]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]'),
(2, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5ine', u'e', 5, 2, u'NE', u'["neutral", 0.0]', u'[0, 2]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]'),
(2, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5ine', u'e', 5, 2, u'NE', u'["neutral", 0.0]', u'[0, 2]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]'),
(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
)
right_dublicates_free = (
(1, 8888, u'[4, 9]', u'[0, 1]', u'[0, 0]', u'klitze', u'kli^4tze', u'i', 4, 2, u'NN', u'["neutral", 0.0]', u'[0, 0]', None, None, None, None, None, None, None, None, None, None, u'kleine', u'["NE", {"kle^5ine": 1, "klein^3e": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]'),
(2, 8888, u'[4, 9]', u'[0, 2]', u'[0, 1]', u'kleine', u'kle^5ine', u'e', 5, 2, u'NE', u'["neutral", 0.0]', u'[0, 2]', None, None, None, None, None, None, None, None, u'klitze', u'["NN", {"klitze": 1, "kli^4tze": 1}]', u'\xfcberaschung', u'["NN"]', u'.', u'["symbol"]', u'trotzdem', u'["PAV"]', u'hat', u'["VAFIN"]', u'sie', u'["PPER"]'),
)
dublicats_free = stats._delete_dublicats_in_reps(_rep, order_output_by_syntagma_order, 0)
#p((dublicats_free))
tuple(dublicats_free).should.be.equal(tuple(right_dublicates_free))
def get_dict_rows_from_csv(self, fname):
    """Yield each data row of '<fname>.csv' as a dict keyed by the header row.

    The file is ';'-delimited; the first row supplies the column names.
    Columns with an empty header name are dropped from each yielded dict,
    and rows that are empty or whose first cell is empty are skipped.

    :param fname: path to the CSV file, without the ".csv" extension
    :return: generator of {column_name: cell_value} dicts
    """
    with open(fname + ".csv") as csvfile:
        reader = csv.reader(csvfile, delimiter=';')
        # First row holds the column names for all remaining rows.
        # next(reader) instead of reader.next(): the .next() method is
        # Python-2-only; the builtin works on both Python 2.6+ and 3.
        columns = next(reader)
        for row in reader:
            # Guard against completely empty rows (row[0] would raise
            # IndexError) as well as rows with an empty leading cell.
            if row and row[0]:
                yield {k: v for k, v in zip(columns, row) if k}
def get_list_rows_from_csv(self, fname):
    """Yield each data row of '<fname>.csv' as a list of cell strings.

    The file is ';'-delimited; the header row is consumed (to skip it) and
    is not yielded.  Rows that are empty or whose first cell is empty are
    skipped.

    :param fname: path to the CSV file, without the ".csv" extension
    :return: generator of row lists
    """
    with open(fname + ".csv") as csvfile:
        reader = csv.reader(csvfile, delimiter=';')
        # Consume the header row.  next(reader) instead of reader.next():
        # the .next() method is Python-2-only.  The original also bound the
        # header to an unused local; the binding was dropped.
        next(reader)
        for row in reader:
            # Guard against empty rows (row[0] would raise IndexError).
            if row and row[0]:
                yield row
####### 700 #######
@attr(status='stable')
#@wipd
def test_compute_rep_sum_700(self):
    """Stats.compute_rep_sum(): summary tables for 'repl' and 'redu' repetitions.

    Exercises the wildcard ("*") search, the ignore_num/ignore_symbol flags,
    sentiment filtering, explicit word lists, stemmed search, and the
    word_examples_sum_table switch, then checks the "redu" summary.
    NOTE(review): the expected dicts below were presumably captured from a
    known-good run against the bundled test DB — confirm before editing them.
    """
    self.prj_folder()
    #self.blogger_corpus()
    self.test_dbs()
    stats = Stats(mode=self.mode,use_cash=True)#, )
    stats.open(os.path.join(self.tempdir_testdbs,self.db_blogger_plaintext_stats_de))
    ################################################
    #### "repl" ####
    ################################################
    ## Case 1.1
    summery = stats.compute_rep_sum("*", "repl")
    # Normalize the nested mapping values into plain dicts so they compare
    # equal to the literal expectation below.
    summery = {letter:{rep_num:[rep_num_data[0], dict(rep_num_data[1])] for rep_num, rep_num_data in letter_data.iteritems()} for letter, letter_data in summery.iteritems()}
    #p(summery)
    right_summery = {u'a': {3: [1, {u'\xfcbe^4r^5a^3schun^6g^3': 1}], 4: [1, {u'ka^4n^5': 1}], 6: [1, {u'ta^6g^6': 1}]}, u'z': {3: [1, {u'kli^4tz^3': 1}]}, u'e': {8: [2, {u'kle^4i^5n^4e^8': 1, u'ble^8ibt': 1}], 3: [4, {u'kle^3i^3n^3': 1, u'klein^4e^3s^4': 2, u'kleine^3r^2e^5': 1}], 4: [10, {u'\xfcbe^4r^5a^3schun^6g^3': 1, u'ble^4ibt': 1, u'kle^4i^5n^4e^8': 1, u'kle^4i^5n^3e^2s^3': 2, u'eine^4': 1, u'kleine^4s^7': 2, u'klitze^4': 2}], 5: [6, {u'kle^5ine': 1, u'kleinere^5': 1, u'kle^5in^5e': 1, u'kle^5i^2n^4e^5': 2, u'kleine^3r^2e^5': 1}], 7: [1, {u'kli^4tze^7': 1}]}, u'g': {3: [1, {u'\xfcbe^4r^5a^3schun^6g^3': 1}], 6: [1, {u'ta^6g^6': 1}]}, u'4': {4: [1, {u'4^4': 1}]}, u')': {3: [3, {u'-)^3': 3}], 4: [1, {u':-)^4': 1}]}, u'h': {3: [1, {u'auswah^3l^4': 1}]}, u'l': {4: [1, {u'auswah^3l^4': 1}]}, u'n': {3: [5, {u'kle^4i^5n^3e^2s^3': 2, u'kle^3i^3n^3': 1, u'klein^3e^2s': 1, u'klein^3e': 1}], 4: [4, {u'kle^4i^5n^4e^8': 1, u'klein^4e^3s^4': 2, u'kle^5i^2n^4e^5': 1}], 5: [5, {u'klein^5': 1, u'ka^4n^5': 1, u'kle^5in^5e': 1, u'm\xe4dchen^5': 2}], 6: [2, {u'\xfcbe^4r^5a^3schun^6g^3': 1, u'kan^6': 1}]}, u'1': {8: [1, {u'1^8': 1}], 5: [1, {u'1^5': 1}], 6: [1, {u'1^6': 1}]}, u'i': {8: [1, {u'wichti^8g': 1}], 3: [2, {u'kle^3i^3n^3': 1, u'kli^3tzes^3': 1}], 4: [3, {u'kli^4tz': 1, u'kli^4tze^7': 1, u'kli^4tz^3': 1}], 5: [4, {u'kle^4i^5n^3e^2s^3': 2, u'kle^4i^5n^4e^8': 1, u'geni^5es^8t^5': 1}]}, u'3': {5: [1, {u'3^5': 1}]}, u'r': {4: [2, {u'\xfcber^4aschung': 2}], 5: [2, {u'\xfcbe^4r^5a^3schun^6g^3': 1, u'\xfcber^5aschung': 1}]}, u'5': {5: [1, {u'5^5': 1}]}, u'\U0001f600': {5: [1, {u'\U0001f600^5': 1}]}, u'2': {4: [1, {u'2^4': 1}]}, u's': {8: [1, {u'geni^5es^8t^5': 1}], 3: [3, {u'kle^4i^5n^3e^2s^3': 2, u'kli^3tzes^3': 1}], 4: [5, {u'kleines^4': 1, u'klitzes^4': 1, u'genies^4t^2': 1, u'klein^4e^3s^4': 2}], 6: [1, {u'is^6t': 1}], 7: [2, {u'kleine^4s^7': 2}]}, u'.': {5: [2, {u'.^5': 2}]}, u'u': {12: [1, {u'hu^12ngrig': 1}]}, u'\U0001f62b': {4: [1, {u'\U0001f62b^4': 1}]}, u't': {5: [1, {u'geni^5es^8t^5': 1}]}}
    summery.should.be.equal(right_summery)
    # i=0
    # for letter, letter_data in summery.iteritems():
    # for rep_num, rep_num_data in letter_data.iteritems():
    # i+=1
    # #print repr(letter), rep_num,rep_num_data
    # print i, letter,rep_num, rep_num_data
    # print "\n\n\n"
    #### "repl" ####
    ## Case 1.2: numbers and symbols excluded from the summary.
    summery = stats.compute_rep_sum("*", "repl", ignore_num=True, ignore_symbol=True)
    summery = {letter:{rep_num:[rep_num_data[0], dict(rep_num_data[1])] for rep_num, rep_num_data in letter_data.iteritems()} for letter, letter_data in summery.iteritems()}
    #p(summery)
    right_summery = {u'a': {3: [1, {u'\xfcbe^4r^5a^3schun^6g^3': 1}], 4: [1, {u'ka^4n^5': 1}], 6: [1, {u'ta^6g^6': 1}]}, u'e': {8: [2, {u'kle^4i^5n^4e^8': 1, u'ble^8ibt': 1}], 3: [4, {u'kle^3i^3n^3': 1, u'klein^4e^3s^4': 2, u'kleine^3r^2e^5': 1}], 4: [10, {u'\xfcbe^4r^5a^3schun^6g^3': 1, u'ble^4ibt': 1, u'kle^4i^5n^4e^8': 1, u'kle^4i^5n^3e^2s^3': 2, u'eine^4': 1, u'kleine^4s^7': 2, u'klitze^4': 2}], 5: [6, {u'kle^5ine': 1, u'kleinere^5': 1, u'kle^5in^5e': 1, u'kle^5i^2n^4e^5': 2, u'kleine^3r^2e^5': 1}], 7: [1, {u'kli^4tze^7': 1}]}, u'g': {3: [1, {u'\xfcbe^4r^5a^3schun^6g^3': 1}], 6: [1, {u'ta^6g^6': 1}]}, u'i': {8: [1, {u'wichti^8g': 1}], 3: [2, {u'kle^3i^3n^3': 1, u'kli^3tzes^3': 1}], 4: [3, {u'kli^4tz': 1, u'kli^4tze^7': 1, u'kli^4tz^3': 1}], 5: [4, {u'kle^4i^5n^3e^2s^3': 2, u'kle^4i^5n^4e^8': 1, u'geni^5es^8t^5': 1}]}, u'h': {3: [1, {u'auswah^3l^4': 1}]}, u'l': {4: [1, {u'auswah^3l^4': 1}]}, u'n': {3: [5, {u'kle^4i^5n^3e^2s^3': 2, u'kle^3i^3n^3': 1, u'klein^3e^2s': 1, u'klein^3e': 1}], 4: [4, {u'kle^4i^5n^4e^8': 1, u'klein^4e^3s^4': 2, u'kle^5i^2n^4e^5': 1}], 5: [5, {u'klein^5': 1, u'ka^4n^5': 1, u'kle^5in^5e': 1, u'm\xe4dchen^5': 2}], 6: [2, {u'\xfcbe^4r^5a^3schun^6g^3': 1, u'kan^6': 1}]}, u')': {3: [3, {u'-)^3': 3}], 4: [1, {u':-)^4': 1}]}, u's': {8: [1, {u'geni^5es^8t^5': 1}], 3: [3, {u'kle^4i^5n^3e^2s^3': 2, u'kli^3tzes^3': 1}], 4: [5, {u'kleines^4': 1, u'klitzes^4': 1, u'genies^4t^2': 1, u'klein^4e^3s^4': 2}], 6: [1, {u'is^6t': 1}], 7: [2, {u'kleine^4s^7': 2}]}, u'r': {4: [2, {u'\xfcber^4aschung': 2}], 5: [2, {u'\xfcbe^4r^5a^3schun^6g^3': 1, u'\xfcber^5aschung': 1}]}, u'u': {12: [1, {u'hu^12ngrig': 1}]}, u'\U0001f600': {5: [1, {u'\U0001f600^5': 1}]}, u'z': {3: [1, {u'kli^4tz^3': 1}]}, u'\U0001f62b': {4: [1, {u'\U0001f62b^4': 1}]}, u't': {5: [1, {u'geni^5es^8t^5': 1}]}}
    summery.should.be.equal(right_summery)
    #### "repl" ####
    ## Case 1.3: counts only (no word-example tables).
    summery = stats.compute_rep_sum("*", "repl", ignore_num=True, ignore_symbol=True, word_examples_sum_table=False)
    summery = {letter:{rep_num:[rep_num_data[0]] for rep_num, rep_num_data in letter_data.iteritems()} for letter, letter_data in summery.iteritems()}
    #p(summery)
    right_summery = {u'a': {3: [1], 4: [1], 6: [1]}, u'e': {8: [2], 3: [4], 4: [10], 5: [6], 7: [1]}, u'g': {3: [1], 6: [1]}, u'i': {8: [1], 3: [2], 4: [3], 5: [4]}, u'h': {3: [1]}, u'l': {4: [1]}, u'n': {3: [5], 4: [4], 5: [5], 6: [2]}, u')': {3: [3], 4: [1]}, u's': {8: [1], 3: [3], 4: [5], 6: [1], 7: [2]}, u'r': {4: [2], 5: [2]}, u'u': {12: [1]}, u'\U0001f600': {5: [1]}, u'z': {3: [1]}, u'\U0001f62b': {4: [1]}, u't': {5: [1]}}
    summery.should.be.equal(right_summery)
    #### "repl" ####
    ## Case 1.4: restricted to positive-sentiment contexts.
    summery = stats.compute_rep_sum("*", "repl", ignore_num=True, ignore_symbol=True,sentiment="positive")
    # NOTE(review): "summery = summery = ..." is a duplicated assignment
    # (harmless but redundant); left as-is here, flagged for cleanup.
    summery = summery = {letter:{rep_num:[rep_num_data[0], dict(rep_num_data[1])] for rep_num, rep_num_data in letter_data.iteritems()} for letter, letter_data in summery.iteritems()}
    #p(summery)
    right_summery = {u')': {3: [3, {u'-)^3': 3}], 4: [1, {u':-)^4': 1}]}, u'\U0001f600': {5: [1, {u'\U0001f600^5': 1}]}}
    summery.should.be.equal(right_summery)
    #### "repl" ####
    ## Case 1.5: explicit one-word search list.
    summery = stats.compute_rep_sum(["klitze"], "repl", ignore_num=True, ignore_symbol=True,word_examples_sum_table=True)
    summery = summery = {letter:{rep_num:[rep_num_data[0], dict(rep_num_data[1])] for rep_num, rep_num_data in letter_data.iteritems()} for letter, letter_data in summery.iteritems()}
    #p(summery)
    right_summery = {u'i': {4: [1, {u'kli^4tze^7': 1}]}, u'e': {4: [2, {u'klitze^4': 2}], 7: [1, {u'kli^4tze^7': 1}]}}
    summery.should.be.equal(right_summery)
    #### "repl" ####
    ## Case 1.6: explicit two-word search list.
    summery = stats.compute_rep_sum(["klitze", "kleine"], "repl", ignore_num=True, ignore_symbol=True)
    summery = {letter:{rep_num:[rep_num_data[0], dict(rep_num_data[1])] for rep_num, rep_num_data in letter_data.iteritems()} for letter, letter_data in summery.iteritems()}
    #p(summery)
    right_summery = {u'i': {4: [1, {u'kli^4tze^7': 1}]}, u'e': {4: [1, {u'klitze^4': 1}], 5: [2, {u'kle^5ine': 1, u'kle^5in^5e': 1}], 7: [1, {u'kli^4tze^7': 1}]}, u'n': {3: [1, {u'klein^3e': 1}], 5: [1, {u'kle^5in^5e': 1}]}}
    summery.should.be.equal(right_summery)
    #### "repl" ####
    ## Case 1.7: stemmed search widens the match set.
    summery = stats.compute_rep_sum(["klitze", "kleine"], "repl", ignore_num=True, ignore_symbol=True,stemmed_search=True)
    summery = {letter:{rep_num:[rep_num_data[0], dict(rep_num_data[1])] for rep_num, rep_num_data in letter_data.iteritems()} for letter, letter_data in summery.iteritems()}
    #p(summery)
    right_summery = {u'i': {3: [2, {u'kle^3i^3n^3': 1, u'kli^3tzes^3': 1}], 4: [3, {u'kli^4tz': 1, u'kli^4tze^7': 1, u'kli^4tz^3': 1}]}, u's': {3: [1, {u'kli^3tzes^3': 1}], 4: [2, {u'klitzes^4': 1, u'kleines^4': 1}]}, u'z': {3: [1, {u'kli^4tz^3': 1}]}, u'e': {3: [1, {u'kle^3i^3n^3': 1}], 4: [1, {u'klitze^4': 1}], 5: [2, {u'kle^5ine': 1, u'kle^5in^5e': 1}], 7: [1, {u'kli^4tze^7': 1}]}, u'n': {3: [3, {u'klein^3e^2s': 1, u'kle^3i^3n^3': 1, u'klein^3e': 1}], 5: [2, {u'kle^5in^5e': 1, u'klein^5': 1}]}}
    summery.should.be.equal(right_summery)
    #### "repl" ####
    ## Case 1.5 (repeat: same one-word search without the explicit table flag)
    summery = stats.compute_rep_sum(["klitze"], "repl", ignore_num=True, ignore_symbol=True)
    summery = {letter:{rep_num:[rep_num_data[0], dict(rep_num_data[1])] for rep_num, rep_num_data in letter_data.iteritems()} for letter, letter_data in summery.iteritems()}
    #p(summery)
    right_summery = {u'i': {4: [1, {u'kli^4tze^7': 1}]}, u'e': {4: [2, {u'klitze^4': 2}], 7: [1, {u'kli^4tze^7': 1}]}}
    summery.should.be.equal(right_summery)
    #### "repl" ####
    ## Case 1.6 (repeat: stemmed two-word search should be deterministic)
    summery = stats.compute_rep_sum(["klitze", "kleine"], "repl", ignore_num=True, ignore_symbol=True,stemmed_search=True)
    summery = {letter:{rep_num:[rep_num_data[0], dict(rep_num_data[1])] for rep_num, rep_num_data in letter_data.iteritems()} for letter, letter_data in summery.iteritems()}
    #p(summery)
    right_summery = {u'i': {3: [2, {u'kle^3i^3n^3': 1, u'kli^3tzes^3': 1}], 4: [3, {u'kli^4tz': 1, u'kli^4tze^7': 1, u'kli^4tz^3': 1}]}, u's': {3: [1, {u'kli^3tzes^3': 1}], 4: [2, {u'klitzes^4': 1, u'kleines^4': 1}]}, u'z': {3: [1, {u'kli^4tz^3': 1}]}, u'e': {3: [1, {u'kle^3i^3n^3': 1}], 4: [1, {u'klitze^4': 1}], 5: [2, {u'kle^5ine': 1, u'kle^5in^5e': 1}], 7: [1, {u'kli^4tze^7': 1}]}, u'n': {3: [3, {u'klein^3e^2s': 1, u'kle^3i^3n^3': 1, u'klein^3e': 1}], 5: [2, {u'kle^5in^5e': 1, u'klein^5': 1}]}}
    summery.should.be.equal(right_summery)
    # ################################################
    # #### "redu" ####
    # ##############################################
    ## Case 2.1: reduplication summary is keyed by word, not letter.
    summery = stats.compute_rep_sum("*", "redu")
    summery = {word:{redu_length:occur for redu_length, occur in word_data.iteritems()} for word, word_data in summery.iteritems()}
    #p(summery)
    right_summery = {u'-)': {2: 1}, u'baseline': {3: 2}, u'bleibt': {2: 1}, u'geniest': {2: 1}, u'in': {4: 2}, u'kan': {2: 1}, u'klein': {2: 1}, u'kleine': {2: 1}, u'kleinere': {2: 1}, u'kleines': {2: 1, 3: 2}, u'klitz': {3: 1}, u'klitze': {2: 1, 4: 1}, u'klitzes': {2: 1}}
    summery.should.be.equal(right_summery)
    # i=0
    # for word, word_data in summery.iteritems():
    # for redu_length, occur in word_data.iteritems():
    # i+= 1
    # print word,redu_length, occur
#################################END##################################################
############################EXTERN METHODS############################################
######################################################################################
####################################################################################################
####################################################################################################
###################### STOP STABLE TESTS #########################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
###################### START WORK_IN_PROGRESS (wipd) TESTS #########################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
###################### STOP WORK_IN_PROGRESS (wipd) TESTS #########################################
####################################################################################################
####################################################################################################
| 95.815748
| 52,894
| 0.506155
| 138,375
| 905,363
| 3.222584
| 0.007183
| 0.183242
| 0.207586
| 0.207748
| 0.96832
| 0.961301
| 0.953932
| 0.946789
| 0.940548
| 0.936045
| 0
| 0.067833
| 0.206725
| 905,363
| 9,448
| 52,895
| 95.825889
| 0.55284
| 0.243928
| 0
| 0.715126
| 0
| 0.073259
| 0.329513
| 0.025846
| 0
| 0
| 0
| 0
| 0.029228
| 0
| null | null | 0
| 0.005124
| null | null | 0.005694
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
7090b23856074a03e18d69d3f6c24ac27a12ba49
| 297
|
py
|
Python
|
src/researchhub_case/views/__init__.py
|
ResearchHub/ResearchHub-Backend-Open
|
d36dca33afae2d442690694bb2ab17180d84bcd3
|
[
"MIT"
] | 18
|
2021-05-20T13:20:16.000Z
|
2022-02-11T02:40:18.000Z
|
src/researchhub_case/views/__init__.py
|
ResearchHub/ResearchHub-Backend-Open
|
d36dca33afae2d442690694bb2ab17180d84bcd3
|
[
"MIT"
] | 109
|
2021-05-21T20:14:23.000Z
|
2022-03-31T20:56:10.000Z
|
src/researchhub_case/views/__init__.py
|
ResearchHub/ResearchHub-Backend-Open
|
d36dca33afae2d442690694bb2ab17180d84bcd3
|
[
"MIT"
] | 4
|
2021-05-17T13:47:53.000Z
|
2022-02-12T10:48:21.000Z
|
# flake8: noqa
from .author_claim_case_moderator_view import get_author_claim_counts_for_mods
from .author_claim_case_moderator_view import handle_author_claim_cases_for_mods
from .author_claim_case_view import AuthorClaimCaseViewSet
from .author_claim_case_view import validate_user_request_email
| 59.4
| 80
| 0.919192
| 45
| 297
| 5.466667
| 0.444444
| 0.268293
| 0.243902
| 0.308943
| 0.601626
| 0.601626
| 0.308943
| 0
| 0
| 0
| 0
| 0.003584
| 0.060606
| 297
| 5
| 81
| 59.4
| 0.878136
| 0.040404
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3b69fa4730083236317949c3d71378dfa3e3b549
| 509,166
|
py
|
Python
|
tags/test_views.py
|
UW-GAC/pie
|
89ae277f5ba1357580d78c3527f26200686308a6
|
[
"MIT"
] | null | null | null |
tags/test_views.py
|
UW-GAC/pie
|
89ae277f5ba1357580d78c3527f26200686308a6
|
[
"MIT"
] | 3
|
2020-01-02T20:17:06.000Z
|
2020-01-04T21:13:09.000Z
|
tags/test_views.py
|
UW-GAC/pie
|
89ae277f5ba1357580d78c3527f26200686308a6
|
[
"MIT"
] | 1
|
2021-10-29T22:15:27.000Z
|
2021-10-29T22:15:27.000Z
|
"""Tests of views in the tags app."""
import copy
from faker import Faker
from django.contrib.auth.models import Group
from django.urls import reverse
from core.factories import UserFactory
from core.utils import (LoginRequiredTestCase, PhenotypeTaggerLoginTestCase, UserLoginTestCase,
DCCAnalystLoginTestCase, DCCDeveloperLoginTestCase, get_autocomplete_view_ids)
from trait_browser.factories import SourceDatasetFactory, SourceStudyVersionFactory, SourceTraitFactory, StudyFactory
from trait_browser.models import SourceTrait
from . import factories
from . import forms
from . import models
from . import tables
from . import views
# Module-level Faker instance shared by the tests below for generating data.
fake = Faker()
class TagDetailTestsMixin(object):
    """Mixin to run standard tests for the TagDetail view, for use with TestCase or subclass of TestCase."""

    def get_url(self, *args):
        """Build the URL for the tag detail view from the given args (tag pk)."""
        return reverse('tags:tag:detail', args=args)

    def test_view_success_code(self):
        """Returns successful response code."""
        response = self.client.get(self.get_url(self.tag.pk))
        self.assertEqual(response.status_code, 200)

    def test_view_with_invalid_pk(self):
        """Returns 404 response code when the pk doesn't exist."""
        response = self.client.get(self.get_url(self.tag.pk + 1))
        self.assertEqual(response.status_code, 404)

    def test_context_data(self):
        """View has appropriate data in the context."""
        response = self.client.get(self.get_url(self.tag.pk))
        context = response.context
        self.assertIn('tag', context)
        self.assertEqual(context['tag'], self.tag)
        self.assertIn('study_counts', context)
        self.assertIn('traits_tagged_count', context)

    def test_no_archived_taggedtraits(self):
        """A non-archived tagged trait is in the study counts, but not an archived one."""
        archived_tagged_trait = factories.TaggedTraitFactory.create(archived=True, tag=self.tag)
        non_archived_tagged_trait = factories.TaggedTraitFactory.create(archived=False, tag=self.tag)
        response = self.client.get(self.get_url(self.tag.pk))
        context = response.context
        study_names = [el['study_name'] for el in context['study_counts']]
        self.assertIn(
            non_archived_tagged_trait.trait.source_dataset.source_study_version.study.i_study_name, study_names)
        self.assertNotIn(
            archived_tagged_trait.trait.source_dataset.source_study_version.study.i_study_name, study_names)

    def test_no_deprecated_traits(self):
        """Counts exclude traits tagged from deprecated study versions."""
        study = StudyFactory.create()
        current_study_version = SourceStudyVersionFactory.create(study=study, i_version=5)
        old_study_version = SourceStudyVersionFactory.create(study=study, i_version=4, i_is_deprecated=True)
        current_trait = SourceTraitFactory.create(source_dataset__source_study_version=current_study_version)
        old_trait = SourceTraitFactory.create(source_dataset__source_study_version=old_study_version)
        # The factory calls create the DB rows the view counts; the returned
        # objects are never referenced, so they are not bound to locals.
        factories.TaggedTraitFactory.create(trait=current_trait, tag=self.tag)
        factories.TaggedTraitFactory.create(trait=old_trait, tag=self.tag)
        response = self.client.get(self.get_url(self.tag.pk))
        context = response.context
        self.assertEqual(context['study_counts'][0]['study_pk'], study.pk)
        self.assertEqual(context['study_counts'][0]['tt_count'], 1)
        self.assertEqual(context['traits_tagged_count'], 1)

    def test_no_deprecated_traits_with_same_version_number(self):
        """Counts exclude traits tagged from deprecated study versions even with same version number."""
        # This directly addresses the unusual CARDIA situation where there are two study versions with the
        # same version number, one of which is deprecated.
        study = StudyFactory.create()
        current_study_version = SourceStudyVersionFactory.create(study=study, i_version=5)
        old_study_version = SourceStudyVersionFactory.create(study=study, i_version=5, i_is_deprecated=True)
        current_trait = SourceTraitFactory.create(source_dataset__source_study_version=current_study_version)
        old_trait = SourceTraitFactory.create(source_dataset__source_study_version=old_study_version)
        # As above, only the side effect of creation is needed.
        factories.TaggedTraitFactory.create(trait=current_trait, tag=self.tag)
        factories.TaggedTraitFactory.create(trait=old_trait, tag=self.tag)
        response = self.client.get(self.get_url(self.tag.pk))
        context = response.context
        self.assertEqual(context['study_counts'][0]['study_pk'], study.pk)
        self.assertEqual(context['study_counts'][0]['tt_count'], 1)
        self.assertEqual(context['traits_tagged_count'], 1)
class TagDetailTest(TagDetailTestsMixin, UserLoginTestCase):
    """TagDetail view tests for a regular logged-in user."""

    def setUp(self):
        # Bare super(): the file is Python 3 (other classes here already use
        # argument-less super()), so the explicit-class form is modernized.
        super().setUp()
        self.tag = factories.TagFactory.create()

    def test_no_tagging_button(self):
        """Regular user does not see a button to add tags on this detail page."""
        response = self.client.get(self.get_url(self.tag.pk))
        self.assertNotContains(response, reverse('tags:add-many:by-tag', kwargs={'pk': self.tag.pk}))
class TagDetailPhenotypeTaggerTest(TagDetailTestsMixin, PhenotypeTaggerLoginTestCase):
    """TagDetail view tests for a phenotype tagger user."""

    def setUp(self):
        # Modernized to argument-less super(), consistent with the rest of
        # this Python 3 file.
        super().setUp()
        self.trait = SourceTraitFactory.create(source_dataset__source_study_version__study=self.study)
        self.tag = factories.TagFactory.create()
        self.user.refresh_from_db()

    def test_has_tagging_button(self):
        """A phenotype tagger does see a button to add tags on this detail page."""
        response = self.client.get(self.get_url(self.tag.pk))
        self.assertContains(response, reverse('tags:add-many:by-tag', kwargs={'pk': self.tag.pk}))
class TagDetailDCCAnalystTest(TagDetailTestsMixin, DCCAnalystLoginTestCase):
    """TagDetail view tests for a DCC analyst user."""

    def setUp(self):
        # Modernized to argument-less super(), consistent with the rest of
        # this Python 3 file.
        super().setUp()
        self.study = StudyFactory.create()
        self.trait = SourceTraitFactory.create(source_dataset__source_study_version__study=self.study)
        self.tag = factories.TagFactory.create()
        self.user.refresh_from_db()

    def test_has_tagging_button(self):
        """A DCC analyst does see a button to add tags on this detail page."""
        response = self.client.get(self.get_url(self.tag.pk))
        self.assertContains(response, reverse('tags:add-many:by-tag', kwargs={'pk': self.tag.pk}))
class TagAutocompleteTest(UserLoginTestCase):
    """Autocomplete view works as expected."""

    def setUp(self):
        # Modernized to argument-less super() (Python 3 file).
        super().setUp()
        self.tags = factories.TagFactory.create_batch(10)

    def get_url(self, *args):
        return reverse('tags:autocomplete')

    def test_view_success_code(self):
        """Returns successful response code."""
        response = self.client.get(self.get_url())
        self.assertEqual(response.status_code, 200)

    def test_returns_all_tags(self):
        """Queryset returns all of the tags with no query (when there are 10, which is the page limit)."""
        url = self.get_url()
        response = self.client.get(url)
        pks = get_autocomplete_view_ids(response)
        self.assertEqual(sorted([tag.pk for tag in self.tags]), sorted(pks))

    def test_proper_tag_in_queryset(self):
        """Queryset returns only the proper tag by title."""
        tag = self.tags[0]
        response = self.client.get(self.get_url(), {'q': tag.title})
        pks = get_autocomplete_view_ids(response)
        # assertEqual instead of assertTrue(len(...) == 1): reports the
        # actual length on failure instead of just "False is not true".
        self.assertEqual(len(pks), 1)
        self.assertEqual(tag.pk, pks[0])

    def test_proper_tag_in_queryset_upper_case(self):
        """Queryset returns only the proper tag by title when query is in upper case."""
        tag = self.tags[0]
        response = self.client.get(self.get_url(), {'q': tag.title.upper()})
        pks = get_autocomplete_view_ids(response)
        self.assertEqual(len(pks), 1)
        self.assertEqual(tag.pk, pks[0])

    def test_proper_tag_in_queryset_lower_case(self):
        """Queryset returns only the proper tag by title when query is in lower case."""
        tag = self.tags[0]
        response = self.client.get(self.get_url(), {'q': tag.title.lower()})
        pks = get_autocomplete_view_ids(response)
        self.assertEqual(len(pks), 1)
        self.assertEqual(tag.pk, pks[0])

    def test_proper_tag_in_queryset_partial_query(self):
        """The results contain the desired trait when a single letter is used for the query."""
        tag = self.tags[0]
        response = self.client.get(self.get_url(), {'q': tag.title[0]})
        pks = get_autocomplete_view_ids(response)
        self.assertGreaterEqual(len(pks), 1)
        self.assertIn(tag.pk, pks)

    def test_unreviewed_only_returns_no_tags_without_tagged_traits(self):
        """Queryset returns only tags with unreviewed tagged traits, with unreviewed_only argument."""
        url = self.get_url()
        response = self.client.get(url, {'q': '', 'forward': ['{"unreviewed_only":true}']})
        pks = get_autocomplete_view_ids(response)
        self.assertEqual([], pks)

    def test_unreviewed_only_returns_correct_tag(self):
        """Queryset returns only tags with unreviewed tagged traits, with unreviewed_only argument."""
        unreviewed_tagged_trait = factories.TaggedTraitFactory.create(tag=self.tags[0])
        reviewed_tagged_trait = factories.TaggedTraitFactory.create(tag=self.tags[1])
        factories.DCCReviewFactory.create(tagged_trait=reviewed_tagged_trait)
        url = self.get_url()
        response = self.client.get(url, {'q': '', 'forward': ['{"unreviewed_only":true}']})
        pks = get_autocomplete_view_ids(response)
        self.assertEqual([unreviewed_tagged_trait.tag.pk], pks)

    def test_unreviewed_only_returns_all_tags(self):
        """Queryset returns only tags with unreviewed tagged traits, with unreviewed_only argument."""
        for tag in self.tags:
            factories.TaggedTraitFactory.create(tag=tag)
        url = self.get_url()
        response = self.client.get(url, {'q': '', 'forward': ['{"unreviewed_only":true}']})
        pks = get_autocomplete_view_ids(response)
        self.assertEqual(sorted([tag.pk for tag in self.tags]), sorted(pks))
class TagListTest(UserLoginTestCase):
    """Tests for the tag list view and its per-tag trait counts."""

    def setUp(self):
        # Modernized to argument-less super() (Python 3 file).
        super().setUp()
        self.tags = factories.TagFactory.create_batch(20)

    def get_url(self, *args):
        return reverse('tags:list')

    def test_view_success_code(self):
        """Returns successful response code."""
        response = self.client.get(self.get_url())
        self.assertEqual(response.status_code, 200)

    def test_context_data(self):
        """View has appropriate data in the context."""
        response = self.client.get(self.get_url())
        context = response.context
        # assertIn instead of assertTrue('x' in ...): clearer failure message.
        self.assertIn('tag_table', context)
        self.assertIsInstance(context['tag_table'], tables.TagTable)

    def test_no_deprecated_traits(self):
        """Counts exclude traits tagged from deprecated study versions."""
        tag = self.tags[0]
        models.Tag.objects.exclude(pk=tag.pk).delete()
        study = StudyFactory.create()
        current_study_version = SourceStudyVersionFactory.create(study=study, i_version=5)
        old_study_version = SourceStudyVersionFactory.create(study=study, i_version=4, i_is_deprecated=True)
        current_trait = SourceTraitFactory.create(source_dataset__source_study_version=current_study_version)
        old_trait = SourceTraitFactory.create(source_dataset__source_study_version=old_study_version)
        # Only the side effect of creating the tagged traits matters; the
        # returned objects were never used, so the bindings were dropped.
        factories.TaggedTraitFactory.create(trait=current_trait, tag=tag)
        factories.TaggedTraitFactory.create(trait=old_trait, tag=tag)
        response = self.client.get(self.get_url())
        context = response.context
        tag_table = context['tag_table']
        row = tag_table.rows[0]
        count = row.get_cell('number_tagged_traits')
        self.assertEqual(count, 1)

    def test_no_deprecated_traits_with_same_version_number(self):
        """Counts exclude traits tagged from deprecated study versions even with same version number."""
        # This directly addresses the unusual CARDIA situation where there are two study versions with the
        # same version number, one of which is deprecated.
        tag = self.tags[0]
        models.Tag.objects.exclude(pk=tag.pk).delete()
        study = StudyFactory.create()
        current_study_version = SourceStudyVersionFactory.create(study=study, i_version=5)
        old_study_version = SourceStudyVersionFactory.create(study=study, i_version=5, i_is_deprecated=True)
        current_trait = SourceTraitFactory.create(source_dataset__source_study_version=current_study_version)
        old_trait = SourceTraitFactory.create(source_dataset__source_study_version=old_study_version)
        # As above, only the creation side effect is needed.
        factories.TaggedTraitFactory.create(trait=current_trait, tag=tag)
        factories.TaggedTraitFactory.create(trait=old_trait, tag=tag)
        response = self.client.get(self.get_url())
        context = response.context
        tag_table = context['tag_table']
        row = tag_table.rows[0]
        count = row.get_cell('number_tagged_traits')
        self.assertEqual(count, 1)
class TaggedTraitDetailTestsMixin(object):
    """Mixin to run standard tests for the TaggedTraitDetail view, for use with TestCase or subclass of TestCase."""

    def _make_tagged_trait(self, study, dcc_review_status=None, study_response_status=None,
                           dcc_decision=None, archived=False):
        """Create and return one tagged trait for ``study`` in the given quality review state.

        Args:
            study: the Study whose source study version the new trait belongs to.
            dcc_review_status: models.DCCReview.STATUS_* value, or None for no DCC review.
            study_response_status: models.StudyResponse.STATUS_* value, or None for no study response.
                Requires a DCC review to already exist (i.e. dcc_review_status must also be given).
            dcc_decision: models.DCCDecision.DECISION_* value, or None for no DCC decision.
                Requires a DCC review to already exist.
            archived: if True, archive the tagged trait after creating the review objects.
        """
        tagged_trait = factories.TaggedTraitFactory.create(
            tag=self.tag, trait__source_dataset__source_study_version__study=study)
        if dcc_review_status is not None:
            factories.DCCReviewFactory.create(tagged_trait=tagged_trait, status=dcc_review_status)
        if study_response_status is not None:
            factories.StudyResponseFactory.create(dcc_review=tagged_trait.dcc_review, status=study_response_status)
        if dcc_decision is not None:
            factories.DCCDecisionFactory.create(dcc_review=tagged_trait.dcc_review, decision=dcc_decision)
        if archived:
            tagged_trait.archive()
        return tagged_trait

    def _make_updated_dataset(self, dataset, study_version):
        """Create and return a dataset in ``study_version`` duplicating ``dataset``'s dbGaP metadata.

        Models an updated version of the same dataset in a newer study version.
        """
        return SourceDatasetFactory.create(
            source_study_version=study_version,
            i_accession=dataset.i_accession,
            i_version=dataset.i_version,
            i_is_subject_file=dataset.i_is_subject_file,
            i_study_subject_column=dataset.i_study_subject_column,
            i_dbgap_description=dataset.i_dbgap_description
        )

    def _make_updated_trait(self, trait, dataset):
        """Create and return a trait in ``dataset`` duplicating ``trait``'s dbGaP metadata.

        Models an updated version of the same variable in a newer study version.
        """
        return SourceTraitFactory.create(
            source_dataset=dataset,
            i_detected_type=trait.i_detected_type,
            i_dbgap_type=trait.i_dbgap_type,
            i_dbgap_variable_accession=trait.i_dbgap_variable_accession,
            i_dbgap_variable_version=trait.i_dbgap_variable_version,
            i_dbgap_comment=trait.i_dbgap_comment,
            i_dbgap_unit=trait.i_dbgap_unit,
            i_n_records=trait.i_n_records,
            i_n_missing=trait.i_n_missing,
            i_is_unique_key=trait.i_is_unique_key,
            i_are_values_truncated=trait.i_are_values_truncated
        )

    def setUp(self):
        """Create one tagged trait in each possible review/response/decision state."""
        super().setUp()
        # Tag traits from the user's taggable study if the user has one; otherwise use a new study.
        if self.user.profile.taggable_studies.exists():
            user_study = self.study
        else:
            user_study = StudyFactory.create()
        self.tag = factories.TagFactory.create()
        # Create a tagged trait of each possible status combination, keyed by a name that
        # describes the DCC review / study response / DCC decision / archived state.
        self.tagged_traits = {}
        self.tagged_traits['unreviewed'] = self._make_tagged_trait(user_study)
        self.tagged_traits['followup_dccreview_no_studyresponse_no_dccdecision'] = self._make_tagged_trait(
            user_study, dcc_review_status=models.DCCReview.STATUS_FOLLOWUP)
        self.tagged_traits['followup_dccreview_disagree_studyresponse_no_dccdecision'] = self._make_tagged_trait(
            user_study, dcc_review_status=models.DCCReview.STATUS_FOLLOWUP,
            study_response_status=models.StudyResponse.STATUS_DISAGREE)
        self.tagged_traits['confirmed_dccreview'] = self._make_tagged_trait(
            user_study, dcc_review_status=models.DCCReview.STATUS_CONFIRMED)
        self.tagged_traits['followup_dccreview_no_studyresponse_remove_dccdecision_archived'] = (
            self._make_tagged_trait(
                user_study, dcc_review_status=models.DCCReview.STATUS_FOLLOWUP,
                dcc_decision=models.DCCDecision.DECISION_REMOVE, archived=True))
        self.tagged_traits['followup_dccreview_no_studyresponse_confirm_dccdecision'] = self._make_tagged_trait(
            user_study, dcc_review_status=models.DCCReview.STATUS_FOLLOWUP,
            dcc_decision=models.DCCDecision.DECISION_CONFIRM)
        self.tagged_traits['followup_dccreview_agree_studyresponse_archived'] = self._make_tagged_trait(
            user_study, dcc_review_status=models.DCCReview.STATUS_FOLLOWUP,
            study_response_status=models.StudyResponse.STATUS_AGREE, archived=True)
        self.tagged_traits['followup_dccreview_disagree_studyresponse_remove_dccdecision_archived'] = (
            self._make_tagged_trait(
                user_study, dcc_review_status=models.DCCReview.STATUS_FOLLOWUP,
                study_response_status=models.StudyResponse.STATUS_DISAGREE,
                dcc_decision=models.DCCDecision.DECISION_REMOVE, archived=True))
        self.tagged_traits['followup_dccreview_disagree_studyresponse_confirm_dccdecision'] = self._make_tagged_trait(
            user_study, dcc_review_status=models.DCCReview.STATUS_FOLLOWUP,
            study_response_status=models.StudyResponse.STATUS_DISAGREE,
            dcc_decision=models.DCCDecision.DECISION_CONFIRM)

    def get_url(self, *args):
        """Return the url for the TaggedTraitDetail view, with the given args."""
        return reverse('tags:tagged-traits:pk:detail', args=args)

    def test_view_success_code(self):
        """Returns successful response code."""
        response = self.client.get(self.get_url(self.tagged_traits['unreviewed'].pk))
        self.assertEqual(response.status_code, 200)

    def test_view_with_invalid_pk(self):
        """Returns 404 response code when the pk doesn't exist."""
        unreviewed_pk = self.tagged_traits['unreviewed'].pk
        self.tagged_traits['unreviewed'].delete()
        response = self.client.get(self.get_url(unreviewed_pk))
        self.assertEqual(response.status_code, 404)

    def test_context_data(self):
        """Expected context variables exist."""
        response = self.client.get(self.get_url(self.tagged_traits['unreviewed'].pk))
        context = response.context
        self.assertIn('tagged_trait', context)
        self.assertEqual(context['tagged_trait'], self.tagged_traits['unreviewed'])
        # All of the display-control flags used by the template must be present.
        self.assertIn('show_quality_review_panel', context)
        self.assertIn('show_dcc_review_add_button', context)
        self.assertIn('show_dcc_review_update_button', context)
        self.assertIn('show_dcc_review_confirmed', context)
        self.assertIn('show_dcc_review_needs_followup', context)
        self.assertIn('show_study_response_status', context)
        self.assertIn('show_study_agrees', context)
        self.assertIn('show_study_disagrees', context)
        self.assertIn('show_dcc_decision', context)
        self.assertIn('show_dcc_decision_add_button', context)
        self.assertIn('show_dcc_decision_update_button', context)
        self.assertIn('show_decision_remove', context)
        self.assertIn('show_decision_confirm', context)
        self.assertIn('show_decision_comment', context)
        self.assertIn('show_delete_button', context)
        self.assertIn('show_archived', context)
        self.assertIn('quality_review_panel_color', context)
        self.assertIn('is_deprecated', context)
        self.assertIn('show_removed_text', context)
        self.assertIn('new_version_link', context)

    def test_no_other_tags(self):
        """Other tags linked to the same trait are not included in the page."""
        another_tag = factories.TagFactory.create()
        another_tagged_trait = factories.TaggedTraitFactory.create(
            trait=self.tagged_traits['unreviewed'].trait, tag=another_tag)
        response = self.client.get(self.get_url(self.tagged_traits['unreviewed'].pk))
        context = response.context
        self.assertNotIn('show_other_tags', context)
        content = str(response.content)
        self.assertNotIn(another_tagged_trait.tag.title, content)
        self.assertIn(self.tagged_traits['unreviewed'].tag.title, content)

    def test_delete_link_present_for_unreviewed_tagged_trait(self):
        """Delete button is shown for unreviewed tagged trait."""
        response = self.client.get(self.get_url(self.tagged_traits['unreviewed'].pk))
        self.assertContains(
            response, reverse('tags:tagged-traits:pk:delete', kwargs={'pk': self.tagged_traits['unreviewed'].pk}))

    def test_delete_link_not_shown_for_all_reviewed_tagged_traits(self):
        """Shows no button to delete the rest of tagged traits, which all have reviews."""
        # Remove the one tagged trait that has no review; every remaining one has been reviewed.
        del self.tagged_traits['unreviewed']
        for tt_type in self.tagged_traits:
            tagged_trait = self.tagged_traits[tt_type]
            response = self.client.get(self.get_url(tagged_trait.pk))
            self.assertNotContains(response, reverse('tags:tagged-traits:pk:delete', kwargs={'pk': tagged_trait.pk}))

    def test_deprecated_tagged_trait_no_new_version(self):
        """Context variables are set properly for deprecated tagged trait with no new version."""
        study = StudyFactory.create()
        self.user.profile.taggable_studies.add(study)
        self.user.refresh_from_db()
        source_study_version1 = SourceStudyVersionFactory.create(study=study, i_is_deprecated=True, i_version=1)
        # A newer study version exists, but it contains no updated copy of the tagged trait.
        source_study_version2 = SourceStudyVersionFactory.create(study=study, i_is_deprecated=False, i_version=2)
        trait1 = SourceTraitFactory.create(source_dataset__source_study_version=source_study_version1)
        deprecated_tagged_trait = factories.TaggedTraitFactory.create(trait=trait1)
        response = self.client.get(self.get_url(deprecated_tagged_trait.pk))
        context = response.context
        self.assertTrue(context['is_deprecated'])
        self.assertTrue(context['show_removed_text'])
        self.assertIsNone(context['new_version_link'])
        self.assertContains(response, '<div class="alert alert-danger" role="alert" id="removed_deprecated_trait">')
        self.assertNotContains(response, '<div class="alert alert-danger" role="alert" id="updated_deprecated_trait">')

    def test_deprecated_tagged_trait_with_new_version(self):
        """Correct context variables for deprecated tagged trait with new version."""
        study = StudyFactory.create()
        self.user.profile.taggable_studies.add(study)
        self.user.refresh_from_db()
        tag = factories.TagFactory.create()
        source_study_version1 = SourceStudyVersionFactory.create(study=study, i_is_deprecated=True, i_version=1)
        source_study_version2 = SourceStudyVersionFactory.create(study=study, i_is_deprecated=False, i_version=2)
        source_dataset1 = SourceDatasetFactory.create(source_study_version=source_study_version1)
        source_dataset2 = self._make_updated_dataset(source_dataset1, source_study_version2)
        trait1 = SourceTraitFactory.create(source_dataset=source_dataset1)
        trait2 = self._make_updated_trait(trait1, source_dataset2)
        tagged_trait1 = factories.TaggedTraitFactory.create(trait=trait1, tag=tag)
        tagged_trait2 = factories.TaggedTraitFactory.create(trait=trait2, tag=tag, previous_tagged_trait=tagged_trait1)
        response = self.client.get(self.get_url(tagged_trait1.pk))
        context = response.context
        self.assertTrue(context['is_deprecated'])
        self.assertFalse(context['show_removed_text'])
        self.assertEqual(context['new_version_link'], tagged_trait2.get_absolute_url())
        self.assertContains(response, context['new_version_link'])
        self.assertNotContains(response, '<div class="alert alert-danger" role="alert" id="removed_deprecated_trait">')
        self.assertContains(response, '<div class="alert alert-danger" role="alert" id="updated_deprecated_trait">')

    def test_deprecated_tagged_trait_with_two_new_versions(self):
        """Correct context variables for deprecated tagged trait with two new versions."""
        study = StudyFactory.create()
        self.user.profile.taggable_studies.add(study)
        self.user.refresh_from_db()
        tag = factories.TagFactory.create()
        source_study_version1 = SourceStudyVersionFactory.create(study=study, i_is_deprecated=True, i_version=1)
        source_study_version2 = SourceStudyVersionFactory.create(study=study, i_is_deprecated=True, i_version=2)
        source_study_version3 = SourceStudyVersionFactory.create(study=study, i_is_deprecated=False, i_version=3)
        source_dataset1 = SourceDatasetFactory.create(source_study_version=source_study_version1)
        source_dataset2 = self._make_updated_dataset(source_dataset1, source_study_version2)
        source_dataset3 = self._make_updated_dataset(source_dataset1, source_study_version3)
        trait1 = SourceTraitFactory.create(source_dataset=source_dataset1)
        trait2 = self._make_updated_trait(trait1, source_dataset2)
        trait3 = self._make_updated_trait(trait1, source_dataset3)
        tagged_trait1 = factories.TaggedTraitFactory.create(trait=trait1, tag=tag)
        tagged_trait2 = factories.TaggedTraitFactory.create(trait=trait2, tag=tag, previous_tagged_trait=tagged_trait1)
        tagged_trait3 = factories.TaggedTraitFactory.create(trait=trait3, tag=tag, previous_tagged_trait=tagged_trait2)
        response = self.client.get(self.get_url(tagged_trait1.pk))
        context = response.context
        self.assertTrue(context['is_deprecated'])
        self.assertFalse(context['show_removed_text'])
        # The link must point at the newest (non-deprecated) version, not the intermediate one.
        self.assertEqual(context['new_version_link'], tagged_trait3.get_absolute_url())
        self.assertContains(response, context['new_version_link'])
        self.assertNotContains(response, '<div class="alert alert-danger" role="alert" id="removed_deprecated_trait">')
        self.assertContains(response, '<div class="alert alert-danger" role="alert" id="updated_deprecated_trait">')
class TaggedTraitDetailPhenotypeTaggerTest(TaggedTraitDetailTestsMixin, PhenotypeTaggerLoginTestCase):
def test_context_unreviewed(self):
"""Context variables and page content are as expected for this type of tagged trait."""
response = self.client.get(self.get_url(self.tagged_traits['unreviewed'].pk))
context = response.context
self.assertEqual(response.status_code, 200)
self.assertTrue(context['show_quality_review_panel'])
self.assertContains(response, '#collapse-reviewstatus')
self.assertContains(response, 'not yet been reviewed')
self.assertFalse(context['show_dcc_review_confirmed'])
self.assertNotContains(response, 'confirmed by the DCC')
self.assertFalse(context['show_dcc_review_needs_followup'])
self.assertNotContains(response, 'flagged for removal')
self.assertFalse(context['show_study_response_status'])
self.assertNotContains(response, 'The study')
self.assertFalse(context['show_study_agrees'])
self.assertNotContains(response, 'should be removed')
self.assertFalse(context['show_study_disagrees'])
self.assertNotContains(response, 'should remain tagged')
self.assertFalse(context['show_dcc_decision'])
self.assertFalse(context['show_decision_remove'])
self.assertNotContains(response, 'will be removed by the DCC')
self.assertFalse(context['show_decision_confirm'])
# self.assertNotContains(response, 'confirmed by the DCC')
self.assertFalse(context['show_decision_comment'])
self.assertFalse(context['show_archived'])
self.assertNotContains(response, 'has been removed by the DCC')
self.assertFalse(context['show_dcc_review_add_button'])
self.assertNotContains(
response, reverse('tags:tagged-traits:pk:dcc-review:new', args=[self.tagged_traits['unreviewed'].pk]))
self.assertFalse(context['show_dcc_review_update_button'])
self.assertNotContains(
response, reverse('tags:tagged-traits:pk:dcc-review:update', args=[self.tagged_traits['unreviewed'].pk]))
self.assertFalse(context['show_dcc_decision_add_button'])
self.assertNotContains(
response, reverse('tags:tagged-traits:pk:dcc-decision:new', args=[self.tagged_traits['unreviewed'].pk]))
self.assertFalse(context['show_dcc_decision_update_button'])
self.assertNotContains(
response, reverse('tags:tagged-traits:pk:dcc-decision:update', args=[self.tagged_traits['unreviewed'].pk]))
self.assertTrue(context['show_delete_button'])
self.assertEqual(context['quality_review_panel_color'], '')
def test_context_followup_dccreview_no_studyresponse_no_dccdecision(self):
"""Context variables and page content are as expected for this type of tagged trait."""
response = self.client.get(
self.get_url(self.tagged_traits['followup_dccreview_no_studyresponse_no_dccdecision'].pk))
context = response.context
self.assertEqual(response.status_code, 200)
self.assertTrue(context['show_quality_review_panel'])
self.assertContains(response, '#collapse-reviewstatus')
self.assertNotContains(response, 'not yet been reviewed')
self.assertFalse(context['show_dcc_review_confirmed'])
self.assertNotContains(response, 'confirmed by the DCC')
self.assertTrue(context['show_dcc_review_needs_followup'])
self.assertContains(response, 'flagged for removal')
self.assertFalse(context['show_study_response_status'])
self.assertNotContains(response, 'The study')
self.assertFalse(context['show_study_agrees'])
self.assertNotContains(response, 'should be removed')
self.assertFalse(context['show_study_disagrees'])
self.assertNotContains(response, 'should remain tagged')
self.assertFalse(context['show_dcc_decision'])
self.assertFalse(context['show_decision_remove'])
self.assertNotContains(response, 'will be removed by the DCC')
self.assertFalse(context['show_decision_confirm'])
# self.assertNotContains(response, 'confirmed by the DCC')
self.assertFalse(context['show_decision_comment'])
self.assertFalse(context['show_archived'])
self.assertNotContains(response, 'has been removed by the DCC')
self.assertFalse(context['show_dcc_review_add_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-review:new',
args=[self.tagged_traits['followup_dccreview_no_studyresponse_no_dccdecision'].pk]))
self.assertFalse(context['show_dcc_review_update_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-review:update',
args=[self.tagged_traits['followup_dccreview_no_studyresponse_no_dccdecision'].pk]))
self.assertFalse(context['show_dcc_decision_add_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-decision:new',
args=[self.tagged_traits['followup_dccreview_no_studyresponse_no_dccdecision'].pk]))
self.assertFalse(context['show_dcc_decision_update_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-decision:update',
args=[self.tagged_traits['followup_dccreview_no_studyresponse_no_dccdecision'].pk]))
self.assertFalse(context['show_delete_button'])
self.assertEqual(context['quality_review_panel_color'], '')
def test_context_followup_dccreview_disagree_studyresponse_no_dccdecision(self):
"""Context variables and page content are as expected for this type of tagged trait."""
response = self.client.get(
self.get_url(self.tagged_traits['followup_dccreview_disagree_studyresponse_no_dccdecision'].pk))
context = response.context
self.assertEqual(response.status_code, 200)
self.assertTrue(context['show_quality_review_panel'])
self.assertContains(response, '#collapse-reviewstatus')
self.assertNotContains(response, 'not yet been reviewed')
self.assertFalse(context['show_dcc_review_confirmed'])
self.assertNotContains(response, 'confirmed by the DCC')
self.assertTrue(context['show_dcc_review_needs_followup'])
self.assertContains(response, 'flagged for removal')
self.assertTrue(context['show_study_response_status'])
self.assertContains(response, 'The study')
self.assertFalse(context['show_study_agrees'])
self.assertNotContains(response, 'should be removed')
self.assertTrue(context['show_study_disagrees'])
self.assertContains(response, 'should remain tagged')
self.assertContains(
response,
self.tagged_traits[
'followup_dccreview_disagree_studyresponse_no_dccdecision'].dcc_review.study_response.comment)
self.assertFalse(context['show_dcc_decision'])
self.assertFalse(context['show_decision_remove'])
self.assertNotContains(response, 'will be removed by the DCC')
self.assertFalse(context['show_decision_confirm'])
# self.assertNotContains(response, 'confirmed by the DCC')
self.assertFalse(context['show_decision_comment'])
# self.assertNotContains(response, self.tagged_traits[''].dcc_review.dcc_decision.comment)
self.assertFalse(context['show_archived'])
self.assertNotContains(response, 'has been removed by the DCC')
self.assertFalse(context['show_dcc_review_add_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-review:new',
args=[self.tagged_traits['followup_dccreview_disagree_studyresponse_no_dccdecision'].pk]))
self.assertFalse(context['show_dcc_review_update_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-review:update',
args=[self.tagged_traits['followup_dccreview_disagree_studyresponse_no_dccdecision'].pk]))
self.assertFalse(context['show_dcc_decision_add_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-decision:new',
args=[self.tagged_traits['followup_dccreview_disagree_studyresponse_no_dccdecision'].pk]))
self.assertFalse(context['show_dcc_decision_update_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-decision:update',
args=[self.tagged_traits['followup_dccreview_disagree_studyresponse_no_dccdecision'].pk]))
self.assertFalse(context['show_delete_button'])
self.assertEqual(context['quality_review_panel_color'], '')
def test_context_confirmed_dccreview(self):
"""Context variables and page content are as expected for this type of tagged trait."""
response = self.client.get(self.get_url(self.tagged_traits['confirmed_dccreview'].pk))
context = response.context
self.assertEqual(response.status_code, 200)
self.assertTrue(context['show_quality_review_panel'])
self.assertContains(response, '#collapse-reviewstatus')
self.assertNotContains(response, 'not yet been reviewed')
self.assertTrue(context['show_dcc_review_confirmed'])
self.assertContains(response, 'confirmed by the DCC')
self.assertFalse(context['show_dcc_review_needs_followup'])
self.assertNotContains(response, 'flagged for removal')
self.assertFalse(context['show_study_response_status'])
self.assertNotContains(response, 'The study')
self.assertFalse(context['show_study_agrees'])
self.assertNotContains(response, 'should be removed')
self.assertFalse(context['show_study_disagrees'])
self.assertNotContains(response, 'should remain tagged')
self.assertFalse(context['show_dcc_decision'])
self.assertFalse(context['show_decision_remove'])
self.assertNotContains(response, 'will be removed by the DCC')
self.assertFalse(context['show_decision_confirm'])
# self.assertNotContains(response, 'confirmed by the DCC')
self.assertFalse(context['show_decision_comment'])
self.assertFalse(context['show_archived'])
self.assertNotContains(response, 'has been removed by the DCC')
self.assertFalse(context['show_dcc_review_add_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-review:new', args=[self.tagged_traits['confirmed_dccreview'].pk]))
self.assertFalse(context['show_dcc_review_update_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-review:update', args=[self.tagged_traits['confirmed_dccreview'].pk]))
self.assertFalse(context['show_dcc_decision_add_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-decision:new', args=[self.tagged_traits['confirmed_dccreview'].pk]))
self.assertFalse(context['show_dcc_decision_update_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-decision:update', args=[self.tagged_traits['confirmed_dccreview'].pk]))
self.assertFalse(context['show_delete_button'])
self.assertEqual(context['quality_review_panel_color'], 'bg-success')
def test_context_followup_dccreview_no_studyresponse_remove_dccdecision_archived(self):
"""Context variables and page content are as expected for this type of tagged trait."""
response = self.client.get(
self.get_url(self.tagged_traits['followup_dccreview_no_studyresponse_remove_dccdecision_archived'].pk))
context = response.context
self.assertEqual(response.status_code, 200)
self.assertTrue(context['show_quality_review_panel'])
self.assertContains(response, '#collapse-reviewstatus')
self.assertNotContains(response, 'not yet been reviewed')
self.assertFalse(context['show_dcc_review_confirmed'])
self.assertNotContains(response, 'confirmed by the DCC')
self.assertTrue(context['show_dcc_review_needs_followup'])
self.assertContains(response, 'flagged for removal')
self.assertFalse(context['show_study_response_status'])
self.assertNotContains(response, 'The study')
self.assertFalse(context['show_study_agrees'])
self.assertNotContains(response, 'should be removed')
self.assertFalse(context['show_study_disagrees'])
self.assertNotContains(response, 'should remain tagged')
self.assertTrue(context['show_dcc_decision'])
self.assertTrue(context['show_decision_remove'])
self.assertContains(response, 'will be removed by the DCC')
self.assertFalse(context['show_decision_confirm'])
# self.assertNotContains(response, 'confirmed by the DCC')
self.assertFalse(context['show_decision_comment'])
self.assertNotContains(
response,
self.tagged_traits[
'followup_dccreview_no_studyresponse_remove_dccdecision_archived'].dcc_review.dcc_decision.comment)
self.assertTrue(context['show_archived'])
self.assertContains(response, 'has been removed by the DCC')
self.assertFalse(context['show_dcc_review_add_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-review:new',
args=[self.tagged_traits['followup_dccreview_no_studyresponse_remove_dccdecision_archived'].pk]))
self.assertFalse(context['show_dcc_review_update_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-review:update',
args=[self.tagged_traits['followup_dccreview_no_studyresponse_remove_dccdecision_archived'].pk]))
self.assertFalse(context['show_dcc_decision_add_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-decision:new',
args=[self.tagged_traits['followup_dccreview_no_studyresponse_remove_dccdecision_archived'].pk]))
self.assertFalse(context['show_dcc_decision_update_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-decision:update',
args=[self.tagged_traits['followup_dccreview_no_studyresponse_remove_dccdecision_archived'].pk]))
self.assertFalse(context['show_delete_button'])
self.assertEqual(context['quality_review_panel_color'], 'bg-danger')
def test_context_followup_dccreview_no_studyresponse_confirm_dccdecision(self):
"""Context variables and page content are as expected for this type of tagged trait."""
response = self.client.get(
self.get_url(self.tagged_traits['followup_dccreview_no_studyresponse_confirm_dccdecision'].pk))
context = response.context
self.assertEqual(response.status_code, 200)
self.assertTrue(context['show_quality_review_panel'])
self.assertContains(response, '#collapse-reviewstatus')
self.assertNotContains(response, 'not yet been reviewed')
self.assertFalse(context['show_dcc_review_confirmed'])
# self.assertNotContains(response, 'confirmed by the DCC')
self.assertTrue(context['show_dcc_review_needs_followup'])
self.assertContains(response, 'flagged for removal')
self.assertFalse(context['show_study_response_status'])
self.assertNotContains(response, 'The study')
self.assertFalse(context['show_study_agrees'])
self.assertNotContains(response, 'should be removed')
self.assertFalse(context['show_study_disagrees'])
self.assertNotContains(response, 'should remain tagged')
self.assertTrue(context['show_dcc_decision'])
self.assertFalse(context['show_decision_remove'])
self.assertNotContains(response, 'will be removed by the DCC')
self.assertTrue(context['show_decision_confirm'])
self.assertContains(response, 'confirmed by the DCC')
self.assertFalse(context['show_decision_comment'])
self.assertNotContains(
response, self.tagged_traits[
'followup_dccreview_no_studyresponse_confirm_dccdecision'].dcc_review.dcc_decision.comment)
self.assertFalse(context['show_archived'])
self.assertNotContains(response, 'has been removed by the DCC')
self.assertFalse(context['show_dcc_review_add_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-review:new',
args=[self.tagged_traits['followup_dccreview_no_studyresponse_confirm_dccdecision'].pk]))
self.assertFalse(context['show_dcc_review_update_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-review:update',
args=[self.tagged_traits['followup_dccreview_no_studyresponse_confirm_dccdecision'].pk]))
self.assertFalse(context['show_dcc_decision_add_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-decision:new',
args=[self.tagged_traits['followup_dccreview_no_studyresponse_confirm_dccdecision'].pk]))
self.assertFalse(context['show_dcc_decision_update_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-decision:update',
args=[self.tagged_traits['followup_dccreview_no_studyresponse_confirm_dccdecision'].pk]))
self.assertFalse(context['show_delete_button'])
self.assertEqual(context['quality_review_panel_color'], 'bg-success')
def test_context_followup_dccreview_agree_studyresponse_archived(self):
"""Context variables and page content are as expected for this type of tagged trait."""
response = self.client.get(
self.get_url(self.tagged_traits['followup_dccreview_agree_studyresponse_archived'].pk))
context = response.context
self.assertEqual(response.status_code, 200)
self.assertTrue(context['show_quality_review_panel'])
self.assertContains(response, '#collapse-reviewstatus')
self.assertNotContains(response, 'not yet been reviewed')
self.assertFalse(context['show_dcc_review_confirmed'])
self.assertNotContains(response, 'confirmed by the DCC')
self.assertTrue(context['show_dcc_review_needs_followup'])
self.assertContains(response, 'flagged for removal')
self.assertTrue(context['show_study_response_status'])
self.assertContains(response, 'The study')
self.assertTrue(context['show_study_agrees'])
self.assertContains(response, 'should be removed')
self.assertFalse(context['show_study_disagrees'])
self.assertNotContains(response, 'should remain tagged')
self.assertFalse(context['show_dcc_decision'])
self.assertFalse(context['show_decision_remove'])
self.assertNotContains(response, 'will be removed by the DCC')
self.assertFalse(context['show_decision_confirm'])
self.assertFalse(context['show_decision_comment'])
self.assertTrue(context['show_archived'])
self.assertContains(response, 'has been removed by the DCC')
self.assertFalse(context['show_dcc_review_add_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-review:new',
args=[self.tagged_traits['followup_dccreview_agree_studyresponse_archived'].pk]))
self.assertFalse(context['show_dcc_review_update_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-review:update',
args=[self.tagged_traits['followup_dccreview_agree_studyresponse_archived'].pk]))
self.assertFalse(context['show_dcc_decision_add_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-decision:new',
args=[self.tagged_traits['followup_dccreview_agree_studyresponse_archived'].pk]))
self.assertFalse(context['show_dcc_decision_update_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-decision:update',
args=[self.tagged_traits['followup_dccreview_agree_studyresponse_archived'].pk]))
self.assertFalse(context['show_delete_button'])
self.assertEqual(context['quality_review_panel_color'], 'bg-danger')
def test_context_followup_dccreview_disagree_studyresponse_remove_dccdecision_archived(self):
"""Context variables and page content are as expected for this type of tagged trait."""
response = self.client.get(self.get_url(
self.tagged_traits['followup_dccreview_disagree_studyresponse_remove_dccdecision_archived'].pk))
context = response.context
self.assertEqual(response.status_code, 200)
self.assertTrue(context['show_quality_review_panel'])
self.assertContains(response, '#collapse-reviewstatus')
self.assertNotContains(response, 'not yet been reviewed')
self.assertFalse(context['show_dcc_review_confirmed'])
self.assertNotContains(response, 'confirmed by the DCC')
self.assertTrue(context['show_dcc_review_needs_followup'])
self.assertContains(response, 'flagged for removal')
self.assertTrue(context['show_study_response_status'])
self.assertContains(response, 'The study')
self.assertFalse(context['show_study_agrees'])
self.assertNotContains(response, 'should be removed')
self.assertTrue(context['show_study_disagrees'])
self.assertContains(response, 'should remain tagged')
self.assertContains(
response,
self.tagged_traits[
'followup_dccreview_disagree_studyresponse_remove_dccdecision_archived'
].dcc_review.study_response.comment)
self.assertTrue(context['show_dcc_decision'])
self.assertTrue(context['show_decision_remove'])
self.assertContains(response, 'will be removed by the DCC')
self.assertFalse(context['show_decision_confirm'])
self.assertFalse(context['show_decision_comment'])
self.assertTrue(context['show_archived'])
self.assertContains(response, 'has been removed by the DCC')
self.assertFalse(context['show_dcc_review_add_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-review:new',
args=[self.tagged_traits[
'followup_dccreview_disagree_studyresponse_remove_dccdecision_archived'].pk]))
self.assertFalse(context['show_dcc_review_update_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-review:update',
args=[self.tagged_traits[
'followup_dccreview_disagree_studyresponse_remove_dccdecision_archived'].pk]))
self.assertFalse(context['show_dcc_decision_add_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-decision:new',
args=[self.tagged_traits[
'followup_dccreview_disagree_studyresponse_remove_dccdecision_archived'].pk]))
self.assertFalse(context['show_dcc_decision_update_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-decision:update',
args=[self.tagged_traits[
'followup_dccreview_disagree_studyresponse_remove_dccdecision_archived'].pk]))
self.assertFalse(context['show_delete_button'])
self.assertEqual(context['quality_review_panel_color'], 'bg-danger')
def test_context_followup_dccreview_disagree_studyresponse_confirm_dccdecision(self):
"""Context variables and page content are as expected for this type of tagged trait."""
response = self.client.get(
self.get_url(self.tagged_traits['followup_dccreview_disagree_studyresponse_confirm_dccdecision'].pk))
context = response.context
self.assertEqual(response.status_code, 200)
self.assertTrue(context['show_quality_review_panel'])
self.assertContains(response, '#collapse-reviewstatus')
self.assertNotContains(response, 'not yet been reviewed')
self.assertFalse(context['show_dcc_review_confirmed'])
self.assertTrue(context['show_dcc_review_needs_followup'])
self.assertContains(response, 'flagged for removal')
self.assertTrue(context['show_study_response_status'])
self.assertContains(response, 'The study')
self.assertFalse(context['show_study_agrees'])
self.assertNotContains(response, 'should be removed')
self.assertTrue(context['show_study_disagrees'])
self.assertContains(response, 'should remain tagged')
self.assertContains(
response,
self.tagged_traits[
'followup_dccreview_disagree_studyresponse_confirm_dccdecision'
].dcc_review.study_response.comment)
self.assertTrue(context['show_dcc_decision'])
self.assertFalse(context['show_decision_remove'])
self.assertNotContains(response, 'will be removed by the DCC')
self.assertTrue(context['show_decision_confirm'])
self.assertContains(response, 'confirmed by the DCC')
self.assertFalse(context['show_decision_comment'])
self.assertNotContains(
response,
self.tagged_traits[
'followup_dccreview_disagree_studyresponse_confirm_dccdecision'
].dcc_review.dcc_decision.comment)
self.assertFalse(context['show_archived'])
self.assertNotContains(response, 'has been removed by the DCC')
self.assertFalse(context['show_dcc_review_add_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-review:new',
args=[self.tagged_traits['followup_dccreview_disagree_studyresponse_confirm_dccdecision'].pk]))
self.assertFalse(context['show_dcc_review_update_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-review:update',
args=[self.tagged_traits['followup_dccreview_disagree_studyresponse_confirm_dccdecision'].pk]))
self.assertFalse(context['show_dcc_decision_add_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-decision:new',
args=[self.tagged_traits['followup_dccreview_disagree_studyresponse_confirm_dccdecision'].pk]))
self.assertFalse(context['show_dcc_decision_update_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-decision:update',
args=[self.tagged_traits['followup_dccreview_disagree_studyresponse_confirm_dccdecision'].pk]))
self.assertFalse(context['show_delete_button'])
self.assertEqual(context['quality_review_panel_color'], 'bg-success')
class TaggedTraitDetailDCCAnalystTest(TaggedTraitDetailTestsMixin, DCCAnalystLoginTestCase):
    """Detail-page context and content tests when logged in as a DCC analyst.

    Each test fetches the detail page for one tagged trait in a specific review state
    (keyed by name in ``self.tagged_traits``) and checks which context flags, page
    messages, and review/decision action buttons are shown for this user type.
    """
    def test_context_unreviewed(self):
        """Context variables and page content are as expected for this type of tagged trait."""
        response = self.client.get(self.get_url(self.tagged_traits['unreviewed'].pk))
        context = response.context
        self.assertEqual(response.status_code, 200)
        self.assertTrue(context['show_quality_review_panel'])
        self.assertContains(response, '#collapse-reviewstatus')
        self.assertContains(response, 'not yet been reviewed')
        self.assertFalse(context['show_dcc_review_confirmed'])
        self.assertNotContains(response, 'confirmed by the DCC')
        self.assertFalse(context['show_dcc_review_needs_followup'])
        self.assertNotContains(response, 'flagged for removal')
        self.assertFalse(context['show_study_response_status'])
        self.assertNotContains(response, 'The study')
        self.assertFalse(context['show_study_agrees'])
        self.assertNotContains(response, 'should be removed')
        self.assertFalse(context['show_study_disagrees'])
        self.assertNotContains(response, 'should remain tagged')
        self.assertFalse(context['show_dcc_decision'])
        self.assertFalse(context['show_decision_remove'])
        self.assertNotContains(response, 'will be removed by the DCC')
        self.assertFalse(context['show_decision_confirm'])
        # Disabled: 'confirmed by the DCC' is already asserted absent above for the
        # review section; the decision section reuses the same phrase.
        # self.assertNotContains(response, 'confirmed by the DCC')
        self.assertFalse(context['show_decision_comment'])
        self.assertFalse(context['show_archived'])
        self.assertNotContains(response, 'has been removed by the DCC')
        self.assertTrue(context['show_dcc_review_add_button'])
        self.assertContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-review:new', args=[self.tagged_traits['unreviewed'].pk]))
        self.assertFalse(context['show_dcc_review_update_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-review:update', args=[self.tagged_traits['unreviewed'].pk]))
        self.assertFalse(context['show_dcc_decision_add_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-decision:new', args=[self.tagged_traits['unreviewed'].pk]))
        self.assertFalse(context['show_dcc_decision_update_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-decision:update', args=[self.tagged_traits['unreviewed'].pk]))
        self.assertTrue(context['show_delete_button'])
        self.assertEqual(context['quality_review_panel_color'], '')
    def test_context_followup_dccreview_no_studyresponse_no_dccdecision(self):
        """Context variables and page content are as expected for this type of tagged trait."""
        response = self.client.get(self.get_url(
            self.tagged_traits['followup_dccreview_no_studyresponse_no_dccdecision'].pk))
        context = response.context
        self.assertEqual(response.status_code, 200)
        self.assertTrue(context['show_quality_review_panel'])
        self.assertContains(response, '#collapse-reviewstatus')
        self.assertNotContains(response, 'not yet been reviewed')
        self.assertFalse(context['show_dcc_review_confirmed'])
        self.assertNotContains(response, 'confirmed by the DCC')
        self.assertTrue(context['show_dcc_review_needs_followup'])
        self.assertContains(response, 'flagged for removal')
        self.assertFalse(context['show_study_response_status'])
        self.assertNotContains(response, 'The study')
        self.assertFalse(context['show_study_agrees'])
        self.assertNotContains(response, 'should be removed')
        self.assertFalse(context['show_study_disagrees'])
        self.assertNotContains(response, 'should remain tagged')
        self.assertFalse(context['show_dcc_decision'])
        self.assertFalse(context['show_decision_remove'])
        self.assertNotContains(response, 'will be removed by the DCC')
        self.assertFalse(context['show_decision_confirm'])
        # self.assertNotContains(response, 'confirmed by the DCC')
        self.assertFalse(context['show_decision_comment'])
        self.assertFalse(context['show_archived'])
        self.assertNotContains(response, 'has been removed by the DCC')
        self.assertFalse(context['show_dcc_review_add_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-review:new',
                    args=[self.tagged_traits['followup_dccreview_no_studyresponse_no_dccdecision'].pk]))
        # An existing DCC review may still be edited while no study response exists.
        self.assertTrue(context['show_dcc_review_update_button'])
        self.assertContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-review:update',
                    args=[self.tagged_traits['followup_dccreview_no_studyresponse_no_dccdecision'].pk]))
        self.assertFalse(context['show_dcc_decision_add_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-decision:new',
                    args=[self.tagged_traits['followup_dccreview_no_studyresponse_no_dccdecision'].pk]))
        self.assertFalse(context['show_dcc_decision_update_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-decision:update',
                    args=[self.tagged_traits['followup_dccreview_no_studyresponse_no_dccdecision'].pk]))
        self.assertFalse(context['show_delete_button'])
        self.assertEqual(context['quality_review_panel_color'], '')
    def test_context_followup_dccreview_disagree_studyresponse_no_dccdecision(self):
        """Context variables and page content are as expected for this type of tagged trait."""
        response = self.client.get(self.get_url(
            self.tagged_traits['followup_dccreview_disagree_studyresponse_no_dccdecision'].pk))
        context = response.context
        self.assertEqual(response.status_code, 200)
        self.assertTrue(context['show_quality_review_panel'])
        self.assertContains(response, '#collapse-reviewstatus')
        self.assertNotContains(response, 'not yet been reviewed')
        self.assertFalse(context['show_dcc_review_confirmed'])
        self.assertNotContains(response, 'confirmed by the DCC')
        self.assertTrue(context['show_dcc_review_needs_followup'])
        self.assertContains(response, 'flagged for removal')
        self.assertTrue(context['show_study_response_status'])
        self.assertContains(response, 'The study')
        self.assertFalse(context['show_study_agrees'])
        self.assertNotContains(response, 'should be removed')
        self.assertTrue(context['show_study_disagrees'])
        self.assertContains(response, 'should remain tagged')
        self.assertContains(
            response,
            self.tagged_traits[
                'followup_dccreview_disagree_studyresponse_no_dccdecision'
            ].dcc_review.study_response.comment)
        self.assertFalse(context['show_dcc_decision'])
        self.assertFalse(context['show_decision_remove'])
        self.assertNotContains(response, 'will be removed by the DCC')
        self.assertFalse(context['show_decision_confirm'])
        # self.assertNotContains(response, 'confirmed by the DCC')
        self.assertFalse(context['show_decision_comment'])
        # Disabled placeholder: no dcc_decision exists in this state, so there is no
        # decision comment to check for.
        # self.assertNotContains(response, self.tagged_traits[''].dcc_review.dcc_decision.comment)
        self.assertFalse(context['show_archived'])
        self.assertNotContains(response, 'has been removed by the DCC')
        self.assertFalse(context['show_dcc_review_add_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-review:new',
                    args=[self.tagged_traits['followup_dccreview_disagree_studyresponse_no_dccdecision'].pk]))
        self.assertFalse(context['show_dcc_review_update_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-review:update',
                    args=[self.tagged_traits['followup_dccreview_disagree_studyresponse_no_dccdecision'].pk]))
        # With a disagreeing study response and no decision yet, the analyst may add one.
        self.assertTrue(context['show_dcc_decision_add_button'])
        self.assertContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-decision:new',
                    args=[self.tagged_traits['followup_dccreview_disagree_studyresponse_no_dccdecision'].pk]))
        self.assertFalse(context['show_dcc_decision_update_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-decision:update',
                    args=[self.tagged_traits['followup_dccreview_disagree_studyresponse_no_dccdecision'].pk]))
        self.assertFalse(context['show_delete_button'])
        self.assertEqual(context['quality_review_panel_color'], '')
    def test_context_confirmed_dccreview(self):
        """Context variables and page content are as expected for this type of tagged trait."""
        response = self.client.get(self.get_url(self.tagged_traits['confirmed_dccreview'].pk))
        context = response.context
        self.assertEqual(response.status_code, 200)
        self.assertTrue(context['show_quality_review_panel'])
        self.assertContains(response, '#collapse-reviewstatus')
        self.assertNotContains(response, 'not yet been reviewed')
        self.assertTrue(context['show_dcc_review_confirmed'])
        self.assertContains(response, 'confirmed by the DCC')
        self.assertFalse(context['show_dcc_review_needs_followup'])
        self.assertNotContains(response, 'flagged for removal')
        self.assertFalse(context['show_study_response_status'])
        self.assertNotContains(response, 'The study')
        self.assertFalse(context['show_study_agrees'])
        self.assertNotContains(response, 'should be removed')
        self.assertFalse(context['show_study_disagrees'])
        self.assertNotContains(response, 'should remain tagged')
        self.assertFalse(context['show_dcc_decision'])
        self.assertFalse(context['show_decision_remove'])
        self.assertNotContains(response, 'will be removed by the DCC')
        self.assertFalse(context['show_decision_confirm'])
        # self.assertNotContains(response, 'confirmed by the DCC')
        self.assertFalse(context['show_decision_comment'])
        self.assertFalse(context['show_archived'])
        self.assertNotContains(response, 'has been removed by the DCC')
        self.assertFalse(context['show_dcc_review_add_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-review:new', args=[self.tagged_traits['confirmed_dccreview'].pk]))
        self.assertTrue(context['show_dcc_review_update_button'])
        self.assertContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-review:update', args=[self.tagged_traits['confirmed_dccreview'].pk]))
        self.assertFalse(context['show_dcc_decision_add_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-decision:new', args=[self.tagged_traits['confirmed_dccreview'].pk]))
        self.assertFalse(context['show_dcc_decision_update_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-decision:update', args=[self.tagged_traits['confirmed_dccreview'].pk]))
        self.assertFalse(context['show_delete_button'])
        self.assertEqual(context['quality_review_panel_color'], 'bg-success')
    def test_context_followup_dccreview_no_studyresponse_remove_dccdecision_archived(self):
        """Context variables and page content are as expected for this type of tagged trait."""
        response = self.client.get(self.get_url(
            self.tagged_traits['followup_dccreview_no_studyresponse_remove_dccdecision_archived'].pk))
        context = response.context
        self.assertEqual(response.status_code, 200)
        self.assertTrue(context['show_quality_review_panel'])
        self.assertContains(response, '#collapse-reviewstatus')
        self.assertNotContains(response, 'not yet been reviewed')
        self.assertFalse(context['show_dcc_review_confirmed'])
        self.assertNotContains(response, 'confirmed by the DCC')
        self.assertTrue(context['show_dcc_review_needs_followup'])
        self.assertContains(response, 'flagged for removal')
        self.assertFalse(context['show_study_response_status'])
        self.assertNotContains(response, 'The study')
        self.assertFalse(context['show_study_agrees'])
        self.assertNotContains(response, 'should be removed')
        self.assertFalse(context['show_study_disagrees'])
        self.assertNotContains(response, 'should remain tagged')
        self.assertTrue(context['show_dcc_decision'])
        self.assertTrue(context['show_decision_remove'])
        self.assertContains(response, 'will be removed by the DCC')
        self.assertFalse(context['show_decision_confirm'])
        # self.assertNotContains(response, 'confirmed by the DCC')
        # Unlike study users, the DCC analyst can see the decision comment text.
        self.assertTrue(context['show_decision_comment'])
        self.assertContains(
            response,
            self.tagged_traits[
                'followup_dccreview_no_studyresponse_remove_dccdecision_archived'].dcc_review.dcc_decision.comment)
        self.assertTrue(context['show_archived'])
        self.assertContains(response, 'has been removed by the DCC')
        self.assertFalse(context['show_dcc_review_add_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-review:new',
                    args=[self.tagged_traits['followup_dccreview_no_studyresponse_remove_dccdecision_archived'].pk]))
        self.assertFalse(context['show_dcc_review_update_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-review:update',
                    args=[self.tagged_traits['followup_dccreview_no_studyresponse_remove_dccdecision_archived'].pk]))
        self.assertFalse(context['show_dcc_decision_add_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-decision:new',
                    args=[self.tagged_traits['followup_dccreview_no_studyresponse_remove_dccdecision_archived'].pk]))
        self.assertTrue(context['show_dcc_decision_update_button'])
        self.assertContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-decision:update',
                    args=[self.tagged_traits['followup_dccreview_no_studyresponse_remove_dccdecision_archived'].pk]))
        self.assertFalse(context['show_delete_button'])
        self.assertEqual(context['quality_review_panel_color'], 'bg-danger')
    def test_context_followup_dccreview_no_studyresponse_confirm_dccdecision(self):
        """Context variables and page content are as expected for this type of tagged trait."""
        response = self.client.get(self.get_url(
            self.tagged_traits['followup_dccreview_no_studyresponse_confirm_dccdecision'].pk))
        context = response.context
        self.assertEqual(response.status_code, 200)
        self.assertTrue(context['show_quality_review_panel'])
        self.assertContains(response, '#collapse-reviewstatus')
        self.assertNotContains(response, 'not yet been reviewed')
        self.assertFalse(context['show_dcc_review_confirmed'])
        # Disabled: 'confirmed by the DCC' legitimately appears below for the confirm
        # decision, so asserting its absence here would fail.
        # self.assertNotContains(response, 'confirmed by the DCC')
        self.assertTrue(context['show_dcc_review_needs_followup'])
        self.assertContains(response, 'flagged for removal')
        self.assertFalse(context['show_study_response_status'])
        self.assertNotContains(response, 'The study')
        self.assertFalse(context['show_study_agrees'])
        self.assertNotContains(response, 'should be removed')
        self.assertFalse(context['show_study_disagrees'])
        self.assertNotContains(response, 'should remain tagged')
        self.assertTrue(context['show_dcc_decision'])
        self.assertFalse(context['show_decision_remove'])
        self.assertNotContains(response, 'will be removed by the DCC')
        self.assertTrue(context['show_decision_confirm'])
        self.assertContains(response, 'confirmed by the DCC')
        self.assertTrue(context['show_decision_comment'])
        self.assertContains(
            response,
            self.tagged_traits[
                'followup_dccreview_no_studyresponse_confirm_dccdecision'].dcc_review.dcc_decision.comment)
        self.assertFalse(context['show_archived'])
        self.assertNotContains(response, 'has been removed by the DCC')
        self.assertFalse(context['show_dcc_review_add_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-review:new',
                    args=[self.tagged_traits['followup_dccreview_no_studyresponse_confirm_dccdecision'].pk]))
        self.assertFalse(context['show_dcc_review_update_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-review:update',
                    args=[self.tagged_traits['followup_dccreview_no_studyresponse_confirm_dccdecision'].pk]))
        self.assertFalse(context['show_dcc_decision_add_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-decision:new',
                    args=[self.tagged_traits['followup_dccreview_no_studyresponse_confirm_dccdecision'].pk]))
        self.assertTrue(context['show_dcc_decision_update_button'])
        self.assertContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-decision:update',
                    args=[self.tagged_traits['followup_dccreview_no_studyresponse_confirm_dccdecision'].pk]))
        self.assertFalse(context['show_delete_button'])
        self.assertEqual(context['quality_review_panel_color'], 'bg-success')
    def test_context_followup_dccreview_agree_studyresponse_archived(self):
        """Context variables and page content are as expected for this type of tagged trait."""
        response = self.client.get(self.get_url(
            self.tagged_traits['followup_dccreview_agree_studyresponse_archived'].pk))
        context = response.context
        self.assertEqual(response.status_code, 200)
        self.assertTrue(context['show_quality_review_panel'])
        self.assertContains(response, '#collapse-reviewstatus')
        self.assertNotContains(response, 'not yet been reviewed')
        self.assertFalse(context['show_dcc_review_confirmed'])
        self.assertNotContains(response, 'confirmed by the DCC')
        self.assertTrue(context['show_dcc_review_needs_followup'])
        self.assertContains(response, 'flagged for removal')
        self.assertTrue(context['show_study_response_status'])
        self.assertContains(response, 'The study')
        self.assertTrue(context['show_study_agrees'])
        self.assertContains(response, 'should be removed')
        self.assertFalse(context['show_study_disagrees'])
        self.assertNotContains(response, 'should remain tagged')
        self.assertFalse(context['show_dcc_decision'])
        self.assertFalse(context['show_decision_remove'])
        self.assertNotContains(response, 'will be removed by the DCC')
        self.assertFalse(context['show_decision_confirm'])
        self.assertFalse(context['show_decision_comment'])
        self.assertTrue(context['show_archived'])
        self.assertContains(response, 'has been removed by the DCC')
        self.assertFalse(context['show_dcc_review_add_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-review:new',
                    args=[self.tagged_traits['followup_dccreview_agree_studyresponse_archived'].pk]))
        self.assertFalse(context['show_dcc_review_update_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-review:update',
                    args=[self.tagged_traits['followup_dccreview_agree_studyresponse_archived'].pk]))
        self.assertFalse(context['show_dcc_decision_add_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-decision:new',
                    args=[self.tagged_traits['followup_dccreview_agree_studyresponse_archived'].pk]))
        self.assertFalse(context['show_dcc_decision_update_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-decision:update',
                    args=[self.tagged_traits['followup_dccreview_agree_studyresponse_archived'].pk]))
        self.assertFalse(context['show_delete_button'])
        self.assertEqual(context['quality_review_panel_color'], 'bg-danger')
    def test_context_followup_dccreview_disagree_studyresponse_remove_dccdecision_archived(self):
        """Context variables and page content are as expected for this type of tagged trait."""
        response = self.client.get(self.get_url(
            self.tagged_traits['followup_dccreview_disagree_studyresponse_remove_dccdecision_archived'].pk))
        context = response.context
        self.assertEqual(response.status_code, 200)
        self.assertTrue(context['show_quality_review_panel'])
        self.assertContains(response, '#collapse-reviewstatus')
        self.assertNotContains(response, 'not yet been reviewed')
        self.assertFalse(context['show_dcc_review_confirmed'])
        self.assertNotContains(response, 'confirmed by the DCC')
        self.assertTrue(context['show_dcc_review_needs_followup'])
        self.assertContains(response, 'flagged for removal')
        self.assertTrue(context['show_study_response_status'])
        self.assertContains(response, 'The study')
        self.assertFalse(context['show_study_agrees'])
        self.assertNotContains(response, 'should be removed')
        self.assertTrue(context['show_study_disagrees'])
        self.assertContains(response, 'should remain tagged')
        self.assertContains(
            response,
            self.tagged_traits[
                'followup_dccreview_disagree_studyresponse_remove_dccdecision_archived'
            ].dcc_review.study_response.comment)
        self.assertTrue(context['show_dcc_decision'])
        self.assertTrue(context['show_decision_remove'])
        self.assertContains(response, 'will be removed by the DCC')
        self.assertFalse(context['show_decision_confirm'])
        # Unlike study users, the DCC analyst can see the decision comment text.
        self.assertTrue(context['show_decision_comment'])
        self.assertContains(
            response,
            self.tagged_traits[
                'followup_dccreview_disagree_studyresponse_remove_dccdecision_archived'
            ].dcc_review.dcc_decision.comment)
        self.assertTrue(context['show_archived'])
        self.assertContains(response, 'has been removed by the DCC')
        self.assertFalse(context['show_dcc_review_add_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-review:new',
                    args=[self.tagged_traits[
                        'followup_dccreview_disagree_studyresponse_remove_dccdecision_archived'
                    ].pk]))
        self.assertFalse(context['show_dcc_review_update_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-review:update',
                    args=[self.tagged_traits[
                        'followup_dccreview_disagree_studyresponse_remove_dccdecision_archived'
                    ].pk]))
        self.assertFalse(context['show_dcc_decision_add_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-decision:new',
                    args=[self.tagged_traits[
                        'followup_dccreview_disagree_studyresponse_remove_dccdecision_archived'
                    ].pk]))
        self.assertTrue(context['show_dcc_decision_update_button'])
        self.assertContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-decision:update',
                    args=[self.tagged_traits[
                        'followup_dccreview_disagree_studyresponse_remove_dccdecision_archived'
                    ].pk]))
        self.assertFalse(context['show_delete_button'])
        self.assertEqual(context['quality_review_panel_color'], 'bg-danger')
    def test_context_followup_dccreview_disagree_studyresponse_confirm_dccdecision(self):
        """Context variables and page content are as expected for this type of tagged trait."""
        response = self.client.get(self.get_url(
            self.tagged_traits['followup_dccreview_disagree_studyresponse_confirm_dccdecision'].pk))
        context = response.context
        self.assertEqual(response.status_code, 200)
        self.assertTrue(context['show_quality_review_panel'])
        self.assertContains(response, '#collapse-reviewstatus')
        self.assertNotContains(response, 'not yet been reviewed')
        self.assertFalse(context['show_dcc_review_confirmed'])
        self.assertTrue(context['show_dcc_review_needs_followup'])
        self.assertContains(response, 'flagged for removal')
        self.assertTrue(context['show_study_response_status'])
        self.assertContains(response, 'The study')
        self.assertFalse(context['show_study_agrees'])
        self.assertNotContains(response, 'should be removed')
        self.assertTrue(context['show_study_disagrees'])
        self.assertContains(response, 'should remain tagged')
        self.assertContains(
            response,
            self.tagged_traits[
                'followup_dccreview_disagree_studyresponse_confirm_dccdecision'].dcc_review.study_response.comment)
        self.assertTrue(context['show_dcc_decision'])
        self.assertFalse(context['show_decision_remove'])
        self.assertNotContains(response, 'will be removed by the DCC')
        self.assertTrue(context['show_decision_confirm'])
        self.assertContains(response, 'confirmed by the DCC')
        # Unlike study users, the DCC analyst can see the decision comment text.
        self.assertTrue(context['show_decision_comment'])
        self.assertContains(
            response,
            self.tagged_traits[
                'followup_dccreview_disagree_studyresponse_confirm_dccdecision'].dcc_review.dcc_decision.comment)
        self.assertFalse(context['show_archived'])
        self.assertNotContains(response, 'has been removed by the DCC')
        self.assertFalse(context['show_dcc_review_add_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-review:new',
                    args=[self.tagged_traits['followup_dccreview_disagree_studyresponse_confirm_dccdecision'].pk]))
        self.assertFalse(context['show_dcc_review_update_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-review:update',
                    args=[self.tagged_traits['followup_dccreview_disagree_studyresponse_confirm_dccdecision'].pk]))
        self.assertFalse(context['show_dcc_decision_add_button'])
        self.assertNotContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-decision:new',
                    args=[self.tagged_traits['followup_dccreview_disagree_studyresponse_confirm_dccdecision'].pk]))
        self.assertTrue(context['show_dcc_decision_update_button'])
        self.assertContains(
            response,
            reverse('tags:tagged-traits:pk:dcc-decision:update',
                    args=[self.tagged_traits['followup_dccreview_disagree_studyresponse_confirm_dccdecision'].pk]))
        self.assertFalse(context['show_delete_button'])
        self.assertEqual(context['quality_review_panel_color'], 'bg-success')
class TaggedTraitDetailDCCDeveloperTest(TaggedTraitDetailTestsMixin, DCCDeveloperLoginTestCase):
def test_context_unreviewed(self):
"""Context variables and page content are as expected for this type of tagged trait."""
response = self.client.get(self.get_url(self.tagged_traits['unreviewed'].pk))
context = response.context
self.assertEqual(response.status_code, 200)
self.assertTrue(context['show_quality_review_panel'])
self.assertContains(response, '#collapse-reviewstatus')
self.assertContains(response, 'not yet been reviewed')
self.assertFalse(context['show_dcc_review_confirmed'])
self.assertNotContains(response, 'confirmed by the DCC')
self.assertFalse(context['show_dcc_review_needs_followup'])
self.assertNotContains(response, 'flagged for removal')
self.assertFalse(context['show_study_response_status'])
self.assertNotContains(response, 'The study')
self.assertFalse(context['show_study_agrees'])
self.assertNotContains(response, 'should be removed')
self.assertFalse(context['show_study_disagrees'])
self.assertNotContains(response, 'should remain tagged')
self.assertFalse(context['show_dcc_decision'])
self.assertFalse(context['show_decision_remove'])
self.assertNotContains(response, 'will be removed by the DCC')
self.assertFalse(context['show_decision_confirm'])
# self.assertNotContains(response, 'confirmed by the DCC')
self.assertFalse(context['show_decision_comment'])
self.assertFalse(context['show_archived'])
self.assertNotContains(response, 'has been removed by the DCC')
self.assertTrue(context['show_dcc_review_add_button'])
self.assertContains(
response,
reverse('tags:tagged-traits:pk:dcc-review:new', args=[self.tagged_traits['unreviewed'].pk]))
self.assertFalse(context['show_dcc_review_update_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-review:update', args=[self.tagged_traits['unreviewed'].pk]))
self.assertFalse(context['show_dcc_decision_add_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-decision:new', args=[self.tagged_traits['unreviewed'].pk]))
self.assertFalse(context['show_dcc_decision_update_button'])
self.assertNotContains(
response,
reverse('tags:tagged-traits:pk:dcc-decision:update', args=[self.tagged_traits['unreviewed'].pk]))
self.assertTrue(context['show_delete_button'])
self.assertEqual(context['quality_review_panel_color'], '')
def test_context_followup_dccreview_no_studyresponse_no_dccdecision(self):
    """Context variables and page content are as expected for this type of tagged trait."""
    tagged_trait = self.tagged_traits['followup_dccreview_no_studyresponse_no_dccdecision']
    response = self.client.get(self.get_url(tagged_trait.pk))
    context = response.context
    self.assertEqual(response.status_code, 200)
    # The quality review panel is shown with only the "needs followup" status.
    self.assertTrue(context['show_quality_review_panel'])
    self.assertContains(response, '#collapse-reviewstatus')
    self.assertNotContains(response, 'not yet been reviewed')
    self.assertFalse(context['show_dcc_review_confirmed'])
    self.assertNotContains(response, 'confirmed by the DCC')
    self.assertTrue(context['show_dcc_review_needs_followup'])
    self.assertContains(response, 'flagged for removal')
    # No study response exists yet.
    self.assertFalse(context['show_study_response_status'])
    self.assertNotContains(response, 'The study')
    self.assertFalse(context['show_study_agrees'])
    self.assertNotContains(response, 'should be removed')
    self.assertFalse(context['show_study_disagrees'])
    self.assertNotContains(response, 'should remain tagged')
    # No DCC decision exists yet.
    self.assertFalse(context['show_dcc_decision'])
    self.assertFalse(context['show_decision_remove'])
    self.assertNotContains(response, 'will be removed by the DCC')
    self.assertFalse(context['show_decision_confirm'])
    # 'confirmed by the DCC' absence was already asserted above for the review status.
    self.assertFalse(context['show_decision_comment'])
    self.assertFalse(context['show_archived'])
    self.assertNotContains(response, 'has been removed by the DCC')
    # Only the DCC review update button should appear.
    self.assertFalse(context['show_dcc_review_add_button'])
    self.assertNotContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-review:new', args=[tagged_trait.pk]))
    self.assertTrue(context['show_dcc_review_update_button'])
    self.assertContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-review:update', args=[tagged_trait.pk]))
    self.assertFalse(context['show_dcc_decision_add_button'])
    self.assertNotContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-decision:new', args=[tagged_trait.pk]))
    self.assertFalse(context['show_dcc_decision_update_button'])
    self.assertNotContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-decision:update', args=[tagged_trait.pk]))
    self.assertFalse(context['show_delete_button'])
    self.assertEqual(context['quality_review_panel_color'], '')
def test_context_followup_dccreview_disagree_studyresponse_no_dccdecision(self):
    """Context variables and page content are as expected for this type of tagged trait."""
    tagged_trait = self.tagged_traits['followup_dccreview_disagree_studyresponse_no_dccdecision']
    response = self.client.get(self.get_url(tagged_trait.pk))
    context = response.context
    self.assertEqual(response.status_code, 200)
    # Quality review panel shows the "needs followup" review status.
    self.assertTrue(context['show_quality_review_panel'])
    self.assertContains(response, '#collapse-reviewstatus')
    self.assertNotContains(response, 'not yet been reviewed')
    self.assertFalse(context['show_dcc_review_confirmed'])
    self.assertNotContains(response, 'confirmed by the DCC')
    self.assertTrue(context['show_dcc_review_needs_followup'])
    self.assertContains(response, 'flagged for removal')
    # The study disagreed, so the disagreement text and comment are shown.
    self.assertTrue(context['show_study_response_status'])
    self.assertContains(response, 'The study')
    self.assertFalse(context['show_study_agrees'])
    self.assertNotContains(response, 'should be removed')
    self.assertTrue(context['show_study_disagrees'])
    self.assertContains(response, 'should remain tagged')
    self.assertContains(response, tagged_trait.dcc_review.study_response.comment)
    # No DCC decision exists yet.
    self.assertFalse(context['show_dcc_decision'])
    self.assertFalse(context['show_decision_remove'])
    self.assertNotContains(response, 'will be removed by the DCC')
    self.assertFalse(context['show_decision_confirm'])
    # 'confirmed by the DCC' absence was already asserted above for the review status.
    self.assertFalse(context['show_decision_comment'])
    self.assertFalse(context['show_archived'])
    self.assertNotContains(response, 'has been removed by the DCC')
    # Only the DCC decision add button should appear.
    self.assertFalse(context['show_dcc_review_add_button'])
    self.assertNotContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-review:new', args=[tagged_trait.pk]))
    self.assertFalse(context['show_dcc_review_update_button'])
    self.assertNotContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-review:update', args=[tagged_trait.pk]))
    self.assertTrue(context['show_dcc_decision_add_button'])
    self.assertContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-decision:new', args=[tagged_trait.pk]))
    self.assertFalse(context['show_dcc_decision_update_button'])
    self.assertNotContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-decision:update', args=[tagged_trait.pk]))
    self.assertFalse(context['show_delete_button'])
    self.assertEqual(context['quality_review_panel_color'], '')
def test_context_confirmed_dccreview(self):
    """Context variables and page content are as expected for this type of tagged trait."""
    tagged_trait = self.tagged_traits['confirmed_dccreview']
    response = self.client.get(self.get_url(tagged_trait.pk))
    context = response.context
    self.assertEqual(response.status_code, 200)
    # Quality review panel shows only the "confirmed" review status.
    self.assertTrue(context['show_quality_review_panel'])
    self.assertContains(response, '#collapse-reviewstatus')
    self.assertNotContains(response, 'not yet been reviewed')
    self.assertTrue(context['show_dcc_review_confirmed'])
    self.assertContains(response, 'confirmed by the DCC')
    self.assertFalse(context['show_dcc_review_needs_followup'])
    self.assertNotContains(response, 'flagged for removal')
    # No study response and no DCC decision for a confirmed review.
    self.assertFalse(context['show_study_response_status'])
    self.assertNotContains(response, 'The study')
    self.assertFalse(context['show_study_agrees'])
    self.assertNotContains(response, 'should be removed')
    self.assertFalse(context['show_study_disagrees'])
    self.assertNotContains(response, 'should remain tagged')
    self.assertFalse(context['show_dcc_decision'])
    self.assertFalse(context['show_decision_remove'])
    self.assertNotContains(response, 'will be removed by the DCC')
    self.assertFalse(context['show_decision_confirm'])
    # 'confirmed by the DCC' was asserted present above for the review status.
    self.assertFalse(context['show_decision_comment'])
    self.assertFalse(context['show_archived'])
    self.assertNotContains(response, 'has been removed by the DCC')
    # Only the DCC review update button should appear.
    self.assertFalse(context['show_dcc_review_add_button'])
    self.assertNotContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-review:new', args=[tagged_trait.pk]))
    self.assertTrue(context['show_dcc_review_update_button'])
    self.assertContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-review:update', args=[tagged_trait.pk]))
    self.assertFalse(context['show_dcc_decision_add_button'])
    self.assertNotContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-decision:new', args=[tagged_trait.pk]))
    self.assertFalse(context['show_dcc_decision_update_button'])
    self.assertNotContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-decision:update', args=[tagged_trait.pk]))
    self.assertFalse(context['show_delete_button'])
    self.assertEqual(context['quality_review_panel_color'], 'bg-success')
def test_context_followup_dccreview_no_studyresponse_remove_dccdecision_archived(self):
    """Context variables and page content are as expected for this type of tagged trait."""
    tagged_trait = self.tagged_traits['followup_dccreview_no_studyresponse_remove_dccdecision_archived']
    response = self.client.get(self.get_url(tagged_trait.pk))
    context = response.context
    self.assertEqual(response.status_code, 200)
    # Quality review panel shows the "needs followup" review status.
    self.assertTrue(context['show_quality_review_panel'])
    self.assertContains(response, '#collapse-reviewstatus')
    self.assertNotContains(response, 'not yet been reviewed')
    self.assertFalse(context['show_dcc_review_confirmed'])
    self.assertNotContains(response, 'confirmed by the DCC')
    self.assertTrue(context['show_dcc_review_needs_followup'])
    self.assertContains(response, 'flagged for removal')
    # No study response exists.
    self.assertFalse(context['show_study_response_status'])
    self.assertNotContains(response, 'The study')
    self.assertFalse(context['show_study_agrees'])
    self.assertNotContains(response, 'should be removed')
    self.assertFalse(context['show_study_disagrees'])
    self.assertNotContains(response, 'should remain tagged')
    # The DCC decided to remove, and the tagged trait has been archived.
    self.assertTrue(context['show_dcc_decision'])
    self.assertTrue(context['show_decision_remove'])
    self.assertContains(response, 'will be removed by the DCC')
    self.assertFalse(context['show_decision_confirm'])
    # 'confirmed by the DCC' absence was already asserted above for the review status.
    self.assertTrue(context['show_decision_comment'])
    self.assertContains(response, tagged_trait.dcc_review.dcc_decision.comment)
    self.assertTrue(context['show_archived'])
    self.assertContains(response, 'has been removed by the DCC')
    # Only the DCC decision update button should appear.
    self.assertFalse(context['show_dcc_review_add_button'])
    self.assertNotContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-review:new', args=[tagged_trait.pk]))
    self.assertFalse(context['show_dcc_review_update_button'])
    self.assertNotContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-review:update', args=[tagged_trait.pk]))
    self.assertFalse(context['show_dcc_decision_add_button'])
    self.assertNotContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-decision:new', args=[tagged_trait.pk]))
    self.assertTrue(context['show_dcc_decision_update_button'])
    self.assertContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-decision:update', args=[tagged_trait.pk]))
    self.assertFalse(context['show_delete_button'])
    self.assertEqual(context['quality_review_panel_color'], 'bg-danger')
def test_context_followup_dccreview_no_studyresponse_confirm_dccdecision(self):
    """Context variables and page content are as expected for this type of tagged trait."""
    tagged_trait = self.tagged_traits['followup_dccreview_no_studyresponse_confirm_dccdecision']
    response = self.client.get(self.get_url(tagged_trait.pk))
    context = response.context
    self.assertEqual(response.status_code, 200)
    # Quality review panel shows the "needs followup" review status.
    self.assertTrue(context['show_quality_review_panel'])
    self.assertContains(response, '#collapse-reviewstatus')
    self.assertNotContains(response, 'not yet been reviewed')
    self.assertFalse(context['show_dcc_review_confirmed'])
    # Cannot assert 'confirmed by the DCC' is absent here: the decision-confirm text below contains it.
    self.assertTrue(context['show_dcc_review_needs_followup'])
    self.assertContains(response, 'flagged for removal')
    # No study response exists.
    self.assertFalse(context['show_study_response_status'])
    self.assertNotContains(response, 'The study')
    self.assertFalse(context['show_study_agrees'])
    self.assertNotContains(response, 'should be removed')
    self.assertFalse(context['show_study_disagrees'])
    self.assertNotContains(response, 'should remain tagged')
    # The DCC decided to confirm, so the tagged trait is not archived.
    self.assertTrue(context['show_dcc_decision'])
    self.assertFalse(context['show_decision_remove'])
    self.assertNotContains(response, 'will be removed by the DCC')
    self.assertTrue(context['show_decision_confirm'])
    self.assertContains(response, 'confirmed by the DCC')
    self.assertTrue(context['show_decision_comment'])
    self.assertContains(response, tagged_trait.dcc_review.dcc_decision.comment)
    self.assertFalse(context['show_archived'])
    self.assertNotContains(response, 'has been removed by the DCC')
    # Only the DCC decision update button should appear.
    self.assertFalse(context['show_dcc_review_add_button'])
    self.assertNotContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-review:new', args=[tagged_trait.pk]))
    self.assertFalse(context['show_dcc_review_update_button'])
    self.assertNotContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-review:update', args=[tagged_trait.pk]))
    self.assertFalse(context['show_dcc_decision_add_button'])
    self.assertNotContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-decision:new', args=[tagged_trait.pk]))
    self.assertTrue(context['show_dcc_decision_update_button'])
    self.assertContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-decision:update', args=[tagged_trait.pk]))
    self.assertFalse(context['show_delete_button'])
    self.assertEqual(context['quality_review_panel_color'], 'bg-success')
def test_context_followup_dccreview_agree_studyresponse_archived(self):
    """Context variables and page content are as expected for this type of tagged trait."""
    tagged_trait = self.tagged_traits['followup_dccreview_agree_studyresponse_archived']
    response = self.client.get(self.get_url(tagged_trait.pk))
    context = response.context
    self.assertEqual(response.status_code, 200)
    # Quality review panel shows the "needs followup" review status.
    self.assertTrue(context['show_quality_review_panel'])
    self.assertContains(response, '#collapse-reviewstatus')
    self.assertNotContains(response, 'not yet been reviewed')
    self.assertFalse(context['show_dcc_review_confirmed'])
    self.assertNotContains(response, 'confirmed by the DCC')
    self.assertTrue(context['show_dcc_review_needs_followup'])
    self.assertContains(response, 'flagged for removal')
    # The study agreed with removal, so the tagged trait was archived without a DCC decision.
    self.assertTrue(context['show_study_response_status'])
    self.assertContains(response, 'The study')
    self.assertTrue(context['show_study_agrees'])
    self.assertContains(response, 'should be removed')
    self.assertFalse(context['show_study_disagrees'])
    self.assertNotContains(response, 'should remain tagged')
    self.assertFalse(context['show_dcc_decision'])
    self.assertFalse(context['show_decision_remove'])
    self.assertNotContains(response, 'will be removed by the DCC')
    self.assertFalse(context['show_decision_confirm'])
    self.assertFalse(context['show_decision_comment'])
    self.assertTrue(context['show_archived'])
    self.assertContains(response, 'has been removed by the DCC')
    # No add/update/delete buttons should appear for an archived tagged trait with study agreement.
    self.assertFalse(context['show_dcc_review_add_button'])
    self.assertNotContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-review:new', args=[tagged_trait.pk]))
    self.assertFalse(context['show_dcc_review_update_button'])
    self.assertNotContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-review:update', args=[tagged_trait.pk]))
    self.assertFalse(context['show_dcc_decision_add_button'])
    self.assertNotContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-decision:new', args=[tagged_trait.pk]))
    self.assertFalse(context['show_dcc_decision_update_button'])
    self.assertNotContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-decision:update', args=[tagged_trait.pk]))
    self.assertFalse(context['show_delete_button'])
    self.assertEqual(context['quality_review_panel_color'], 'bg-danger')
def test_context_followup_dccreview_disagree_studyresponse_remove_dccdecision_archived(self):
    """Context variables and page content are as expected for this type of tagged trait."""
    tagged_trait = self.tagged_traits['followup_dccreview_disagree_studyresponse_remove_dccdecision_archived']
    response = self.client.get(self.get_url(tagged_trait.pk))
    context = response.context
    self.assertEqual(response.status_code, 200)
    # Quality review panel shows the "needs followup" review status.
    self.assertTrue(context['show_quality_review_panel'])
    self.assertContains(response, '#collapse-reviewstatus')
    self.assertNotContains(response, 'not yet been reviewed')
    self.assertFalse(context['show_dcc_review_confirmed'])
    self.assertNotContains(response, 'confirmed by the DCC')
    self.assertTrue(context['show_dcc_review_needs_followup'])
    self.assertContains(response, 'flagged for removal')
    # The study disagreed, so the disagreement text and comment are shown.
    self.assertTrue(context['show_study_response_status'])
    self.assertContains(response, 'The study')
    self.assertFalse(context['show_study_agrees'])
    self.assertNotContains(response, 'should be removed')
    self.assertTrue(context['show_study_disagrees'])
    self.assertContains(response, 'should remain tagged')
    self.assertContains(response, tagged_trait.dcc_review.study_response.comment)
    # The DCC overruled the study and decided to remove; the tagged trait is archived.
    self.assertTrue(context['show_dcc_decision'])
    self.assertTrue(context['show_decision_remove'])
    self.assertContains(response, 'will be removed by the DCC')
    self.assertFalse(context['show_decision_confirm'])
    self.assertTrue(context['show_decision_comment'])
    self.assertContains(response, tagged_trait.dcc_review.dcc_decision.comment)
    self.assertTrue(context['show_archived'])
    self.assertContains(response, 'has been removed by the DCC')
    # Only the DCC decision update button should appear.
    self.assertFalse(context['show_dcc_review_add_button'])
    self.assertNotContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-review:new', args=[tagged_trait.pk]))
    self.assertFalse(context['show_dcc_review_update_button'])
    self.assertNotContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-review:update', args=[tagged_trait.pk]))
    self.assertFalse(context['show_dcc_decision_add_button'])
    self.assertNotContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-decision:new', args=[tagged_trait.pk]))
    self.assertTrue(context['show_dcc_decision_update_button'])
    self.assertContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-decision:update', args=[tagged_trait.pk]))
    self.assertFalse(context['show_delete_button'])
    self.assertEqual(context['quality_review_panel_color'], 'bg-danger')
def test_context_followup_dccreview_disagree_studyresponse_confirm_dccdecision(self):
    """Context variables and page content are as expected for this type of tagged trait."""
    tagged_trait = self.tagged_traits['followup_dccreview_disagree_studyresponse_confirm_dccdecision']
    response = self.client.get(self.get_url(tagged_trait.pk))
    context = response.context
    self.assertEqual(response.status_code, 200)
    # Quality review panel shows the "needs followup" review status.
    self.assertTrue(context['show_quality_review_panel'])
    self.assertContains(response, '#collapse-reviewstatus')
    self.assertNotContains(response, 'not yet been reviewed')
    self.assertFalse(context['show_dcc_review_confirmed'])
    # Cannot assert 'confirmed by the DCC' is absent here: the decision-confirm text below contains it.
    self.assertTrue(context['show_dcc_review_needs_followup'])
    self.assertContains(response, 'flagged for removal')
    # The study disagreed, so the disagreement text and comment are shown.
    self.assertTrue(context['show_study_response_status'])
    self.assertContains(response, 'The study')
    self.assertFalse(context['show_study_agrees'])
    self.assertNotContains(response, 'should be removed')
    self.assertTrue(context['show_study_disagrees'])
    self.assertContains(response, 'should remain tagged')
    self.assertContains(response, tagged_trait.dcc_review.study_response.comment)
    # The DCC decided to confirm, so the tagged trait is not archived.
    self.assertTrue(context['show_dcc_decision'])
    self.assertFalse(context['show_decision_remove'])
    self.assertNotContains(response, 'will be removed by the DCC')
    self.assertTrue(context['show_decision_confirm'])
    self.assertContains(response, 'confirmed by the DCC')
    self.assertTrue(context['show_decision_comment'])
    self.assertContains(response, tagged_trait.dcc_review.dcc_decision.comment)
    self.assertFalse(context['show_archived'])
    self.assertNotContains(response, 'has been removed by the DCC')
    # Only the DCC decision update button should appear.
    self.assertFalse(context['show_dcc_review_add_button'])
    self.assertNotContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-review:new', args=[tagged_trait.pk]))
    self.assertFalse(context['show_dcc_review_update_button'])
    self.assertNotContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-review:update', args=[tagged_trait.pk]))
    self.assertFalse(context['show_dcc_decision_add_button'])
    self.assertNotContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-decision:new', args=[tagged_trait.pk]))
    self.assertTrue(context['show_dcc_decision_update_button'])
    self.assertContains(
        response,
        reverse('tags:tagged-traits:pk:dcc-decision:update', args=[tagged_trait.pk]))
    self.assertFalse(context['show_delete_button'])
    self.assertEqual(context['quality_review_panel_color'], 'bg-success')
class TaggedTraitDetailOtherUserTest(UserLoginTestCase):
    """Permission tests of the tagged trait detail view for users who should not have access."""

    def setUp(self):
        super().setUp()
        self.tagged_trait = factories.TaggedTraitFactory.create()

    def get_url(self, *args):
        """Return the url for the tagged trait detail page."""
        return reverse('tags:tagged-traits:pk:detail', args=args)

    def test_forbidden_non_taggers(self):
        """Returns 403 code when the user is not in phenotype_taggers."""
        taggers_group = Group.objects.get(name='phenotype_taggers')
        self.user.groups.remove(taggers_group)
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        self.assertEqual(response.status_code, 403)

    def test_forbidden_empty_taggable_studies(self):
        """Returns 403 code when the user has no taggable_studies."""
        self.user.profile.taggable_studies.clear()
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        self.assertEqual(response.status_code, 403)

    def test_forbidden_wrong_taggable_study(self):
        """Returns 403 code when the user is from a different study."""
        # The factory creates a tagged trait for a brand-new study the user can't tag.
        unrelated_tagged_trait = factories.TaggedTraitFactory.create()
        response = self.client.get(self.get_url(unrelated_tagged_trait.pk))
        self.assertEqual(response.status_code, 403)
class TaggedTraitTagCountsByStudyTest(UserLoginTestCase):
    """Tests of the view showing tagged trait counts per tag, grouped by study."""

    def setUp(self):
        # Modernized from super(TaggedTraitTagCountsByStudyTest, self) to match
        # the zero-argument super() style used elsewhere in this file.
        super().setUp()

    def make_fake_data(self):
        """Create two tags, two studies, and two tagged traits per tag-study pair."""
        self.tags = factories.TagFactory.create_batch(2)
        self.studies = StudyFactory.create_batch(2)
        # Flat list of all created tagged traits.
        self.taggedtraits = []
        for tag in self.tags:
            for study in self.studies:
                self.taggedtraits.extend(
                    factories.TaggedTraitFactory.create_batch(
                        2, tag=tag, trait__source_dataset__source_study_version__study=study)
                )

    def get_url(self, *args):
        """Return the url for the tagged trait counts by study view."""
        return reverse('tags:tagged-traits:by-study')

    def test_view_success_code(self):
        """Returns successful response code."""
        response = self.client.get(self.get_url())
        self.assertEqual(response.status_code, 200)

    def test_context_data(self):
        """View has appropriate data in the context."""
        response = self.client.get(self.get_url())
        self.assertIn('taggedtrait_tag_counts_by_study', response.context)

    def test_context_data_is_correct(self):
        """Data in the context is correct."""
        self.make_fake_data()
        response = self.client.get(self.get_url())
        context = response.context
        for study in context['taggedtrait_tag_counts_by_study']:
            for tag in study[1]:
                # Recompute the expected count directly from the database.
                tag_study_count = models.TaggedTrait.objects.filter(
                    tag__pk=tag['tag_pk'],
                    trait__source_dataset__source_study_version__study__pk=study[0]['study_pk']).count()
                self.assertEqual(tag['tt_count'], tag_study_count)

    def test_count_does_not_include_archived_taggedtraits(self):
        """Counts do not include archived tagged traits."""
        tag = factories.TagFactory.create()
        study = StudyFactory.create()
        # One archived and one non-archived tagged trait; only the latter should be counted.
        factories.TaggedTraitFactory.create(
            tag=tag, trait__source_dataset__source_study_version__study=study, archived=True)
        factories.TaggedTraitFactory.create(
            tag=tag, trait__source_dataset__source_study_version__study=study, archived=False)
        response = self.client.get(self.get_url())
        counts = response.context['taggedtrait_tag_counts_by_study']
        self.assertEqual(counts[0][1][0]['tt_count'], 1)

    def test_no_deprecated_traits(self):
        """Counts exclude traits tagged from deprecated study versions."""
        tag = factories.TagFactory.create()
        study = StudyFactory.create()
        current_study_version = SourceStudyVersionFactory.create(study=study, i_version=5)
        old_study_version = SourceStudyVersionFactory.create(study=study, i_version=4, i_is_deprecated=True)
        current_trait = SourceTraitFactory.create(source_dataset__source_study_version=current_study_version)
        old_trait = SourceTraitFactory.create(source_dataset__source_study_version=old_study_version)
        # Only the tagged trait from the current study version should be counted.
        factories.TaggedTraitFactory.create(trait=current_trait, tag=tag)
        factories.TaggedTraitFactory.create(trait=old_trait, tag=tag)
        response = self.client.get(self.get_url())
        counts = response.context['taggedtrait_tag_counts_by_study']
        self.assertEqual(counts[0][1][0]['tt_count'], 1)

    def test_no_deprecated_traits_with_same_version_number(self):
        """Counts exclude traits tagged from deprecated study versions even with same version number."""
        # This directly addresses the unusual CARDIA situation where there are two study versions with the
        # same version number, one of which is deprecated.
        tag = factories.TagFactory.create()
        study = StudyFactory.create()
        current_study_version = SourceStudyVersionFactory.create(study=study, i_version=5)
        old_study_version = SourceStudyVersionFactory.create(study=study, i_version=5, i_is_deprecated=True)
        current_trait = SourceTraitFactory.create(source_dataset__source_study_version=current_study_version)
        old_trait = SourceTraitFactory.create(source_dataset__source_study_version=old_study_version)
        # Only the tagged trait from the non-deprecated study version should be counted.
        factories.TaggedTraitFactory.create(trait=current_trait, tag=tag)
        factories.TaggedTraitFactory.create(trait=old_trait, tag=tag)
        response = self.client.get(self.get_url())
        counts = response.context['taggedtrait_tag_counts_by_study']
        self.assertEqual(counts[0][1][0]['tt_count'], 1)
class TaggedTraitStudyCountsByTagTest(UserLoginTestCase):
    """Tests of the view showing tagged trait counts per study, grouped by tag."""

    def setUp(self):
        # Modernized from super(TaggedTraitStudyCountsByTagTest, self) to match
        # the zero-argument super() style used elsewhere in this file.
        super().setUp()

    def make_fake_data(self):
        """Create two tags, two studies, and two tagged traits per tag-study pair."""
        self.tags = factories.TagFactory.create_batch(2)
        self.studies = StudyFactory.create_batch(2)
        # Use extend (not append) so self.taggedtraits is a flat list of tagged
        # traits, consistent with TaggedTraitTagCountsByStudyTest.make_fake_data;
        # append stored each create_batch result as a nested list.
        self.taggedtraits = []
        for tag in self.tags:
            for study in self.studies:
                self.taggedtraits.extend(
                    factories.TaggedTraitFactory.create_batch(
                        2, tag=tag, trait__source_dataset__source_study_version__study=study)
                )

    def get_url(self, *args):
        """Return the url for the tagged trait counts by tag view."""
        return reverse('tags:tagged-traits:by-tag')

    def test_view_success_code(self):
        """Returns successful response code."""
        response = self.client.get(self.get_url())
        self.assertEqual(response.status_code, 200)

    def test_context_data(self):
        """View has appropriate data in the context."""
        response = self.client.get(self.get_url())
        self.assertIn('taggedtrait_study_counts_by_tag', response.context)

    def test_context_data_is_correct(self):
        """Data in the context is correct."""
        self.make_fake_data()
        response = self.client.get(self.get_url())
        context = response.context
        for tag in context['taggedtrait_study_counts_by_tag']:
            for study in tag[1]:
                # Recompute the expected count directly from the database.
                study_tag_count = models.TaggedTrait.objects.filter(
                    tag__pk=tag[0]['tag_pk'],
                    trait__source_dataset__source_study_version__study__pk=study['study_pk']).count()
                self.assertEqual(study['tt_count'], study_tag_count)

    def test_count_does_not_include_archived_taggedtraits(self):
        """Counts do not include archived tagged traits."""
        tag = factories.TagFactory.create()
        study = StudyFactory.create()
        # One archived and one non-archived tagged trait; only the latter should be counted.
        factories.TaggedTraitFactory.create(
            tag=tag, trait__source_dataset__source_study_version__study=study, archived=True)
        factories.TaggedTraitFactory.create(
            tag=tag, trait__source_dataset__source_study_version__study=study, archived=False)
        response = self.client.get(self.get_url())
        counts = response.context['taggedtrait_study_counts_by_tag']
        self.assertEqual(counts[0][1][0]['tt_count'], 1)

    def test_no_deprecated_traits(self):
        """Counts exclude traits tagged from deprecated study versions."""
        tag = factories.TagFactory.create()
        study = StudyFactory.create()
        current_study_version = SourceStudyVersionFactory.create(study=study, i_version=5)
        old_study_version = SourceStudyVersionFactory.create(study=study, i_version=4, i_is_deprecated=True)
        current_trait = SourceTraitFactory.create(source_dataset__source_study_version=current_study_version)
        old_trait = SourceTraitFactory.create(source_dataset__source_study_version=old_study_version)
        # Only the tagged trait from the current study version should be counted.
        factories.TaggedTraitFactory.create(trait=current_trait, tag=tag)
        factories.TaggedTraitFactory.create(trait=old_trait, tag=tag)
        response = self.client.get(self.get_url())
        counts = response.context['taggedtrait_study_counts_by_tag']
        self.assertEqual(counts[0][1][0]['tt_count'], 1)

    def test_no_deprecated_traits_with_same_version_number(self):
        """Counts exclude traits tagged from deprecated study versions even with same version number."""
        # This directly addresses the unusual CARDIA situation where there are two study versions with the
        # same version number, one of which is deprecated.
        tag = factories.TagFactory.create()
        study = StudyFactory.create()
        current_study_version = SourceStudyVersionFactory.create(study=study, i_version=5)
        old_study_version = SourceStudyVersionFactory.create(study=study, i_version=5, i_is_deprecated=True)
        current_trait = SourceTraitFactory.create(source_dataset__source_study_version=current_study_version)
        old_trait = SourceTraitFactory.create(source_dataset__source_study_version=old_study_version)
        # Only the tagged trait from the non-deprecated study version should be counted.
        factories.TaggedTraitFactory.create(trait=current_trait, tag=tag)
        factories.TaggedTraitFactory.create(trait=old_trait, tag=tag)
        response = self.client.get(self.get_url())
        counts = response.context['taggedtrait_study_counts_by_tag']
        self.assertEqual(counts[0][1][0]['tt_count'], 1)
class TaggedTraitByTagAndStudyListTestsMixin(object):
def get_url(self, *args):
    """Return the url for the tag-and-study tagged trait list view."""
    url = reverse('tags:tag:study:list', args=args)
    return url
def test_view_success_code(self):
    """Returns successful response code."""
    url = self.get_url(self.tag.pk, self.study.pk)
    response = self.client.get(url)
    self.assertEqual(response.status_code, 200)
def test_view_with_invalid_study_pk(self):
    """Returns 404 response code when the study pk doesn't exist."""
    # A pk one past the only existing study is guaranteed not to exist.
    missing_study_pk = self.study.pk + 1
    response = self.client.get(self.get_url(self.tag.pk, missing_study_pk))
    self.assertEqual(response.status_code, 404)
def test_view_with_invalid_tag_pk(self):
    """Returns 404 response code when the tag pk doesn't exist."""
    response = self.client.get(self.get_url(self.tag.pk + 1, self.study.pk))
    self.assertEqual(response.status_code, 404)
def test_table_contains_correct_records(self):
    """All expected tagged traits are in the table."""
    response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
    table = response.context['tagged_trait_table']
    # The table should contain exactly the fixture tagged traits, no more, no fewer.
    self.assertEqual(len(table.data), len(self.tagged_traits))
    for tagged_trait in self.tagged_traits:
        self.assertIn(tagged_trait, table.data, msg='tagged_trait_table does not contain {}'.format(tagged_trait))
def test_works_with_no_tagged_traits_in_study(self):
    """Table has zero rows when there are no tagged traits."""
    # A freshly-created tag and study have no tagged traits attached.
    empty_study = StudyFactory.create()
    empty_tag = factories.TagFactory.create()
    response = self.client.get(self.get_url(empty_tag.pk, empty_study.pk))
    self.assertEqual(response.status_code, 200)
    self.assertEqual(len(response.context['tagged_trait_table'].data), 0)
def test_does_not_show_tagged_traits_from_a_different_study(self):
    """Table does not include tagged trait from a different study."""
    unrelated_study = StudyFactory.create()
    unrelated_tagged_trait = factories.TaggedTraitFactory.create(
        tag=self.tag, trait__source_dataset__source_study_version__study=unrelated_study)
    response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
    table = response.context['tagged_trait_table']
    self.assertNotIn(unrelated_tagged_trait, table.data)
def test_does_not_show_tagged_traits_from_a_different_tag(self):
"""Table does not include tagged trait with a different tag."""
other_tag = factories.TagFactory.create()
other_tagged_trait = factories.TaggedTraitFactory.create(
tag=other_tag, trait__source_dataset__source_study_version__study=self.study)
response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
context = response.context
self.assertNotIn(other_tagged_trait, context['tagged_trait_table'].data)
def test_no_deprecated_traits(self):
"""Counts exclude traits tagged from deprecated study versions."""
tag = factories.TagFactory.create()
study = StudyFactory.create()
current_study_version = SourceStudyVersionFactory.create(study=study, i_version=5)
old_study_version = SourceStudyVersionFactory.create(study=study, i_version=4, i_is_deprecated=True)
current_trait = SourceTraitFactory.create(source_dataset__source_study_version=current_study_version)
old_trait = SourceTraitFactory.create(source_dataset__source_study_version=old_study_version)
current_tagged_trait = factories.TaggedTraitFactory.create(trait=current_trait, tag=tag)
old_tagged_trait = factories.TaggedTraitFactory.create(trait=old_trait, tag=tag)
response = self.client.get(self.get_url(tag.pk, study.pk))
context = response.context
self.assertIn(current_tagged_trait, context['tagged_trait_table'].data)
self.assertNotIn(old_tagged_trait, context['tagged_trait_table'].data)
def test_no_deprecated_traits_with_same_version_number(self):
"""Counts exclude traits tagged from deprecated study versions even with same version number."""
# This directly addresses the unusual CARDIA situation where there are two study versions with the
# same version number, one of which is deprecated.
tag = factories.TagFactory.create()
study = StudyFactory.create()
current_study_version = SourceStudyVersionFactory.create(study=study, i_version=5)
old_study_version = SourceStudyVersionFactory.create(study=study, i_version=5, i_is_deprecated=True)
current_trait = SourceTraitFactory.create(source_dataset__source_study_version=current_study_version)
old_trait = SourceTraitFactory.create(source_dataset__source_study_version=old_study_version)
current_tagged_trait = factories.TaggedTraitFactory.create(trait=current_trait, tag=tag)
old_tagged_trait = factories.TaggedTraitFactory.create(trait=old_trait, tag=tag)
response = self.client.get(self.get_url(tag.pk, study.pk))
context = response.context
self.assertIn(current_tagged_trait, context['tagged_trait_table'].data)
self.assertNotIn(old_tagged_trait, context['tagged_trait_table'].data)
class TaggedTraitByTagAndStudyListTest(TaggedTraitByTagAndStudyListTestsMixin, UserLoginTestCase):
    """Run the shared list-view tests as a regular logged-in user, plus user-specific checks."""

    def setUp(self):
        super(TaggedTraitByTagAndStudyListTest, self).setUp()
        self.study = StudyFactory.create()
        self.tag = factories.TagFactory.create()
        self.tagged_traits = factories.TaggedTraitFactory.create_batch(
            10, tag=self.tag, trait__source_dataset__source_study_version__study=self.study)

    def test_context_data(self):
        """View has appropriate data in the context."""
        resp = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        ctx = resp.context
        self.assertIn('study', ctx)
        self.assertIn('tag', ctx)
        self.assertIn('tagged_trait_table', ctx)
        self.assertEqual(ctx['study'], self.study)
        self.assertEqual(ctx['tag'], self.tag)
        # Regular users must not see the review button.
        self.assertIn('show_review_button', ctx)
        self.assertFalse(ctx['show_review_button'])

    def test_table_class(self):
        """For non-taggers, the tagged trait table class does not have delete buttons."""
        resp = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        self.assertIsInstance(resp.context['tagged_trait_table'], tables.TaggedTraitTable)

    def test_no_detail_page_links(self):
        """Contains no links to the TaggedTraitDetail view."""
        resp = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        page_content = str(resp.content)
        for tt in self.tagged_traits:
            self.assertNotIn(tt.get_absolute_url(), page_content)

    def test_no_archived_taggedtraits(self):
        """Archived tagged traits do not appear in the table."""
        # Clear setUp's records so only the two below can appear.
        models.TaggedTrait.objects.all().delete()
        archived = factories.TaggedTraitFactory.create(
            tag=self.tag, trait__source_dataset__source_study_version__study=self.study, archived=True)
        unarchived = factories.TaggedTraitFactory.create(
            tag=self.tag, trait__source_dataset__source_study_version__study=self.study, archived=False)
        resp = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        self.assertIn('tagged_trait_table', resp.context)
        table = resp.context['tagged_trait_table']
        self.assertNotIn(archived, table.data)
        self.assertIn(unarchived, table.data)
class TaggedTraitByTagAndStudyListPhenotypeTaggerTest(TaggedTraitByTagAndStudyListTestsMixin,
                                                      PhenotypeTaggerLoginTestCase):
    """Run the shared list-view tests as a phenotype tagger, plus tagger-specific checks."""

    def setUp(self):
        super(TaggedTraitByTagAndStudyListPhenotypeTaggerTest, self).setUp()
        self.tag = factories.TagFactory.create()
        self.tagged_traits = factories.TaggedTraitFactory.create_batch(
            10, trait__source_dataset__source_study_version__study=self.study, tag=self.tag)
        self.user.refresh_from_db()
        self.user.profile.taggable_studies.add(self.study)

    def test_context_data(self):
        """View has appropriate data in the context."""
        resp = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        ctx = resp.context
        self.assertIn('study', ctx)
        self.assertIn('tag', ctx)
        self.assertIn('tagged_trait_table', ctx)
        self.assertEqual(ctx['study'], self.study)
        self.assertEqual(ctx['tag'], self.tag)
        # Taggers must not see the review button either.
        self.assertIn('show_review_button', ctx)
        self.assertFalse(ctx['show_review_button'])

    def test_table_class(self):
        """For taggers, the tagged trait table class is correct."""
        resp = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        self.assertIsInstance(resp.context['tagged_trait_table'],
                              tables.TaggedTraitTableForPhenotypeTaggersFromStudy)

    def test_contains_detail_page_links(self):
        """Contains links to the TaggedTraitDetail view."""
        resp = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        page_content = str(resp.content)
        for tt in self.tagged_traits:
            self.assertIn(tt.get_absolute_url(), page_content)

    def test_no_archived_taggedtraits(self):
        """Archived tagged traits do not appear in the table."""
        # Clear setUp's records so only the two below can appear.
        models.TaggedTrait.objects.all().delete()
        archived = factories.TaggedTraitFactory.create(
            tag=self.tag, trait__source_dataset__source_study_version__study=self.study, archived=True)
        unarchived = factories.TaggedTraitFactory.create(
            tag=self.tag, trait__source_dataset__source_study_version__study=self.study, archived=False)
        resp = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        self.assertIn('tagged_trait_table', resp.context)
        table = resp.context['tagged_trait_table']
        self.assertNotIn(archived, table.data)
        self.assertIn(unarchived, table.data)
class TaggedTraitByTagAndStudyListDCCAnalystTest(TaggedTraitByTagAndStudyListTestsMixin, DCCAnalystLoginTestCase):
    """Run the shared list-view tests as a DCC analyst, plus analyst-specific checks."""

    def setUp(self):
        super(TaggedTraitByTagAndStudyListDCCAnalystTest, self).setUp()
        self.study = StudyFactory.create()
        self.tag = factories.TagFactory.create()
        self.tagged_traits = factories.TaggedTraitFactory.create_batch(
            10, trait__source_dataset__source_study_version__study=self.study, tag=self.tag)
        self.user.refresh_from_db()

    def test_context_data(self):
        """View has appropriate data in the context."""
        resp = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        ctx = resp.context
        self.assertIn('study', ctx)
        self.assertIn('tag', ctx)
        self.assertIn('tagged_trait_table', ctx)
        self.assertEqual(ctx['study'], self.study)
        self.assertEqual(ctx['tag'], self.tag)
        # DCC analysts do see the review button.
        self.assertIn('show_review_button', ctx)
        self.assertTrue(ctx['show_review_button'])

    def test_table_class(self):
        """For DCC Analysts, the tagged trait table class has delete buttons."""
        resp = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        self.assertIsInstance(resp.context['tagged_trait_table'], tables.TaggedTraitTableForStaffByStudy)

    def test_contains_detail_page_links(self):
        """Contains links to the TaggedTraitDetail view."""
        resp = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        page_content = str(resp.content)
        for tt in self.tagged_traits:
            self.assertIn(tt.get_absolute_url(), page_content)

    def test_includes_archived_taggedtraits(self):
        """Archived tagged traits do appear in the table."""
        # Clear setUp's records so only the two below can appear; unlike other
        # user types, staff see archived records too.
        models.TaggedTrait.objects.all().delete()
        archived = factories.TaggedTraitFactory.create(
            tag=self.tag, trait__source_dataset__source_study_version__study=self.study, archived=True)
        unarchived = factories.TaggedTraitFactory.create(
            tag=self.tag, trait__source_dataset__source_study_version__study=self.study, archived=False)
        resp = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        self.assertIn('tagged_trait_table', resp.context)
        table = resp.context['tagged_trait_table']
        self.assertIn(archived, table.data)
        self.assertIn(unarchived, table.data)
class TaggedTraitCreateTestsMixin(object):
    """Tests of the TaggedTraitCreate view shared by all user types that can tag.

    Inheriting test cases must define self.trait, self.tag, and self.user in setUp.
    """

    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:add-one:main')

    def test_view_success_code(self):
        """Returns successful response code."""
        response = self.client.get(self.get_url())
        self.assertEqual(response.status_code, 200)

    def test_context_data(self):
        """View has appropriate data in the context."""
        response = self.client.get(self.get_url())
        context = response.context
        self.assertTrue('form' in context)

    def test_creates_new_object(self):
        """Posting valid data to the form correctly tags a trait."""
        # Check on redirection to detail page, M2M links, and creation message.
        response = self.client.post(self.get_url(), {'trait': self.trait.pk, 'tag': self.tag.pk, })
        new_object = models.TaggedTrait.objects.latest('pk')
        self.assertIsInstance(new_object, models.TaggedTrait)
        self.assertRedirects(response, reverse('tags:tag:detail', args=[new_object.tag.pk]))
        self.assertEqual(new_object.tag, self.tag)
        self.assertEqual(new_object.trait, self.trait)
        self.assertIn(self.trait, self.tag.all_traits.all())
        self.assertIn(self.tag, self.trait.all_tags.all())
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertFalse('Oops!' in str(messages[0]))

    def test_tags_traits_from_two_studies(self):
        """Correctly able to tag traits from two different studies."""
        study2 = StudyFactory.create()
        self.user.profile.taggable_studies.add(study2)
        trait2 = SourceTraitFactory.create(source_dataset__source_study_version__study=study2)
        # Tag the two traits.
        response1 = self.client.post(self.get_url(), {'trait': self.trait.pk, 'tag': self.tag.pk, })
        response2 = self.client.post(self.get_url(), {'trait': trait2.pk, 'tag': self.tag.pk, })
        # Correctly goes to the tag's detail page and shows a success message.
        self.assertRedirects(response1, self.tag.get_absolute_url())
        self.assertRedirects(response2, self.tag.get_absolute_url())
        # Correctly creates a tagged_trait for each trait.
        for trait_pk in [self.trait.pk, trait2.pk]:
            trait = SourceTrait.objects.get(pk=trait_pk)
            # Implicit existence check: .get() raises TaggedTrait.DoesNotExist on failure.
            tagged_trait = models.TaggedTrait.objects.get(trait__pk=trait_pk, tag=self.tag)
            self.assertIn(trait, self.tag.all_traits.all())
            self.assertIn(self.tag, trait.all_tags.all())

    def test_invalid_form_message(self):
        """Posting invalid data results in a message about the invalidity."""
        response = self.client.post(self.get_url(), {'trait': '', 'tag': self.tag.pk, })
        self.assertFormError(response, 'form', 'trait', 'This field is required.')
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))

    def test_post_blank_trait(self):
        """Posting bad data to the form doesn't tag the trait and shows a form error."""
        response = self.client.post(self.get_url(), {'trait': '', 'tag': self.tag.pk, })
        self.assertFormError(response, 'form', 'trait', 'This field is required.')
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))
        form = response.context['form']
        self.assertEqual(form['trait'].errors, [u'This field is required.'])
        self.assertNotIn(self.tag, self.trait.all_tags.all())

    def test_adds_user(self):
        """When a trait is successfully tagged, it has the appropriate creator."""
        response = self.client.post(self.get_url(),
                                    {'trait': self.trait.pk, 'tag': self.tag.pk, })
        new_object = models.TaggedTrait.objects.latest('pk')
        self.assertEqual(self.user, new_object.creator)

    def test_fails_when_trait_is_already_tagged(self):
        """Tagging a trait fails when the trait has already been tagged with this tag."""
        tagged_trait = factories.TaggedTraitFactory.create(tag=self.tag, trait=self.trait)
        response = self.client.post(self.get_url(), {'trait': self.trait.pk, 'tag': self.tag.pk, })
        expected_error = forms.EXISTING_TAGGED_TRAIT_ERROR_STRING.format(
            tag_name=self.tag.title, phv=self.trait.full_accession, trait_name=self.trait.i_trait_name)
        self.assertFormError(response, 'form', 'trait', expected_error)
        self.assertEqual(response.status_code, 200)
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))

    def test_fails_when_trait_is_already_tagged_but_archived(self):
        """Tagging a trait fails when the trait has already been tagged with this tag and archived."""
        tagged_trait = factories.TaggedTraitFactory.create(tag=self.tag, trait=self.trait, archived=True)
        response = self.client.post(self.get_url(), {'trait': self.trait.pk, 'tag': self.tag.pk, })
        expected_error = forms.ARCHIVED_EXISTING_TAGGED_TRAIT_ERROR_STRING.format(
            tag_name=self.tag.title, phv=self.trait.full_accession, trait_name=self.trait.i_trait_name)
        # Fix: expected_error was previously built but never checked; assert the form
        # error, matching the parallel test in TaggedTraitCreateByTagTestsMixin.
        self.assertFormError(response, 'form', 'trait', expected_error)
        self.assertEqual(response.status_code, 200)
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))

    def test_fails_when_trait_is_deprecated(self):
        """Can't tag a deprecated source trait."""
        sv = self.trait.source_dataset.source_study_version
        sv.i_is_deprecated = True
        sv.save()
        response = self.client.post(self.get_url(), {'trait': self.trait.pk, 'tag': self.tag.pk, })
        self.assertFormError(response, 'form', 'trait',
                             'Select a valid choice. That choice is not one of the available choices.')
        self.assertEqual(response.status_code, 200)
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))
class TaggedTraitCreatePhenotypeTaggerTest(TaggedTraitCreateTestsMixin, PhenotypeTaggerLoginTestCase):
    """Run the shared TaggedTraitCreate tests as a phenotype tagger, plus tagger-specific checks."""

    def setUp(self):
        super(TaggedTraitCreatePhenotypeTaggerTest, self).setUp()
        self.trait = SourceTraitFactory.create(source_dataset__source_study_version__study=self.study)
        self.tag = factories.TagFactory.create()
        self.user.refresh_from_db()

    def test_fails_with_other_study_trait(self):
        """Tagging a trait fails when the trait is not in the user's taggable_studies'."""
        other_study = StudyFactory.create()
        other_trait = SourceTraitFactory.create(source_dataset__source_study_version__study=other_study)
        response = self.client.post(self.get_url(), {'trait': other_trait.pk, 'tag': self.tag.pk, })
        self.assertFormError(
            response, 'form', 'trait',
            'Select a valid choice. That choice is not one of the available choices.')
        # They have taggable studies and they're in the phenotype_taggers group, so view is still accessible.
        self.assertEqual(response.status_code, 200)
        msgs = list(response.wsgi_request._messages)
        self.assertEqual(len(msgs), 1)
        self.assertTrue('Oops!' in str(msgs[0]))

    def test_forbidden_non_taggers(self):
        """Returns 403 code when the user is not in phenotype_taggers."""
        taggers_group = Group.objects.get(name='phenotype_taggers')
        self.user.groups.remove(taggers_group)
        response = self.client.get(self.get_url())
        self.assertEqual(response.status_code, 403)

    def test_forbidden_empty_taggable_studies(self):
        """Returns 403 code when the user has no taggable_studies."""
        trait_study = self.trait.source_dataset.source_study_version.study
        self.user.profile.taggable_studies.remove(trait_study)
        response = self.client.get(self.get_url())
        self.assertEqual(response.status_code, 403)
class TaggedTraitCreateDCCAnalystTest(TaggedTraitCreateTestsMixin, DCCAnalystLoginTestCase):
    """Run the shared TaggedTraitCreate tests as a DCC analyst, plus analyst-specific checks."""

    def setUp(self):
        super(TaggedTraitCreateDCCAnalystTest, self).setUp()
        self.trait = SourceTraitFactory.create()
        self.tag = factories.TagFactory.create()
        self.user.refresh_from_db()

    def test_tag_other_study_trait(self):
        """Tagging a trait from another study works since the analyst doesn't have taggable_studies."""
        another_study = StudyFactory.create()
        another_trait = SourceTraitFactory.create(source_dataset__source_study_version__study=another_study)
        response = self.client.post(self.get_url(), {'trait': another_trait.pk, 'tag': self.tag.pk, })
        # View redirects to success url.
        self.assertRedirects(response, reverse('tags:tag:detail', args=[self.tag.pk]))
        self.assertEqual(response.status_code, 302)
        msgs = list(response.wsgi_request._messages)
        self.assertEqual(len(msgs), 1)
        self.assertFalse('Oops!' in str(msgs[0]))

    def test_view_success_without_phenotype_taggers_group(self):
        """View is accessible even when the DCC user is not in phenotype_taggers."""
        taggers_group = Group.objects.get(name='phenotype_taggers')
        self.user.groups.remove(taggers_group)
        response = self.client.get(self.get_url())
        self.assertEqual(response.status_code, 200)

    def test_view_success_with_empty_taggable_studies(self):
        """View is accessible when the DCC user has no taggable_studies."""
        trait_study = self.trait.source_dataset.source_study_version.study
        self.user.profile.taggable_studies.remove(trait_study)
        response = self.client.get(self.get_url())
        self.assertEqual(response.status_code, 200)
class TaggedTraitDeleteTestsMixin(object):
    """Tests of the TaggedTraitDelete view shared by all user types that can delete.

    Inheriting test cases must define self.tagged_trait, self.trait, and self.tag in setUp.
    """
    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:tagged-traits:pk:delete', args=args)
    def test_view_success_code(self):
        """Returns successful response code."""
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        self.assertEqual(response.status_code, 200)
    def test_view_with_invalid_pk(self):
        """Returns 404 response code when the pk doesn't exist."""
        response = self.client.get(self.get_url(self.tagged_trait.pk + 1))
        self.assertEqual(response.status_code, 404)
    def test_context_data(self):
        """View has appropriate data in the context."""
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        context = response.context
        self.assertIn('tagged_trait', context)
        self.assertEqual(context['tagged_trait'], self.tagged_trait)
        # No ?next= query parameter was given, so next_url should be None.
        self.assertEqual(context['next_url'], None)
    def test_deletes_unreviewed(self):
        """Posting 'submit' to the form correctly deletes the tagged_trait."""
        response = self.client.post(self.get_url(self.tagged_trait.pk), {'submit': ''})
        self.assertEqual(response.status_code, 302)
        # refresh_from_db raises DoesNotExist because the row was hard-deleted.
        with self.assertRaises(models.TaggedTrait.DoesNotExist):
            self.tagged_trait.refresh_from_db()
        self.assertEqual(models.TaggedTrait.objects.count(), 0)
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertFalse('Oops!' in str(messages[0]))
    def test_post_anything_deletes_unreviewed(self):
        """Posting anything at all, even an empty dict, deletes the object."""
        # Is this really the behavior I want? I'm not sure...
        # Sounds like it might be:
        # https://stackoverflow.com/questions/17678689/how-to-add-a-cancel-button-to-deleteview-in-django
        response = self.client.post(self.get_url(self.tagged_trait.pk), {})
        self.assertEqual(response.status_code, 302)
        with self.assertRaises(models.TaggedTrait.DoesNotExist):
            self.tagged_trait.refresh_from_db()
        self.assertEqual(models.TaggedTrait.objects.count(), 0)
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertFalse('Oops!' in str(messages[0]))
    def test_confirmed_tagged_trait_get_request_redirects_before_confirmation_view(self):
        """Cannot delete a TaggedTrait that has been confirmed by the DCC."""
        dcc_review = factories.DCCReviewFactory.create(tagged_trait=self.tagged_trait,
                                                       status=models.DCCReview.STATUS_CONFIRMED)
        # Even the GET (confirmation page) redirects away for confirmed tagged traits.
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        self.assertEqual(response.status_code, 302)
        # Make sure it wasn't deleted.
        self.assertIn(self.tagged_trait, models.TaggedTrait.objects.all())
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn(views.CONFIRMED_TAGGED_TRAIT_DELETE_ERROR_MESSAGE, str(messages[0]))
    def test_does_not_delete_confirmed(self):
        """Cannot delete a TaggedTrait that has been confirmed by the DCC."""
        dcc_review = factories.DCCReviewFactory.create(tagged_trait=self.tagged_trait,
                                                       status=models.DCCReview.STATUS_CONFIRMED)
        response = self.client.post(self.get_url(self.tagged_trait.pk), {'submit': ''})
        self.assertEqual(response.status_code, 302)
        # Make sure it wasn't deleted.
        self.assertIn(self.tagged_trait, models.TaggedTrait.objects.all())
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn(views.CONFIRMED_TAGGED_TRAIT_DELETE_ERROR_MESSAGE, str(messages[0]))
    def test_needs_followup_tagged_trait_get_request_reaches_confirmation_view(self):
        """Confirmation view has success code when trying to delete a needs followup reviewed TaggedTrait."""
        dcc_review = factories.DCCReviewFactory.create(
            tagged_trait=self.tagged_trait, status=models.DCCReview.STATUS_FOLLOWUP)
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        self.assertEqual(response.status_code, 200)
        # Make sure it wasn't deleted.
        self.assertIn(self.tagged_trait, models.TaggedTrait.objects.all())
    def test_archives_need_followup(self):
        """Archives a TaggedTrait that was reviewed with needs followup."""
        dcc_review = factories.DCCReviewFactory.create(tagged_trait=self.tagged_trait,
                                                       status=models.DCCReview.STATUS_FOLLOWUP)
        response = self.client.post(self.get_url(self.tagged_trait.pk), {'submit': ''})
        self.assertEqual(response.status_code, 302)
        # The record still exists (refresh_from_db succeeds) but is now archived.
        self.tagged_trait.refresh_from_db()
        self.assertTrue(self.tagged_trait.archived)
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertNotIn(views.CONFIRMED_TAGGED_TRAIT_DELETE_ERROR_MESSAGE, str(messages[0]))
    def test_next_url(self):
        """next_url in context matches the starting url."""
        starting_url = reverse('trait_browser:source:traits:detail', args=[self.trait.pk])
        url_with_next = self.get_url(self.tagged_trait.pk) + '?next={}'.format(starting_url)
        response = self.client.get(url_with_next)
        context = response.context
        self.assertEqual(context['next_url'], starting_url)
    def test_success_url_sourcetraitdetail(self):
        """Redirects to the source trait detail page as expected."""
        starting_url = reverse('trait_browser:source:traits:detail', args=[self.trait.pk])
        url_with_next = self.get_url(self.tagged_trait.pk) + '?next={}'.format(starting_url)
        response = self.client.post(url_with_next, {'submit': ''})
        self.assertRedirects(response, starting_url)
    def test_success_url_taggedtraitdetail(self):
        """Redirects to the TaggedTraitByTagAndStudyList view as expected."""
        # Coming from the deleted object's own detail page; the expected redirect
        # target is the tag+study list page instead of the (now-gone) detail page.
        starting_url = reverse('tags:tagged-traits:pk:detail', args=[self.tagged_trait.pk])
        tag_study_list_url = reverse(
            'tags:tag:study:list',
            kwargs={'pk_study': self.trait.source_dataset.source_study_version.study.pk,
                    'pk': self.tag.pk}
        )
        url_with_next = self.get_url(self.tagged_trait.pk) + '?next={}'.format(starting_url)
        response = self.client.post(url_with_next, {'submit': ''})
        self.assertRedirects(response, tag_study_list_url)
    def test_success_url_profile(self):
        """Redirects to the profile page as expected."""
        starting_url = reverse('profiles:profile')
        url_with_next = self.get_url(self.tagged_trait.pk) + '?next={}'.format(starting_url)
        response = self.client.post(url_with_next, {'submit': ''})
        self.assertRedirects(response, starting_url)
    def test_success_url_no_starting_url(self):
        """Redirects to the tag and study list page when no next url is given."""
        tag_study_list_url = reverse(
            'tags:tag:study:list',
            kwargs={'pk_study': self.trait.source_dataset.source_study_version.study.pk,
                    'pk': self.tag.pk}
        )
        response = self.client.post(self.get_url(self.tagged_trait.pk), {'submit': ''})
        self.assertRedirects(response, tag_study_list_url)
class TaggedTraitDeletePhenotypeTaggerTest(TaggedTraitDeleteTestsMixin, PhenotypeTaggerLoginTestCase):
    """Run the shared TaggedTraitDelete tests as a phenotype tagger, plus tagger-specific checks."""

    def setUp(self):
        super(TaggedTraitDeletePhenotypeTaggerTest, self).setUp()
        self.trait = SourceTraitFactory.create(source_dataset__source_study_version__study=self.study)
        self.tag = factories.TagFactory.create()
        self.user.refresh_from_db()
        self.tagged_trait = models.TaggedTrait.objects.create(trait=self.trait, tag=self.tag, creator=self.user)

    def test_deletes_unreviewed_tagged_by_other_user(self):
        """User can delete a tagged trait that was created by someone else from the same study."""
        same_study_trait = SourceTraitFactory.create(source_dataset__source_study_version__study=self.study)
        other_user = UserFactory.create()
        taggers_group = Group.objects.get(name='phenotype_taggers')
        other_user.groups.add(taggers_group)
        other_user.profile.taggable_studies.add(self.study)
        other_user_tagged_trait = models.TaggedTrait.objects.create(
            trait=same_study_trait, tag=self.tag, creator=other_user)
        response = self.client.post(self.get_url(other_user_tagged_trait.pk), {'submit': ''})
        self.assertEqual(response.status_code, 302)
        with self.assertRaises(models.TaggedTrait.DoesNotExist):
            other_user_tagged_trait.refresh_from_db()
        # Only the tagged trait made in setUp should remain.
        self.assertEqual(models.TaggedTrait.objects.count(), 1)
        msgs = list(response.wsgi_request._messages)
        self.assertEqual(len(msgs), 1)
        self.assertFalse('Oops!' in str(msgs[0]))

    def test_forbidden_non_taggers(self):
        """Returns 403 code when the user is not in phenotype_taggers."""
        taggers_group = Group.objects.get(name='phenotype_taggers')
        self.user.groups.remove(taggers_group)
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        self.assertEqual(response.status_code, 403)

    def test_forbidden_wrong_taggable_studies(self):
        """Returns 403 code when the user has no taggable_studies."""
        trait_study = self.trait.source_dataset.source_study_version.study
        self.user.profile.taggable_studies.remove(trait_study)
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        self.assertEqual(response.status_code, 403)
class TaggedTraitDeleteDCCAnalystTest(TaggedTraitDeleteTestsMixin, DCCAnalystLoginTestCase):
    """Run the shared TaggedTraitDelete tests as a DCC analyst, plus analyst-specific checks."""

    def setUp(self):
        super(TaggedTraitDeleteDCCAnalystTest, self).setUp()
        self.trait = SourceTraitFactory.create()
        self.tag = factories.TagFactory.create()
        self.user.refresh_from_db()
        self.tagged_trait = models.TaggedTrait.objects.create(trait=self.trait, tag=self.tag, creator=self.user)

    def test_deletes_unreviewed_tagged_by_other_user(self):
        """User can delete a tagged trait that was created by someone else from the same study."""
        trait_study = self.trait.source_dataset.source_study_version.study
        same_study_trait = SourceTraitFactory.create(
            source_dataset__source_study_version__study=trait_study)
        other_user = UserFactory.create()
        taggers_group = Group.objects.get(name='phenotype_taggers')
        other_user.groups.add(taggers_group)
        other_user.profile.taggable_studies.add(trait_study)
        other_user_tagged_trait = models.TaggedTrait.objects.create(
            trait=same_study_trait, tag=self.tag, creator=other_user)
        response = self.client.post(self.get_url(other_user_tagged_trait.pk), {'submit': ''})
        self.assertEqual(response.status_code, 302)
        with self.assertRaises(models.TaggedTrait.DoesNotExist):
            other_user_tagged_trait.refresh_from_db()
        # Only the tagged trait made in setUp should remain.
        self.assertEqual(models.TaggedTrait.objects.count(), 1)
        msgs = list(response.wsgi_request._messages)
        self.assertEqual(len(msgs), 1)
        self.assertFalse('Oops!' in str(msgs[0]))

    def test_view_success_without_phenotype_taggers_group(self):
        """DCC user can access the view even when they're not in phenotype_taggers."""
        taggers_group = Group.objects.get(name='phenotype_taggers')
        self.user.groups.remove(taggers_group)
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        self.assertEqual(response.status_code, 200)

    def test_view_success_with_empty_taggable_studies(self):
        """DCC user can access the view even with no taggable_studies."""
        trait_study = self.trait.source_dataset.source_study_version.study
        self.user.profile.taggable_studies.remove(trait_study)
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        self.assertEqual(response.status_code, 200)
class TaggedTraitCreateByTagTestsMixin(object):
def get_url(self, *args):
"""Get the url for the view this class is supposed to test."""
return reverse('tags:add-one:by-tag', args=args)
def test_view_success_code(self):
"""Returns successful response code."""
response = self.client.get(self.get_url(self.tag.pk))
self.assertEqual(response.status_code, 200)
def test_context_data(self):
"""View has appropriate data in the context."""
response = self.client.get(self.get_url(self.tag.pk))
context = response.context
self.assertTrue('form' in context)
self.assertTrue('tag' in context)
self.assertEqual(context['tag'], self.tag)
def test_creates_new_object(self):
"""Posting valid data to the form correctly tags a single trait."""
# Check on redirection to detail page, M2M links, and creation message.
form_data = {'trait': self.trait.pk, }
response = self.client.post(self.get_url(self.tag.pk), form_data)
self.assertRedirects(response, self.tag.get_absolute_url())
new_object = models.TaggedTrait.objects.latest('pk')
self.assertIsInstance(new_object, models.TaggedTrait)
self.assertEqual(new_object.tag, self.tag)
self.assertEqual(new_object.trait, self.trait)
self.assertIn(self.trait, self.tag.all_traits.all())
self.assertIn(self.tag, self.trait.all_tags.all())
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertFalse('Oops!' in str(messages[0]))
def test_tags_traits_from_two_studies(self):
"""Correctly able to tag traits from two different studies."""
study2 = StudyFactory.create()
self.user.profile.taggable_studies.add(study2)
trait2 = SourceTraitFactory.create(source_dataset__source_study_version__study=study2)
# Tag the two traits.
response1 = self.client.post(self.get_url(self.tag.pk), {'trait': self.trait.pk, })
response2 = self.client.post(self.get_url(self.tag.pk), {'trait': trait2.pk, })
# Correctly goes to the tag's detail page and shows a success message.
self.assertRedirects(response1, self.tag.get_absolute_url())
self.assertRedirects(response2, self.tag.get_absolute_url())
# Correctly creates a tagged_trait for each trait.
for trait_pk in [self.trait.pk, trait2.pk]:
trait = SourceTrait.objects.get(pk=trait_pk)
tagged_trait = models.TaggedTrait.objects.get(trait__pk=trait_pk, tag=self.tag)
self.assertIn(trait, self.tag.all_traits.all())
self.assertIn(self.tag, trait.all_tags.all())
def test_invalid_form_message(self):
"""Posting invalid data results in a message about the invalidity."""
response = self.client.post(self.get_url(self.tag.pk), {'trait': '', })
self.assertFormError(response, 'form', 'trait', 'This field is required.')
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertTrue('Oops!' in str(messages[0]))
def test_post_blank_trait(self):
"""Posting bad data to the form doesn't tag the trait and shows a form error."""
response = self.client.post(self.get_url(self.tag.pk), {'trait': '', })
self.assertFormError(response, 'form', 'trait', 'This field is required.')
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertTrue('Oops!' in str(messages[0]))
form = response.context['form']
self.assertTrue(form.has_error('trait'))
self.assertNotIn(self.tag, self.trait.all_tags.all())
def test_adds_user(self):
"""When a trait is successfully tagged, it has the appropriate creator."""
response = self.client.post(self.get_url(self.tag.pk),
{'trait': self.trait.pk, })
new_object = models.TaggedTrait.objects.latest('pk')
self.assertEqual(self.user, new_object.creator)
def test_fails_when_trait_is_already_tagged(self):
    """Tagging a trait fails when the trait has already been tagged with this tag."""
    # Pre-create the tag-trait link so the form submission is a duplicate.
    tagged_trait = factories.TaggedTraitFactory.create(tag=self.tag, trait=self.trait)
    response = self.client.post(self.get_url(self.tag.pk), {'trait': self.trait.pk, })
    # The form rejects the duplicate with the project's canned error string.
    expected_error = forms.EXISTING_TAGGED_TRAIT_ERROR_STRING.format(
        tag_name=self.tag.title, phv=self.trait.full_accession, trait_name=self.trait.i_trait_name)
    self.assertFormError(response, 'form', 'trait', expected_error)
    # 200: the view re-renders the form instead of redirecting to success.
    self.assertEqual(response.status_code, 200)
    messages = list(response.wsgi_request._messages)
    self.assertEqual(len(messages), 1)
    self.assertTrue('Oops!' in str(messages[0]))
def test_fails_when_trait_is_already_tagged_but_archived(self):
    """Tagging a trait fails when the trait has already been tagged with this tag but archived."""
    # Even an archived tagged trait blocks re-tagging; a distinct error string is used.
    tagged_trait = factories.TaggedTraitFactory.create(tag=self.tag, trait=self.trait, archived=True)
    response = self.client.post(self.get_url(self.tag.pk), {'trait': self.trait.pk, })
    expected_error = forms.ARCHIVED_EXISTING_TAGGED_TRAIT_ERROR_STRING.format(
        tag_name=self.tag.title, phv=self.trait.full_accession, trait_name=self.trait.i_trait_name)
    self.assertFormError(response, 'form', 'trait', expected_error)
    # 200: the view re-renders the form instead of redirecting to success.
    self.assertEqual(response.status_code, 200)
    messages = list(response.wsgi_request._messages)
    self.assertEqual(len(messages), 1)
    self.assertTrue('Oops!' in str(messages[0]))
def test_fails_when_trait_is_deprecated(self):
    """Can't tag a deprecated source trait."""
    # Deprecate the trait's study version so the trait leaves the form's queryset.
    sv = self.trait.source_dataset.source_study_version
    sv.i_is_deprecated = True
    sv.save()
    response = self.client.post(self.get_url(self.tag.pk), {'trait': self.trait.pk, })
    self.assertEqual(response.status_code, 200)
    # The deprecated trait is treated as an invalid choice by the form field.
    self.assertFormError(
        response, 'form', 'trait',
        'Select a valid choice. That choice is not one of the available choices.')
    messages = list(response.wsgi_request._messages)
    self.assertEqual(len(messages), 1)
    self.assertTrue('Oops!' in str(messages[0]))
class TaggedTraitCreateByTagPhenotypeTaggerTest(TaggedTraitCreateByTagTestsMixin, PhenotypeTaggerLoginTestCase):
    """Run the shared tag-one-trait-by-tag view tests as a phenotype tagger user."""

    def setUp(self):
        super(TaggedTraitCreateByTagPhenotypeTaggerTest, self).setUp()
        # A trait within the tagger's own study, plus a tag to apply to it.
        self.trait = SourceTraitFactory.create(source_dataset__source_study_version__study=self.study)
        self.tag = factories.TagFactory.create()
        # Reload so the in-memory user reflects any database changes from setUp.
        self.user.refresh_from_db()

    def test_fails_with_other_study_trait(self):
        """Tagging a trait fails when the trait is not in the user's taggable_studies'."""
        study2 = StudyFactory.create()
        trait2 = SourceTraitFactory.create(source_dataset__source_study_version__study=study2)
        response = self.client.post(self.get_url(self.tag.pk), {'trait': trait2.pk, })
        self.assertFormError(
            response, 'form', 'trait',
            'Select a valid choice. That choice is not one of the available choices.')
        # They have taggable studies and they're in the phenotype_taggers group, so view is still accessible.
        self.assertEqual(response.status_code, 200)
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))

    def test_forbidden_non_taggers(self):
        """Returns 403 code when the user is not in phenotype_taggers."""
        phenotype_taggers = Group.objects.get(name='phenotype_taggers')
        self.user.groups.remove(phenotype_taggers)
        response = self.client.get(self.get_url(self.tag.pk))
        self.assertEqual(response.status_code, 403)

    def test_forbidden_empty_taggable_studies(self):
        """Returns 403 code when the user has no taggable_studies."""
        # Removing the only taggable study leaves the tagger with none.
        self.user.profile.taggable_studies.remove(self.trait.source_dataset.source_study_version.study)
        response = self.client.get(self.get_url(self.tag.pk))
        self.assertEqual(response.status_code, 403)
class TaggedTraitCreateByTagDCCAnalystTest(TaggedTraitCreateByTagTestsMixin, DCCAnalystLoginTestCase):
    """Run the shared tag-one-trait-by-tag view tests as a DCC analyst user."""

    def setUp(self):
        super(TaggedTraitCreateByTagDCCAnalystTest, self).setUp()
        # DCC analysts are not limited by study, so any trait will do.
        self.trait = SourceTraitFactory.create()
        self.tag = factories.TagFactory.create()
        # Reload so the in-memory user reflects any database changes from setUp.
        self.user.refresh_from_db()

    def test_tag_other_study_trait(self):
        """DCC user can tag a trait even when it's not in the user's taggable_studies'."""
        study2 = StudyFactory.create()
        trait2 = SourceTraitFactory.create(source_dataset__source_study_version__study=study2)
        response = self.client.post(self.get_url(self.tag.pk), {'trait': trait2.pk, })
        # View redirects to success url.
        self.assertRedirects(response, reverse('tags:tag:detail', args=[self.tag.pk]))
        self.assertEqual(response.status_code, 302)
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertFalse('Oops!' in str(messages[0]))

    def test_view_success_without_phenotype_taggers_group_taggers(self):
        """DCC user can access the view even though they're not in phenotype_taggers."""
        phenotype_taggers = Group.objects.get(name='phenotype_taggers')
        self.user.groups.remove(phenotype_taggers)
        response = self.client.get(self.get_url(self.tag.pk))
        self.assertEqual(response.status_code, 200)

    def test_view_success_with_empty_taggable_studies(self):
        """DCC user can access the view even without any taggable_studies."""
        self.user.profile.taggable_studies.remove(self.trait.source_dataset.source_study_version.study)
        response = self.client.get(self.get_url(self.tag.pk))
        self.assertEqual(response.status_code, 200)
class ManyTaggedTraitsCreateTestsMixin(object):
    """Tests of the add-many tagging view that are shared across user types.

    Subclasses must provide ``self.tag`` and ``self.traits`` (a list of
    taggable SourceTraits) in setUp, and log in an appropriate user.
    """

    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:add-many:main')

    def test_view_success_code(self):
        """Returns successful response code."""
        response = self.client.get(self.get_url())
        self.assertEqual(response.status_code, 200)

    def test_context_data(self):
        """View has appropriate data in the context."""
        response = self.client.get(self.get_url())
        context = response.context
        self.assertTrue('form' in context)

    def test_creates_single_trait(self):
        """Posting valid data to the form correctly tags a single trait."""
        this_trait = self.traits[0]
        form_data = {'traits': [this_trait.pk], 'tag': self.tag.pk}
        response = self.client.post(self.get_url(), form_data)
        # Correctly goes to the tag's detail page and shows a success message.
        self.assertRedirects(response, self.tag.get_absolute_url())
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertFalse('Oops!' in str(messages[0]))
        # Correctly creates a tagged_trait for each trait.
        tagged_trait = models.TaggedTrait.objects.get(trait=this_trait, tag=self.tag)
        self.assertIn(this_trait, self.tag.all_traits.all())
        self.assertIn(self.tag, this_trait.all_tags.all())

    def test_creates_two_new_objects(self):
        """Posting valid data to the form correctly tags two traits."""
        # Check on redirection to detail page, M2M links, and creation message.
        some_traits = self.traits[:2]
        response = self.client.post(self.get_url(),
                                    {'traits': [str(t.pk) for t in some_traits], 'tag': self.tag.pk})
        self.assertRedirects(response, self.tag.get_absolute_url())
        for trait in some_traits:
            self.assertIn(trait, self.tag.all_traits.all())
            self.assertIn(self.tag, trait.all_tags.all())
        new_objects = models.TaggedTrait.objects.all()
        for tt in new_objects:
            self.assertEqual(tt.tag, self.tag)
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertFalse('Oops!' in str(messages[0]))

    def test_creates_all_new_objects(self):
        """Posting valid data to the form correctly tags all of the traits listed."""
        form_data = {'traits': [x.pk for x in self.traits[0:5]], 'tag': self.tag.pk}
        response = self.client.post(self.get_url(), form_data)
        # Correctly goes to the tag's detail page and shows a success message.
        self.assertRedirects(response, self.tag.get_absolute_url())
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertFalse('Oops!' in str(messages[0]))
        # Correctly creates a tagged_trait for each trait.
        for trait_pk in form_data['traits']:
            trait = SourceTrait.objects.get(pk=trait_pk)
            tagged_trait = models.TaggedTrait.objects.get(trait__pk=trait_pk, tag=self.tag)
            self.assertIn(trait, self.tag.all_traits.all())
            self.assertIn(self.tag, trait.all_tags.all())

    def test_invalid_form_message(self):
        """Posting invalid data results in a message about the invalidity."""
        response = self.client.post(self.get_url(), {'traits': '', 'tag': self.tag.pk})
        self.assertFormError(response, 'form', 'traits', '"" is not a valid value for a primary key.')
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))

    def test_post_blank_all_traits(self):
        """Posting bad data to the form doesn't tag the trait and shows a form error."""
        response = self.client.post(self.get_url(), {'traits': [], 'tag': self.tag.pk})
        self.assertFormError(response, 'form', 'traits', 'This field is required.')
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))
        self.assertNotIn(self.tag, self.traits[0].all_tags.all())

    def test_adds_user(self):
        """When a trait is successfully tagged, it has the appropriate creator."""
        response = self.client.post(self.get_url(),
                                    {'traits': [str(self.traits[0].pk)], 'tag': self.tag.pk})
        new_object = models.TaggedTrait.objects.latest('pk')
        self.assertEqual(self.user, new_object.creator)

    def test_fails_when_one_trait_is_already_tagged(self):
        """Tagging traits fails when a selected trait is already tagged with the tag."""
        already_tagged = factories.TaggedTraitFactory.create(tag=self.tag, trait=self.traits[0])
        response = self.client.post(self.get_url(),
                                    {'traits': [t.pk for t in self.traits[0:5]], 'tag': self.tag.pk, })
        self.assertEqual(response.status_code, 200)
        expected_error = forms.EXISTING_TAGGED_TRAIT_ERROR_STRING.format(
            tag_name=already_tagged.tag.title,
            phv=already_tagged.trait.full_accession,
            trait_name=already_tagged.trait.i_trait_name)
        self.assertFormError(response, 'form', 'traits', expected_error)
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))

    def test_fails_when_one_trait_is_already_tagged_but_archived(self):
        """Tagging traits fails when a selected trait is already tagged with the tag but archived."""
        already_tagged = factories.TaggedTraitFactory.create(tag=self.tag, trait=self.traits[0], archived=True)
        response = self.client.post(self.get_url(),
                                    {'traits': [t.pk for t in self.traits[0:5]], 'tag': self.tag.pk, })
        self.assertEqual(response.status_code, 200)
        expected_error = forms.ARCHIVED_EXISTING_TAGGED_TRAIT_ERROR_STRING.format(
            tag_name=already_tagged.tag.title,
            phv=already_tagged.trait.full_accession,
            trait_name=already_tagged.trait.i_trait_name)
        self.assertFormError(response, 'form', 'traits', expected_error)
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))

    def test_fails_when_one_trait_is_deprecated(self):
        """Can't tag one deprecated source trait."""
        sv = self.traits[0].source_dataset.source_study_version
        sv.i_is_deprecated = True
        sv.save()
        # Include the required 'tag' field so the deprecated trait is the only source of
        # form errors (previously 'tag' was omitted, adding an unrelated second error).
        response = self.client.post(self.get_url(), {'traits': [self.traits[0].pk], 'tag': self.tag.pk, })
        self.assertEqual(response.status_code, 200)
        self.assertFormError(
            response, 'form', 'traits',
            'Select a valid choice. {} is not one of the available choices.'.format(self.traits[0].pk))
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))

    def test_fails_when_one_of_two_traits_is_deprecated(self):
        """Can't tag one deprecated source trait."""
        deprecated_trait = SourceTraitFactory.create(source_dataset__source_study_version__i_is_deprecated=True)
        self.user.profile.taggable_studies.add(deprecated_trait.source_dataset.source_study_version.study)
        # Include the required 'tag' field so the deprecated trait is the only source of
        # form errors (previously 'tag' was omitted, adding an unrelated second error).
        response = self.client.post(
            self.get_url(), {'traits': [self.traits[0].pk, deprecated_trait.pk], 'tag': self.tag.pk, })
        self.assertEqual(response.status_code, 200)
        self.assertFormError(
            response, 'form', 'traits',
            'Select a valid choice. {} is not one of the available choices.'.format(deprecated_trait.pk))
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))

    def test_fails_when_two_traits_are_deprecated(self):
        """Can't tag two deprecated source traits."""
        # Deprecate the study versions of the first two traits; depending on the
        # subclass setUp these may be the same study version record.
        for trait in self.traits[:2]:
            sv = trait.source_dataset.source_study_version
            sv.i_is_deprecated = True
            sv.save()
        # Fix: this test previously deprecated and posted only ONE trait (and not as a
        # list), despite its name; now both deprecated traits are posted with 'tag'.
        response = self.client.post(
            self.get_url(), {'traits': [t.pk for t in self.traits[:2]], 'tag': self.tag.pk, })
        # The field reports an invalid-choice error for the first invalid pk it finds.
        self.assertFormError(
            response, 'form', 'traits',
            'Select a valid choice. {} is not one of the available choices.'.format(self.traits[0].pk))
        self.assertEqual(response.status_code, 200)
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))
class ManyTaggedTraitsCreatePhenotypeTaggerTest(ManyTaggedTraitsCreateTestsMixin, PhenotypeTaggerLoginTestCase):
    """Run the shared add-many tagging view tests as a phenotype tagger user."""

    def setUp(self):
        super(ManyTaggedTraitsCreatePhenotypeTaggerTest, self).setUp()
        self.tag = factories.TagFactory.create()
        # All ten traits share one study version within the tagger's study.
        study_version = SourceStudyVersionFactory.create(study=self.study)
        self.traits = SourceTraitFactory.create_batch(10, source_dataset__source_study_version=study_version)
        # Reload so the in-memory user reflects any database changes from setUp.
        self.user.refresh_from_db()

    def test_creates_all_new_objects_from_multiple_studies(self):
        """Correctly tags traits from two different studies in the user's taggable_studies."""
        study2 = StudyFactory.create()
        self.user.profile.taggable_studies.add(study2)
        more_traits = SourceTraitFactory.create_batch(2, source_dataset__source_study_version__study=study2)
        more_traits = self.traits[:2] + more_traits
        form_data = {'traits': [x.pk for x in more_traits], 'tag': self.tag.pk}
        response = self.client.post(self.get_url(), form_data)
        # Correctly goes to the tag's detail page and shows a success message.
        self.assertRedirects(response, self.tag.get_absolute_url())
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertFalse('Oops!' in str(messages[0]))
        # Correctly creates a tagged_trait for each trait.
        for trait_pk in form_data['traits']:
            trait = SourceTrait.objects.get(pk=trait_pk)
            tagged_trait = models.TaggedTrait.objects.get(trait__pk=trait_pk, tag=self.tag)
            self.assertIn(trait, self.tag.all_traits.all())
            self.assertIn(self.tag, trait.all_tags.all())

    def test_fails_with_other_study_trait(self):
        """Tagging a trait fails when the trait is not in the user's taggable_studies'."""
        study2 = StudyFactory.create()
        other_trait = SourceTraitFactory.create(source_dataset__source_study_version__study=study2)
        response = self.client.post(self.get_url(),
                                    {'traits': [other_trait.pk], 'tag': self.tag.pk, })
        # They have taggable studies and they're in the phenotype_taggers group, so view is still accessible.
        self.assertEqual(response.status_code, 200)
        self.assertFormError(
            response, 'form', 'traits',
            'Select a valid choice. {} is not one of the available choices.'.format(other_trait.pk))
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))

    def test_forbidden_non_taggers(self):
        """Returns 403 code when the user is not in phenotype_taggers."""
        phenotype_taggers = Group.objects.get(name='phenotype_taggers')
        self.user.groups.remove(phenotype_taggers)
        response = self.client.get(self.get_url())
        self.assertEqual(response.status_code, 403)

    def test_forbidden_empty_taggable_studies(self):
        """Returns 403 code when the user has no taggable_studies."""
        self.user.profile.taggable_studies.remove(
            self.traits[0].source_dataset.source_study_version.study)
        response = self.client.get(self.get_url())
        self.assertEqual(response.status_code, 403)
class ManyTaggedTraitsCreateDCCAnalystTest(ManyTaggedTraitsCreateTestsMixin, DCCAnalystLoginTestCase):
    """Run the shared add-many tagging view tests as a DCC analyst user."""

    def setUp(self):
        super(ManyTaggedTraitsCreateDCCAnalystTest, self).setUp()
        self.tag = factories.TagFactory.create()
        # No study constraint for DCC analysts; each trait gets factory defaults.
        self.traits = SourceTraitFactory.create_batch(10, )
        # Reload so the in-memory user reflects any database changes from setUp.
        self.user.refresh_from_db()

    def test_tag_other_study_traits(self):
        """DCC user can tag traits without any taggable_studies'."""
        study2 = StudyFactory.create()
        traits2 = SourceTraitFactory.create_batch(5, source_dataset__source_study_version__study=study2)
        response = self.client.post(self.get_url(),
                                    {'traits': [str(x.pk) for x in traits2], 'tag': self.tag.pk, })
        # Correctly goes to the tag's detail page and shows a success message.
        self.assertRedirects(response, self.tag.get_absolute_url())
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertFalse('Oops!' in str(messages[0]))

    def test_creates_all_new_objects_from_multiple_studies(self):
        """Correctly tags traits from two different studies in the user's taggable_studies."""
        study2 = StudyFactory.create()
        self.user.profile.taggable_studies.add(study2)
        more_traits = SourceTraitFactory.create_batch(2, source_dataset__source_study_version__study=study2)
        more_traits = self.traits[:2] + more_traits
        form_data = {'traits': [x.pk for x in more_traits], 'tag': self.tag.pk}
        response = self.client.post(self.get_url(), form_data)
        # Correctly goes to the tag's detail page and shows a success message.
        self.assertRedirects(response, self.tag.get_absolute_url())
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertFalse('Oops!' in str(messages[0]))
        # Correctly creates a tagged_trait for each trait.
        for trait_pk in form_data['traits']:
            trait = SourceTrait.objects.get(pk=trait_pk)
            tagged_trait = models.TaggedTrait.objects.get(trait__pk=trait_pk, tag=self.tag)
            self.assertIn(trait, self.tag.all_traits.all())
            self.assertIn(self.tag, trait.all_tags.all())

    def test_view_success_without_phenotype_taggers_group(self):
        """View is accessible even when the DCC user is not in phenotype_taggers."""
        phenotype_taggers = Group.objects.get(name='phenotype_taggers')
        self.user.groups.remove(phenotype_taggers)
        response = self.client.get(self.get_url())
        self.assertEqual(response.status_code, 200)

    def test_view_success_with_empty_taggable_studies(self):
        """View is accessible when the DCC user has no taggable_studies."""
        self.user.profile.taggable_studies.remove(self.traits[0].source_dataset.source_study_version.study)
        response = self.client.get(self.get_url())
        self.assertEqual(response.status_code, 200)
class ManyTaggedTraitsCreateByTagTestsMixin(object):
    """Tests of the add-many-by-tag tagging view that are shared across user types.

    Subclasses must provide ``self.tag`` and ``self.traits`` in setUp; the tag pk is
    carried in the url rather than in the posted form data.
    """

    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:add-many:by-tag', args=args)

    def test_view_success_code(self):
        """Returns successful response code."""
        response = self.client.get(self.get_url(self.tag.pk))
        self.assertEqual(response.status_code, 200)

    def test_context_data(self):
        """View has appropriate data in the context."""
        response = self.client.get(self.get_url(self.tag.pk))
        context = response.context
        self.assertTrue('form' in context)

    def test_creates_new_object(self):
        """Posting valid data to the form correctly tags a single trait."""
        # Check on redirection to detail page, M2M links, and creation message.
        response = self.client.post(self.get_url(self.tag.pk), {'traits': [str(self.traits[0].pk)]})
        self.assertRedirects(response, self.tag.get_absolute_url())
        new_object = models.TaggedTrait.objects.latest('pk')
        self.assertIsInstance(new_object, models.TaggedTrait)
        self.assertEqual(new_object.tag, self.tag)
        self.assertEqual(new_object.trait, self.traits[0])
        self.assertIn(self.traits[0], self.tag.all_traits.all())
        self.assertIn(self.tag, self.traits[0].all_tags.all())
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertFalse('Oops!' in str(messages[0]))

    def test_creates_two_new_objects(self):
        """Posting valid data to the form correctly tags two traits."""
        # Check on redirection to detail page, M2M links, and creation message.
        some_traits = self.traits[:2]
        response = self.client.post(self.get_url(self.tag.pk), {'traits': [str(t.pk) for t in some_traits]})
        self.assertRedirects(response, self.tag.get_absolute_url())
        for trait in some_traits:
            self.assertIn(trait, self.tag.all_traits.all())
            self.assertIn(self.tag, trait.all_tags.all())
        new_objects = models.TaggedTrait.objects.all()
        for tt in new_objects:
            self.assertEqual(tt.tag, self.tag)
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertFalse('Oops!' in str(messages[0]))

    def test_creates_all_new_objects(self):
        """Posting valid data to the form correctly tags all of the traits listed."""
        # Check on redirection to detail page, M2M links, and creation message.
        response = self.client.post(self.get_url(self.tag.pk),
                                    {'traits': [str(t.pk) for t in self.traits], })
        self.assertRedirects(response, self.tag.get_absolute_url())
        for trait in self.traits:
            self.assertIn(trait, self.tag.all_traits.all())
            self.assertIn(self.tag, trait.all_tags.all())
        new_objects = models.TaggedTrait.objects.all()
        for tt in new_objects:
            self.assertEqual(tt.tag, self.tag)
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertFalse('Oops!' in str(messages[0]))

    def test_invalid_form_message(self):
        """Posting invalid data results in a message about the invalidity."""
        response = self.client.post(self.get_url(self.tag.pk), {'traits': '', })
        self.assertFormError(response, 'form', 'traits', '"" is not a valid value for a primary key.')
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))

    def test_post_blank_trait(self):
        """Posting bad data to the form doesn't tag the trait and shows a form error."""
        response = self.client.post(self.get_url(self.tag.pk), {'traits': [], })
        self.assertFormError(response, 'form', 'traits', 'This field is required.')
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))
        form = response.context['form']
        self.assertTrue(form.has_error('traits'))
        self.assertNotIn(self.tag, self.traits[0].all_tags.all())

    def test_adds_user(self):
        """When a trait is successfully tagged, it has the appropriate creator."""
        response = self.client.post(self.get_url(self.tag.pk),
                                    {'traits': [str(self.traits[0].pk)], })
        new_object = models.TaggedTrait.objects.latest('pk')
        self.assertEqual(self.user, new_object.creator)

    def test_fails_when_one_trait_is_already_tagged(self):
        """Tagging traits fails when a selected trait is already tagged with the tag."""
        already_tagged = factories.TaggedTraitFactory.create(tag=self.tag, trait=self.traits[0])
        response = self.client.post(self.get_url(self.tag.pk),
                                    {'traits': [t.pk for t in self.traits[0:5]], })
        self.assertEqual(response.status_code, 200)
        expected_error = forms.EXISTING_TAGGED_TRAIT_ERROR_STRING.format(
            tag_name=already_tagged.tag.title,
            phv=already_tagged.trait.full_accession,
            trait_name=already_tagged.trait.i_trait_name)
        self.assertFormError(response, 'form', 'traits', expected_error)
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))

    def test_fails_when_one_trait_is_already_tagged_but_archived(self):
        """Tagging traits fails when a selected trait is already tagged with the tag but archived."""
        already_tagged = factories.TaggedTraitFactory.create(tag=self.tag, trait=self.traits[0], archived=True)
        response = self.client.post(self.get_url(self.tag.pk),
                                    {'traits': [t.pk for t in self.traits[0:5]], })
        self.assertEqual(response.status_code, 200)
        expected_error = forms.ARCHIVED_EXISTING_TAGGED_TRAIT_ERROR_STRING.format(
            tag_name=already_tagged.tag.title,
            phv=already_tagged.trait.full_accession,
            trait_name=already_tagged.trait.i_trait_name)
        self.assertFormError(response, 'form', 'traits', expected_error)
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))

    def test_fails_when_one_trait_is_deprecated(self):
        """Can't tag one deprecated source trait."""
        # Deprecate the trait's study version so it leaves the form's queryset.
        sv = self.traits[0].source_dataset.source_study_version
        sv.i_is_deprecated = True
        sv.save()
        response = self.client.post(self.get_url(self.tag.pk), {'traits': [self.traits[0].pk], })
        self.assertEqual(response.status_code, 200)
        self.assertFormError(
            response, 'form', 'traits',
            'Select a valid choice. {} is not one of the available choices.'.format(self.traits[0].pk))
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))

    def test_fails_when_one_of_two_traits_is_deprecated(self):
        """Can't tag one deprecated source trait."""
        deprecated_trait = SourceTraitFactory.create(source_dataset__source_study_version__i_is_deprecated=True)
        self.user.profile.taggable_studies.add(deprecated_trait.source_dataset.source_study_version.study)
        response = self.client.post(self.get_url(self.tag.pk), {'traits': [self.traits[0].pk, deprecated_trait.pk], })
        self.assertEqual(response.status_code, 200)
        self.assertFormError(
            response, 'form', 'traits',
            'Select a valid choice. {} is not one of the available choices.'.format(deprecated_trait.pk))
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))
class ManyTaggedTraitsCreateByTagPhenotypeTaggerTest(ManyTaggedTraitsCreateByTagTestsMixin,
                                                     PhenotypeTaggerLoginTestCase):
    """Run the shared add-many-by-tag view tests as a phenotype tagger user."""

    def setUp(self):
        super(ManyTaggedTraitsCreateByTagPhenotypeTaggerTest, self).setUp()
        self.tag = factories.TagFactory.create()
        # All ten traits share one study version within the tagger's study.
        study_version = SourceStudyVersionFactory.create(study=self.study)
        self.traits = SourceTraitFactory.create_batch(10, source_dataset__source_study_version=study_version)
        # Reload so the in-memory user reflects any database changes from setUp.
        self.user.refresh_from_db()

    def test_creates_all_new_objects_from_multiple_studies(self):
        """Correctly tags traits from two different studies in the user's taggable_studies."""
        study2 = StudyFactory.create()
        self.user.profile.taggable_studies.add(study2)
        more_traits = SourceTraitFactory.create_batch(2, source_dataset__source_study_version__study=study2)
        more_traits = self.traits[:2] + more_traits
        form_data = {'traits': [x.pk for x in more_traits], }
        response = self.client.post(self.get_url(self.tag.pk), form_data)
        # Correctly goes to the tag's detail page and shows a success message.
        self.assertRedirects(response, self.tag.get_absolute_url())
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertFalse('Oops!' in str(messages[0]))
        # Correctly creates a tagged_trait for each trait.
        for trait_pk in form_data['traits']:
            trait = SourceTrait.objects.get(pk=trait_pk)
            tagged_trait = models.TaggedTrait.objects.get(trait__pk=trait_pk, tag=self.tag)
            self.assertIn(trait, self.tag.all_traits.all())
            self.assertIn(self.tag, trait.all_tags.all())

    def test_fails_with_other_study_traits(self):
        """Tagging a trait fails when the trait is not in the user's taggable_studies'."""
        study2 = StudyFactory.create()
        other_trait = SourceTraitFactory.create(source_dataset__source_study_version__study=study2)
        response = self.client.post(self.get_url(self.tag.pk),
                                    {'traits': [other_trait.pk], 'tag': self.tag.pk, })
        # They have taggable studies and they're in the phenotype_taggers group, so view is still accessible.
        self.assertEqual(response.status_code, 200)
        self.assertFormError(
            response, 'form', 'traits',
            'Select a valid choice. {} is not one of the available choices.'.format(other_trait.pk))
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))

    def test_forbidden_non_taggers(self):
        """Returns 403 code when the user is not in phenotype_taggers."""
        phenotype_taggers = Group.objects.get(name='phenotype_taggers')
        self.user.groups.remove(phenotype_taggers)
        response = self.client.get(self.get_url(self.tag.pk))
        self.assertEqual(response.status_code, 403)

    def test_forbidden_empty_taggable_studies(self):
        """Returns 403 code when the user has no taggable_studies."""
        self.user.profile.taggable_studies.remove(
            self.traits[0].source_dataset.source_study_version.study)
        response = self.client.get(self.get_url(self.tag.pk))
        self.assertEqual(response.status_code, 403)
class ManyTaggedTraitsCreateByTagDCCAnalystTest(ManyTaggedTraitsCreateByTagTestsMixin, DCCAnalystLoginTestCase):
    """Run the shared add-many-by-tag view tests as a DCC analyst user."""

    def setUp(self):
        super(ManyTaggedTraitsCreateByTagDCCAnalystTest, self).setUp()
        self.tag = factories.TagFactory.create()
        # No study constraint for DCC analysts; each trait gets factory defaults.
        self.traits = SourceTraitFactory.create_batch(10, )
        # Reload so the in-memory user reflects any database changes from setUp.
        self.user.refresh_from_db()

    def test_tag_other_study_traits(self):
        """DCC user can tag traits without any taggable_studies."""
        study2 = StudyFactory.create()
        traits2 = SourceTraitFactory.create_batch(5, source_dataset__source_study_version__study=study2)
        response = self.client.post(self.get_url(self.tag.pk),
                                    {'traits': [str(x.pk) for x in traits2], 'tag': self.tag.pk, })
        # Correctly goes to the tag's detail page and shows a success message.
        self.assertRedirects(response, self.tag.get_absolute_url())
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertFalse('Oops!' in str(messages[0]))

    def test_view_success_without_phenotype_taggers_group(self):
        """Returns success code even when the DCC user is not in phenotype_taggers."""
        phenotype_taggers = Group.objects.get(name='phenotype_taggers')
        self.user.groups.remove(phenotype_taggers)
        response = self.client.get(self.get_url(self.tag.pk))
        self.assertEqual(response.status_code, 200)

    def test_view_success_with_empty_taggable_studies(self):
        """Returns success code even when the DCC user has no taggable_studies."""
        self.user.profile.taggable_studies.remove(self.traits[0].source_dataset.source_study_version.study)
        response = self.client.get(self.get_url(self.tag.pk))
        self.assertEqual(response.status_code, 200)
class DCCReviewByTagAndStudySelectDCCTestsMixin(object):
def setUp(self):
    """Create a tag, a study, and ten tagged traits linking them."""
    super().setUp()
    self.tag = factories.TagFactory.create()
    self.study = StudyFactory.create()
    # Ten tagged traits, all carrying the same tag and belonging to the same study.
    self.tagged_traits = factories.TaggedTraitFactory.create_batch(
        10, tag=self.tag,
        trait__source_dataset__source_study_version__study=self.study)
def get_url(self, *args):
    """Return the url of the DCC review select view under test."""
    url_name = 'tags:tagged-traits:dcc-review:select'
    return reverse(url_name, args=args)
def test_view_success_code(self):
    """Returns successful response code."""
    status = self.client.get(self.get_url()).status_code
    self.assertEqual(200, status)
def test_context_data(self):
    """View has appropriate data in the context."""
    response = self.client.get(self.get_url())
    context = response.context
    # The select view must expose the tag-and-study selection form.
    self.assertTrue('form' in context)
    self.assertIsInstance(context['form'], forms.DCCReviewTagAndStudySelectForm)
def test_post_blank_trait(self):
    """Posting bad data to the form shows a form error and doesn't set session variables."""
    response = self.client.post(self.get_url(), {'tag': '', 'study': ''})
    self.assertFormError(response, 'form', 'tag', 'This field is required.')
    # An invalid post must not stash any review info in the session.
    session = self.client.session
    self.assertNotIn('tagged_trait_review_by_tag_and_study_info', session)
def test_post_valid_form(self):
    """Posting valid data to the form sets session variables and redirects appropriately."""
    response = self.client.post(self.get_url(), {'tag': self.tag.pk, 'study': self.study.pk})
    # Check session variables: the view records the chosen study, tag, and the
    # pks of the tagged traits to review.
    session = self.client.session
    self.assertIn('tagged_trait_review_by_tag_and_study_info', session)
    session_info = session['tagged_trait_review_by_tag_and_study_info']
    self.assertIn('study_pk', session_info)
    self.assertEqual(session_info['study_pk'], self.study.pk)
    self.assertIn('tag_pk', session_info)
    self.assertEqual(session_info['tag_pk'], self.tag.pk)
    self.assertIn('tagged_trait_pks', session_info)
    for tt in self.tagged_traits:
        self.assertIn(tt.pk, session_info['tagged_trait_pks'],
                      msg='TaggedTrait {} not in session tagged_trait_pks'.format(tt.pk))
    # The success url redirects again to a new page, so include the target_status_code argument.
    self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:next'), target_status_code=302)
def test_session_variable_tagged_with_tag(self):
"""Posting valid data to the form sets tagged_trait_pks to only those from the given tag."""
other_tag = factories.TagFactory.create()
other_tagged_trait = factories.TaggedTraitFactory.create(
tag=other_tag,
trait__source_dataset__source_study_version__study=self.study
)
response = self.client.post(self.get_url(), {'tag': self.tag.pk, 'study': self.study.pk})
session = self.client.session
self.assertIn('tagged_trait_review_by_tag_and_study_info', session)
session_info = session['tagged_trait_review_by_tag_and_study_info']
self.assertIn('tagged_trait_pks', session_info)
for tt in self.tagged_traits:
self.assertIn(tt.pk, session_info['tagged_trait_pks'],
msg='TaggedTrait {} unexpectedly not in session tagged_trait_pks'.format(tt.pk))
self.assertNotIn(other_tagged_trait, session_info['tagged_trait_pks'],
msg='TaggedTrait {} unexpectedly in session tagged_trait_pks'.format(tt.pk))
def test_session_variable_tagged_with_study(self):
"""Posting valid data to the form sets tagged_trait_pks to only those from the given study."""
other_study = StudyFactory.create()
other_tagged_trait = factories.TaggedTraitFactory.create(
tag=self.tag,
trait__source_dataset__source_study_version__study=other_study
)
response = self.client.post(self.get_url(), {'tag': self.tag.pk, 'study': self.study.pk})
session = self.client.session
self.assertIn('tagged_trait_review_by_tag_and_study_info', session)
session_info = session['tagged_trait_review_by_tag_and_study_info']
for tt in self.tagged_traits:
self.assertIn(tt.pk, session_info['tagged_trait_pks'],
msg='TaggedTrait {} unexpectedly not in session tagged_trait_pks'.format(tt.pk))
self.assertNotIn(other_tagged_trait, session_info['tagged_trait_pks'],
msg='TaggedTrait {} unexpectedly in session tagged_trait_pks'.format(tt.pk))
def test_session_variable_tagged_with_study_and_tag(self):
"""Posting valid data to the form sets tagged_trait_pks to only those from the given study and tag."""
other_tag = factories.TagFactory.create()
other_study = StudyFactory.create()
other_tagged_trait = factories.TaggedTraitFactory.create(
tag=other_tag,
trait__source_dataset__source_study_version__study=other_study
)
response = self.client.post(self.get_url(), {'tag': self.tag.pk, 'study': self.study.pk})
session = self.client.session
self.assertIn('tagged_trait_review_by_tag_and_study_info', session)
session_info = session['tagged_trait_review_by_tag_and_study_info']
for tt in self.tagged_traits:
self.assertIn(tt.pk, session_info['tagged_trait_pks'],
msg='TaggedTrait {} unexpectedly not in session tagged_trait_pks'.format(tt.pk))
self.assertNotIn(other_tagged_trait, session_info['tagged_trait_pks'],
msg='TaggedTrait {} unexpectedly in session tagged_trait_pks'.format(tt.pk))
def test_error_no_unreviewed_tagged_traits_with_study_and_tag(self):
"""Form has non-field error if there are no unreviewed tagged traits for this study with this tag."""
study = StudyFactory.create()
tag = factories.TagFactory.create()
# Other unreviewed tagged traits for this tag must exist or you'll get an error on the tags field.
other_study_unreviewed_tagged_trait = factories.TaggedTraitFactory.create(tag=tag)
response = self.client.post(self.get_url(), {'tag': tag.pk, 'study': study.pk})
self.assertEqual(response.status_code, 200)
# Form errors.
self.assertIn('form', response.context)
self.assertFormError(response, 'form', None, forms.DCCReviewTagAndStudySelectForm.ERROR_NO_TAGGED_TRAITS)
# Make sure no variables were set.
session = self.client.session
self.assertNotIn('tagged_trait_review_by_tag_and_study_info', session)
def test_error_with_no_tagged_traits_for_tag(self):
"""Form has error on tags if selecting a tag without any tagged traits."""
study = StudyFactory.create()
tag = factories.TagFactory.create()
response = self.client.post(self.get_url(), {'tag': tag.pk, 'study': study.pk})
self.assertEqual(response.status_code, 200)
self.assertIn('form', response.context)
self.assertFormError(response, 'form', 'tag',
'Select a valid choice. That choice is not one of the available choices.')
# Make sure no session variables were set.
session = self.client.session
self.assertNotIn('tagged_trait_review_by_tag_and_study_info', session)
def test_error_with_tag_with_completed_review(self):
"""Form has error on tags if selecting a tag without any unreviewed tagged traits."""
study = StudyFactory.create()
tag = factories.TagFactory.create()
reviewed_tagged_trait = factories.TaggedTraitFactory.create(tag=tag)
factories.DCCReviewFactory.create(tagged_trait=reviewed_tagged_trait)
response = self.client.post(self.get_url(), {'tag': tag.pk, 'study': study.pk})
self.assertEqual(response.status_code, 200)
self.assertIn('form', response.context)
self.assertFormError(response, 'form', 'tag',
'Select a valid choice. That choice is not one of the available choices.')
# Make sure no session variables were set.
session = self.client.session
self.assertNotIn('tagged_trait_review_by_tag_and_study_info', session)
def test_resets_session_variables(self):
"""A preexisting session variable is overwritten with new data upon successful form submission."""
self.client.session['tagged_trait_review_by_tag_and_study_info'] = {
'study_pk': self.study.pk + 1,
'tag_pk': self.tag.pk + 1,
'tagged_trait_pks': [],
}
self.client.session.save()
response = self.client.post(self.get_url(), {'tag': self.tag.pk, 'study': self.study.pk})
session = self.client.session
self.assertIn('tagged_trait_review_by_tag_and_study_info', session)
session_info = session['tagged_trait_review_by_tag_and_study_info']
self.assertIn('study_pk', session_info)
self.assertEqual(session_info['study_pk'], self.study.pk)
self.assertIn('tag_pk', session_info)
self.assertEqual(session_info['tag_pk'], self.tag.pk)
self.assertIn('tagged_trait_pks', session_info)
self.assertEqual(len(session_info['tagged_trait_pks']), len(self.tagged_traits))
for tt in self.tagged_traits:
self.assertIn(tt.pk, session_info['tagged_trait_pks'],
msg='TaggedTrait {} not in session tagged_trait_pks'.format(tt.pk))
def test_link_to_review_views(self):
"""The link to review tagged traits appears on the home page for DCC users."""
url = reverse('home')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, """<a href="{}">""".format(self.get_url()))
def test_no_archived_taggedtraits_in_session_variable(self):
"""Sets session variable, without including archived tagged traits."""
archived_tagged_trait = factories.TaggedTraitFactory.create(
tag=self.tag, trait__source_dataset__source_study_version__study=self.study, archived=True)
response = self.client.post(self.get_url(), {'tag': self.tag.pk, 'study': self.study.pk})
# Check session variables.
session = self.client.session
self.assertIn('tagged_trait_review_by_tag_and_study_info', session)
session_info = session['tagged_trait_review_by_tag_and_study_info']
self.assertIn('study_pk', session_info)
self.assertEqual(session_info['study_pk'], self.study.pk)
self.assertIn('tag_pk', session_info)
self.assertEqual(session_info['tag_pk'], self.tag.pk)
self.assertIn('tagged_trait_pks', session_info)
for tt in self.tagged_traits:
self.assertIn(tt.pk, session_info['tagged_trait_pks'],
msg='TaggedTrait {} not in session tagged_trait_pks'.format(tt.pk))
self.assertNotIn(archived_tagged_trait.pk, session_info['tagged_trait_pks'])
# The success url redirects again to a new page, so include the target_status_code argument.
self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:next'), target_status_code=302)
def test_no_deprecated_traits_in_session_variable(self):
"""Sets session variable, without including deprecated tagged traits."""
deprecated_tagged_trait = factories.TaggedTraitFactory.create(
tag=self.tag, trait__source_dataset__source_study_version__study=self.study,
trait__source_dataset__source_study_version__i_is_deprecated=True)
response = self.client.post(self.get_url(), {'tag': self.tag.pk, 'study': self.study.pk})
# Check session variables.
session = self.client.session
self.assertIn('tagged_trait_review_by_tag_and_study_info', session)
session_info = session['tagged_trait_review_by_tag_and_study_info']
self.assertIn('study_pk', session_info)
self.assertEqual(session_info['study_pk'], self.study.pk)
self.assertIn('tag_pk', session_info)
self.assertEqual(session_info['tag_pk'], self.tag.pk)
self.assertIn('tagged_trait_pks', session_info)
for tt in self.tagged_traits:
self.assertIn(tt.pk, session_info['tagged_trait_pks'],
msg='TaggedTrait {} not in session tagged_trait_pks'.format(tt.pk))
self.assertNotIn(deprecated_tagged_trait.pk, session_info['tagged_trait_pks'])
# The success url redirects again to a new page, so include the target_status_code argument.
self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:next'), target_status_code=302)
class DCCReviewByTagAndStudySelectDCCAnalystTest(DCCReviewByTagAndStudySelectDCCTestsMixin, DCCAnalystLoginTestCase):
    """Run all tests in DCCReviewByTagAndStudySelectDCCTestsMixin, as a DCC analyst."""

    pass
class DCCReviewByTagAndStudySelectDCCDeveloperTest(DCCReviewByTagAndStudySelectDCCTestsMixin,
                                                   DCCDeveloperLoginTestCase):
    """Run all tests in DCCReviewByTagAndStudySelectDCCTestsMixin, as a DCC developer."""

    pass
class DCCReviewByTagAndStudySelectOtherUserTest(UserLoginTestCase):
    """Tests of the dcc-review select view for users without DCC permissions."""

    def get_url(self, *args):
        """Return the url of the view this test class exercises."""
        return reverse('tags:tagged-traits:dcc-review:select', args=args)

    def test_forbidden_get_request(self):
        """Get returns forbidden status code for non-DCC users."""
        self.assertEqual(self.client.get(self.get_url()).status_code, 403)

    def test_forbidden_post_request(self):
        """Post returns forbidden status code for non-DCC users."""
        self.assertEqual(self.client.post(self.get_url(), {}).status_code, 403)

    def test_link_not_in_navbar(self):
        """The link to review tagged traits doesn't appear on the home page for non-DCC users."""
        response = self.client.get(reverse('home'))
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, """<a href="{}">""".format(self.get_url()))
class DCCReviewByTagAndStudySelectFromURLDCCTestsMixin(object):
    """Tests of the view that begins a DCC review loop directly from a tag + study URL.

    Mixed into DCC analyst and DCC developer login test cases below.
    """

    def setUp(self):
        super().setUp()
        self.tag = factories.TagFactory.create()
        self.study = StudyFactory.create()
        self.tagged_traits = factories.TaggedTraitFactory.create_batch(
            10,
            tag=self.tag,
            trait__source_dataset__source_study_version__study=self.study
        )

    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:tag:study:begin-dcc-review', args=args)

    def test_view_success_code(self):
        """Returns successful response code."""
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk), follow=False)
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:next'), fetch_redirect_response=False)

    def test_nonexistent_study_404(self):
        """Returns 404 if study does not exist."""
        study_pk = self.study.pk
        self.study.delete()
        response = self.client.get(self.get_url(self.tag.pk, study_pk), follow=False)
        self.assertEqual(response.status_code, 404)

    def test_nonexistent_tag_404(self):
        """Returns 404 if tag does not exist."""
        tag_pk = self.tag.pk
        self.tag.delete()
        response = self.client.get(self.get_url(tag_pk, self.study.pk), follow=False)
        self.assertEqual(response.status_code, 404)

    def test_sets_session_variables(self):
        """View stores the tag, study, and unreviewed tagged trait pks in the session."""
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk), follow=False)
        session = self.client.session
        self.assertIn('tagged_trait_review_by_tag_and_study_info', session)
        session_info = session['tagged_trait_review_by_tag_and_study_info']
        self.assertIn('study_pk', session_info)
        self.assertEqual(session_info['study_pk'], self.study.pk)
        self.assertIn('tag_pk', session_info)
        self.assertEqual(session_info['tag_pk'], self.tag.pk)
        self.assertIn('tagged_trait_pks', session_info)
        self.assertEqual(len(session_info['tagged_trait_pks']), len(self.tagged_traits))
        for tt in self.tagged_traits:
            self.assertIn(tt.pk, session_info['tagged_trait_pks'],
                          msg='TaggedTrait {} not in session tagged_trait_pks'.format(tt.pk))
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:next'), fetch_redirect_response=False)

    def test_only_tagged_traits_from_requested_tag(self):
        """tagged_trait_pks is set to only those from the given tag."""
        other_tag = factories.TagFactory.create()
        other_tagged_trait = factories.TaggedTraitFactory.create(
            tag=other_tag,
            trait__source_dataset__source_study_version__study=self.study
        )
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk), follow=False)
        session = self.client.session
        self.assertIn('tagged_trait_review_by_tag_and_study_info', session)
        session_info = session['tagged_trait_review_by_tag_and_study_info']
        self.assertIn('tagged_trait_pks', session_info)
        self.assertEqual(len(session_info['tagged_trait_pks']), len(self.tagged_traits))
        for tt in self.tagged_traits:
            self.assertIn(tt.pk, session_info['tagged_trait_pks'],
                          msg='TaggedTrait {} unexpectedly not in session tagged_trait_pks'.format(tt.pk))
        # Check the pk, not the model object: the session stores primary keys, so asserting the
        # object's absence from a list of ints would pass vacuously.
        self.assertNotIn(other_tagged_trait.pk, session_info['tagged_trait_pks'],
                         msg='TaggedTrait {} unexpectedly in session tagged_trait_pks'.format(other_tagged_trait.pk))
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:next'), fetch_redirect_response=False)

    def test_only_tagged_traits_from_requested_study(self):
        """tagged_trait_pks is set to only those from the given study."""
        other_study = StudyFactory.create()
        other_tagged_trait = factories.TaggedTraitFactory.create(
            tag=self.tag,
            trait__source_dataset__source_study_version__study=other_study
        )
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk), follow=False)
        session = self.client.session
        self.assertIn('tagged_trait_review_by_tag_and_study_info', session)
        session_info = session['tagged_trait_review_by_tag_and_study_info']
        self.assertIn('tagged_trait_pks', session_info)
        self.assertEqual(len(session_info['tagged_trait_pks']), len(self.tagged_traits))
        for tt in self.tagged_traits:
            self.assertIn(tt.pk, session_info['tagged_trait_pks'],
                          msg='TaggedTrait {} unexpectedly not in session tagged_trait_pks'.format(tt.pk))
        # Check the pk, not the model object (see test_only_tagged_traits_from_requested_tag).
        self.assertNotIn(other_tagged_trait.pk, session_info['tagged_trait_pks'],
                         msg='TaggedTrait {} unexpectedly in session tagged_trait_pks'.format(other_tagged_trait.pk))
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:next'), fetch_redirect_response=False)

    def test_session_variable_tagged_with_study_and_tag(self):
        """tagged_trait_pks is set to only those from the given study and tag."""
        other_tag = factories.TagFactory.create()
        other_study = StudyFactory.create()
        other_tagged_trait = factories.TaggedTraitFactory.create(
            tag=other_tag,
            trait__source_dataset__source_study_version__study=other_study
        )
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        session = self.client.session
        self.assertIn('tagged_trait_review_by_tag_and_study_info', session)
        session_info = session['tagged_trait_review_by_tag_and_study_info']
        for tt in self.tagged_traits:
            self.assertIn(tt.pk, session_info['tagged_trait_pks'],
                          msg='TaggedTrait {} unexpectedly not in session tagged_trait_pks'.format(tt.pk))
        # Check the pk, not the model object (see test_only_tagged_traits_from_requested_tag).
        self.assertNotIn(other_tagged_trait.pk, session_info['tagged_trait_pks'],
                         msg='TaggedTrait {} unexpectedly in session tagged_trait_pks'.format(other_tagged_trait.pk))
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:next'), fetch_redirect_response=False)

    def test_resets_session_variables(self):
        """A preexisting session variable is overwritten with new data."""
        # self.client.session returns a *new* session object on every property access, so the
        # stale data must be set and saved through a single reference for it to persist into
        # the request; the original chained form silently set it on a throwaway object.
        session = self.client.session
        session['tagged_trait_review_by_tag_and_study_info'] = {
            'study_pk': self.study.pk + 1,
            'tag_pk': self.tag.pk + 1,
            'tagged_trait_pks': [],
        }
        session.save()
        self.client.get(self.get_url(self.tag.pk, self.study.pk))
        session = self.client.session
        self.assertIn('tagged_trait_review_by_tag_and_study_info', session)
        session_info = session['tagged_trait_review_by_tag_and_study_info']
        self.assertIn('study_pk', session_info)
        self.assertEqual(session_info['study_pk'], self.study.pk)
        self.assertIn('tag_pk', session_info)
        self.assertEqual(session_info['tag_pk'], self.tag.pk)
        self.assertIn('tagged_trait_pks', session_info)
        self.assertEqual(len(session_info['tagged_trait_pks']), len(self.tagged_traits))
        for tt in self.tagged_traits:
            self.assertIn(tt.pk, session_info['tagged_trait_pks'],
                          msg='TaggedTrait {} not in session tagged_trait_pks'.format(tt.pk))

    def test_continue_reviewing_link_in_navbar_after_successful_load(self):
        """The link to continue reviewing appears in the navbar after loading this page."""
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        url = reverse('home')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, """<a href="{}">""".format(reverse('tags:tagged-traits:dcc-review:next')))

    def test_redirects_with_message_for_no_tagged_traits_to_review(self):
        """Redirects and displays message when there are no tagged traits to review for the tag+study."""
        models.TaggedTrait.objects.all().delete()
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        self.assertEqual(response.status_code, 302)
        # Check for message.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('No tagged variables to review', str(messages[0]))

    def test_no_archived_taggedtraits_in_session_variable(self):
        """Does not include archived tagged traits in session variables."""
        archived_tagged_trait = factories.TaggedTraitFactory.create(
            tag=self.tag, trait__source_dataset__source_study_version__study=self.study, archived=True)
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        # Check session variables.
        session = self.client.session
        self.assertIn('tagged_trait_review_by_tag_and_study_info', session)
        session_info = session['tagged_trait_review_by_tag_and_study_info']
        self.assertIn('study_pk', session_info)
        self.assertEqual(session_info['study_pk'], self.study.pk)
        self.assertIn('tag_pk', session_info)
        self.assertEqual(session_info['tag_pk'], self.tag.pk)
        self.assertIn('tagged_trait_pks', session_info)
        for tt in self.tagged_traits:
            self.assertIn(tt.pk, session_info['tagged_trait_pks'],
                          msg='TaggedTrait {} not in session tagged_trait_pks'.format(tt.pk))
        self.assertNotIn(archived_tagged_trait.pk, session_info['tagged_trait_pks'])
        # The success url redirects again to a new page, so include the target_status_code argument.
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:next'), target_status_code=302)

    def test_no_deprecated_traits_in_session_variable(self):
        """Sets session variable, without including deprecated tagged traits."""
        deprecated_tagged_trait = factories.TaggedTraitFactory.create(
            tag=self.tag, trait__source_dataset__source_study_version__study=self.study,
            trait__source_dataset__source_study_version__i_is_deprecated=True)
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        # Check session variables.
        session = self.client.session
        self.assertIn('tagged_trait_review_by_tag_and_study_info', session)
        session_info = session['tagged_trait_review_by_tag_and_study_info']
        self.assertIn('study_pk', session_info)
        self.assertEqual(session_info['study_pk'], self.study.pk)
        self.assertIn('tag_pk', session_info)
        self.assertEqual(session_info['tag_pk'], self.tag.pk)
        self.assertIn('tagged_trait_pks', session_info)
        for tt in self.tagged_traits:
            self.assertIn(tt.pk, session_info['tagged_trait_pks'],
                          msg='TaggedTrait {} not in session tagged_trait_pks'.format(tt.pk))
        self.assertNotIn(deprecated_tagged_trait.pk, session_info['tagged_trait_pks'])
        # The success url redirects again to a new page, so include the target_status_code argument.
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:next'), target_status_code=302)
class DCCReviewByTagAndStudySelectFromURLDCCAnalystTest(DCCReviewByTagAndStudySelectFromURLDCCTestsMixin,
                                                        DCCAnalystLoginTestCase):
    """Run all tests in DCCReviewByTagAndStudySelectFromURLDCCTestsMixin as a DCC analyst."""

    pass
class DCCReviewByTagAndStudySelectFromURLDCCDeveloperTest(DCCReviewByTagAndStudySelectFromURLDCCTestsMixin,
                                                          DCCDeveloperLoginTestCase):
    """Run all tests in DCCReviewByTagAndStudySelectFromURLDCCTestsMixin as a DCC developer."""

    pass
class DCCReviewByTagAndStudySelectFromURLOtherUserTest(UserLoginTestCase):
    """Tests of the begin-dcc-review view for users without DCC permissions."""

    def setUp(self):
        super().setUp()
        self.study = StudyFactory.create()
        self.tag = factories.TagFactory.create()
        self.tagged_traits = factories.TaggedTraitFactory.create_batch(
            10, tag=self.tag, trait__source_dataset__source_study_version__study=self.study)

    def get_url(self, *args):
        """Return the url of the view this test class exercises."""
        return reverse('tags:tag:study:begin-dcc-review', args=args)

    def test_forbidden_get_request(self):
        """Get returns forbidden status code for non-DCC users."""
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        self.assertEqual(response.status_code, 403)
class DCCReviewByTagAndStudyNextDCCTestsMixin(object):
    """Tests of the dcc-review 'next' view, which advances the review loop via session state.

    Mixed into DCC analyst and DCC developer login test cases below.
    """

    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:tagged-traits:dcc-review:next', args=args)

    def test_view_success_with_no_session_variables(self):
        """View redirects correctly when no session variables are set."""
        response = self.client.get(self.get_url())
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:select'))

    def test_view_success_with_tagged_traits_to_review(self):
        """View redirects correctly when there are tagged traits to review."""
        tagged_trait = factories.TaggedTraitFactory.create()
        tag = tagged_trait.tag
        study = tagged_trait.trait.source_dataset.source_study_version.study
        session = self.client.session
        session['tagged_trait_review_by_tag_and_study_info'] = {
            'tag_pk': tag.pk,
            'study_pk': study.pk,
            'tagged_trait_pks': [tagged_trait.pk],
        }
        session.save()
        response = self.client.get(self.get_url())
        # Make sure a pk session variable was set
        session = self.client.session
        self.assertIn('tagged_trait_review_by_tag_and_study_info', session)
        self.assertIn('pk', session['tagged_trait_review_by_tag_and_study_info'])
        self.assertEqual(session['tagged_trait_review_by_tag_and_study_info']['pk'], tagged_trait.pk)
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:review'))
        # Check messages.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('You have 1 tagged variable left to review.', str(messages[0]))

    def test_view_success_with_no_tagged_traits_left(self):
        """View redirects correctly when no tagged traits are left to review."""
        tag = factories.TagFactory.create()
        study = StudyFactory.create()
        session = self.client.session
        session['tagged_trait_review_by_tag_and_study_info'] = {
            'tag_pk': tag.pk,
            'study_pk': study.pk,
            'tagged_trait_pks': [],
        }
        session.save()
        response = self.client.get(self.get_url())
        self.assertRedirects(response, reverse('tags:tag:study:list', args=[tag.pk, study.pk]))
        # Check that there are no messages.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 0)

    def test_session_variables_are_unset_when_reviewing_completed(self):
        """View unsets session variables when no tagged traits are left to review."""
        tag = factories.TagFactory.create()
        study = StudyFactory.create()
        session = self.client.session
        session['tagged_trait_review_by_tag_and_study_info'] = {
            'tag_pk': tag.pk,
            'study_pk': study.pk,
            'tagged_trait_pks': [],
        }
        session.save()
        response = self.client.get(self.get_url())
        self.assertNotIn('tagged_trait_review_by_tag_and_study_info', self.client.session)

    def test_skips_tagged_trait_that_has_been_reviewed(self):
        """Skips a tagged trait that has been reviewed after starting the loop."""
        tag = factories.TagFactory.create()
        study = StudyFactory.create()
        tagged_traits = factories.TaggedTraitFactory.create_batch(
            2,
            tag=tag,
            trait__source_dataset__source_study_version__study=study
        )
        session = self.client.session
        session['tagged_trait_review_by_tag_and_study_info'] = {
            'tag_pk': tag.pk,
            'study_pk': study.pk,
            'tagged_trait_pks': [x.pk for x in tagged_traits],
        }
        session.save()
        factories.DCCReviewFactory.create(tagged_trait=tagged_traits[0])
        response = self.client.get(self.get_url())
        self.assertIn('tagged_trait_review_by_tag_and_study_info', self.client.session)
        session_info = self.client.session['tagged_trait_review_by_tag_and_study_info']
        self.assertIn('tagged_trait_pks', session_info)
        self.assertEqual(session_info['tagged_trait_pks'], [tagged_traits[1].pk])
        self.assertNotIn('pk', session_info)
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:next'), target_status_code=302)
        # Check that there are no messages.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 0)

    def test_skips_deleted_tagged_trait(self):
        """Skips a tagged trait that has been deleted after starting the loop."""
        tag = factories.TagFactory.create()
        study = StudyFactory.create()
        tagged_traits = factories.TaggedTraitFactory.create_batch(
            2,
            tag=tag,
            trait__source_dataset__source_study_version__study=study
        )
        session = self.client.session
        session['tagged_trait_review_by_tag_and_study_info'] = {
            'tag_pk': tag.pk,
            'study_pk': study.pk,
            'tagged_trait_pks': [x.pk for x in tagged_traits],
        }
        session.save()
        # Now delete it and try loading the view.
        tagged_traits[0].delete()
        response = self.client.get(self.get_url())
        self.assertIn('tagged_trait_review_by_tag_and_study_info', self.client.session)
        session_info = self.client.session['tagged_trait_review_by_tag_and_study_info']
        self.assertIn('tagged_trait_pks', session_info)
        self.assertEqual(session_info['tagged_trait_pks'], [tagged_traits[1].pk])
        self.assertNotIn('pk', session_info)
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:next'), target_status_code=302)

    def test_skips_archived_tagged_trait(self):
        """Skips a tagged trait that has been archived after starting the loop."""
        tag = factories.TagFactory.create()
        study = StudyFactory.create()
        tagged_traits = factories.TaggedTraitFactory.create_batch(
            2,
            tag=tag,
            trait__source_dataset__source_study_version__study=study
        )
        session = self.client.session
        session['tagged_trait_review_by_tag_and_study_info'] = {
            'tag_pk': tag.pk,
            'study_pk': study.pk,
            'tagged_trait_pks': [x.pk for x in tagged_traits],
        }
        session.save()
        # Now archive it and try loading the view.
        tagged_traits[0].archive()
        response = self.client.get(self.get_url())
        self.assertIn('tagged_trait_review_by_tag_and_study_info', self.client.session)
        session_info = self.client.session['tagged_trait_review_by_tag_and_study_info']
        self.assertIn('tagged_trait_pks', session_info)
        self.assertEqual(session_info['tagged_trait_pks'], [tagged_traits[1].pk])
        self.assertNotIn('pk', session_info)
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:next'), target_status_code=302)

    def test_session_variables_are_not_properly_set(self):
        """Redirects to select view if expected session variable is not set."""
        response = self.client.get(self.get_url())
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:select'))

    def test_session_variable_missing_required_keys(self):
        """Redirects to select view if expected session variable dictionary keys are missing."""
        tag = factories.TagFactory.create()
        study = StudyFactory.create()
        tagged_traits = factories.TaggedTraitFactory.create_batch(
            2,
            tag=tag,
            trait__source_dataset__source_study_version__study=study
        )
        template = {
            'study_pk': study.pk,
            'tag_pk': tag.pk,
            'tagged_trait_pks': [x.pk for x in tagged_traits]
        }
        # Drop each required key in turn and check that the view bails out cleanly.
        for key in template:
            session_info = copy.copy(template)
            session_info.pop(key)
            session = self.client.session
            session['tagged_trait_review_by_tag_and_study_info'] = session_info
            session.save()
            response = self.client.get(self.get_url())
            self.assertNotIn('tagged_trait_review_by_tag_and_study_info', self.client.session)
            self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:select'),
                                 msg_prefix='did not redirect when missing {} in session'.format(key))

    def test_continue_reviewing_link_in_navbar_if_session_variable_is_present(self):
        """The link to continue reviewing traits appears on the home page for DCC users if session variable exists."""
        tagged_trait = factories.TaggedTraitFactory.create()
        tag = tagged_trait.tag
        study = tagged_trait.trait.source_dataset.source_study_version.study
        session = self.client.session
        session['tagged_trait_review_by_tag_and_study_info'] = {
            'tag_pk': tag.pk,
            'study_pk': study.pk,
            'tagged_trait_pks': [tagged_trait.pk],
        }
        session.save()
        url = reverse('home')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, """<a href="{}">""".format(self.get_url()))

    def test_continue_reviewing_link_not_in_navbar_if_session_variable_is_missing(self):
        """The link to continue reviewing doesn't appear on the home page for DCC users if no session variable."""
        url = reverse('home')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, """<a href="{}">""".format(self.get_url()))

    def test_skips_deprecated_tagged_traits(self):
        """Skips a tagged trait that has been deprecated after starting the loop."""
        tag = factories.TagFactory.create()
        study = StudyFactory.create()
        tagged_traits = factories.TaggedTraitFactory.create_batch(
            2,
            tag=tag,
            trait__source_dataset__source_study_version__study=study
        )
        session = self.client.session
        session['tagged_trait_review_by_tag_and_study_info'] = {
            'tag_pk': tag.pk,
            'study_pk': study.pk,
            'tagged_trait_pks': [x.pk for x in tagged_traits],
        }
        session.save()
        # Now deprecate one and try loading the view.
        study_version = tagged_traits[0].trait.source_dataset.source_study_version
        study_version.i_is_deprecated = True
        study_version.save()
        response = self.client.get(self.get_url())
        self.assertIn('tagged_trait_review_by_tag_and_study_info', self.client.session)
        session_info = self.client.session['tagged_trait_review_by_tag_and_study_info']
        self.assertIn('tagged_trait_pks', session_info)
        self.assertEqual(session_info['tagged_trait_pks'], [tagged_traits[1].pk])
        self.assertNotIn('pk', session_info)
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:next'), target_status_code=302)
class DCCReviewByTagAndStudyNextDCCAnalystTest(DCCReviewByTagAndStudyNextDCCTestsMixin, DCCAnalystLoginTestCase):
    """Run all tests in DCCReviewByTagAndStudyNextDCCTestsMixin, as a DCC analyst."""

    pass
class DCCReviewByTagAndStudyNextDCCDeveloperTest(DCCReviewByTagAndStudyNextDCCTestsMixin, DCCDeveloperLoginTestCase):
    """Run all tests in DCCReviewByTagAndStudyNextDCCTestsMixin, as a DCC developer."""

    pass
class DCCReviewByTagAndStudyNextOtherUserTest(UserLoginTestCase):
    """Tests of the dcc-review 'next' view for users without DCC permissions."""

    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:tagged-traits:dcc-review:next', args=args)

    def test_forbidden_get_request(self):
        """Get returns forbidden status code for non-DCC users."""
        response = self.client.get(self.get_url())
        self.assertEqual(response.status_code, 403)

    def test_forbidden_post_request(self):
        """Post returns forbidden status code for non-DCC users."""
        response = self.client.post(self.get_url(), {})
        self.assertEqual(response.status_code, 403)

    # Renamed from test_continue_reviewing_link_in_navbar_... so the method name matches
    # what is actually asserted (the link must be ABSENT for non-DCC users).
    def test_continue_reviewing_link_not_in_navbar_if_session_variable_is_present(self):
        """The link to continue reviewing traits doesn't appear on the home page for non-DCC users."""
        tagged_trait = factories.TaggedTraitFactory.create()
        tag = tagged_trait.tag
        study = tagged_trait.trait.source_dataset.source_study_version.study
        session = self.client.session
        session['tagged_trait_review_by_tag_and_study_info'] = {
            'tag_pk': tag.pk,
            'study_pk': study.pk,
            'tagged_trait_pks': [tagged_trait.pk],
        }
        session.save()
        url = reverse('home')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, """<a href="{}">""".format(self.get_url()))
class DCCReviewByTagAndStudyDCCTestsMixin(object):
    """Tests of the tag-and-study DCC review view, to run for both DCC analysts and DCC developers.

    Mix in with a login test case that provides a DCC user; the view being tested
    reads its state from the 'tagged_trait_review_by_tag_and_study_info' session variable.
    """

    def setUp(self):
        """Create one tagged trait and the session variables the review view expects."""
        super().setUp()
        self.tag = factories.TagFactory.create()
        self.study = StudyFactory.create()
        self.tagged_trait = factories.TaggedTraitFactory.create(
            tag=self.tag,
            trait__source_dataset__source_study_version__study=self.study
        )
        # Set expected session variables.
        session = self.client.session
        session['tagged_trait_review_by_tag_and_study_info'] = {
            'study_pk': self.study.pk,
            'tag_pk': self.tag.pk,
            'tagged_trait_pks': [self.tagged_trait.pk],
            'pk': self.tagged_trait.pk,
        }
        session.save()

    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:tagged-traits:dcc-review:review', args=args)

    def test_view_success_code(self):
        """Returns successful response code."""
        response = self.client.get(self.get_url())
        self.assertEqual(response.status_code, 200)

    def test_context_data(self):
        """View has appropriate data in the context."""
        response = self.client.get(self.get_url())
        context = response.context
        self.assertIn('form', context)
        self.assertIsInstance(context['form'], forms.DCCReviewByTagAndStudyForm)
        self.assertIn('tagged_trait', context)
        self.assertEqual(context['tagged_trait'], self.tagged_trait)
        self.assertIn('tag', context)
        self.assertEqual(context['tag'], self.tag)
        self.assertIn('study', context)
        self.assertEqual(context['study'], self.study)
        self.assertIn('n_tagged_traits_remaining', context)
        self.assertEqual(context['n_tagged_traits_remaining'], 1)

    def test_context_data_with_multiple_remaining_tagged_traits(self):
        """View has appropriate data in the context if there are multiple tagged traits to review."""
        session = self.client.session
        info = session['tagged_trait_review_by_tag_and_study_info']
        # NOTE(review): the second pk need not correspond to an existing TaggedTrait;
        # presumably the remaining count below only uses the length of this list — confirm in the view.
        info['tagged_trait_pks'] = [self.tagged_trait.pk, self.tagged_trait.pk + 1]
        session.save()
        response = self.client.get(self.get_url())
        context = response.context
        self.assertIn('form', context)
        self.assertIsInstance(context['form'], forms.DCCReviewByTagAndStudyForm)
        self.assertIn('tagged_trait', context)
        self.assertEqual(context['tagged_trait'], self.tagged_trait)
        self.assertIn('tag', context)
        self.assertEqual(context['tag'], self.tag)
        self.assertIn('study', context)
        self.assertEqual(context['study'], self.study)
        self.assertIn('n_tagged_traits_remaining', context)
        self.assertEqual(context['n_tagged_traits_remaining'], 2)

    def test_successful_post_with_confirmed_tagged_trait(self):
        """Posting valid data to the form correctly creates a confirmed DCCReview."""
        form_data = {forms.DCCReviewByTagAndStudyForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''}
        response = self.client.post(self.get_url(), form_data)
        # Correctly creates a DCCReview for this TaggedTrait.
        dcc_review = models.DCCReview.objects.latest('created')
        self.assertEqual(self.tagged_trait.dcc_review, dcc_review)
        # The pk session variable is correctly unset.
        session = self.client.session
        self.assertIn('tagged_trait_review_by_tag_and_study_info', session)
        session_info = session['tagged_trait_review_by_tag_and_study_info']
        self.assertNotIn('pk', session_info)
        # Check for success message.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('Successfully reviewed', str(messages[0]))
        # Correctly redirects to the next view (remembering that it is a redirect view).
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:next'), target_status_code=302)

    def test_successful_post_with_needs_followup_tagged_trait(self):
        """Posting valid data to the form correctly creates a 'needs followup' DCCReview."""
        form_data = {forms.DCCReviewByTagAndStudyForm.SUBMIT_FOLLOWUP: 'Require study followup', 'comment': 'foo'}
        response = self.client.post(self.get_url(), form_data)
        # Correctly creates a DCCReview for this TaggedTrait.
        dcc_review = models.DCCReview.objects.latest('created')
        self.assertEqual(self.tagged_trait.dcc_review, dcc_review)
        # The pk session variable is correctly unset.
        session = self.client.session
        self.assertIn('tagged_trait_review_by_tag_and_study_info', session)
        session_info = session['tagged_trait_review_by_tag_and_study_info']
        self.assertNotIn('pk', session_info)
        # Check for success message.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('Successfully reviewed', str(messages[0]))
        # Correctly redirects to the next view (remembering that it is a redirect view).
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:next'), target_status_code=302)

    def test_error_missing_comment(self):
        """Posting bad data to the form shows a form error and doesn't unset session variables."""
        form_data = {forms.DCCReviewByTagAndStudyForm.SUBMIT_FOLLOWUP: 'Require study followup', 'comment': ''}
        response = self.client.post(self.get_url(), form_data)
        self.assertEqual(response.status_code, 200)
        self.assertFormError(
            response, 'form', 'comment',
            'Comment cannot be blank for tagged variables that require followup.')
        # Does not create a DCCReview for this TaggedTrait.
        self.assertFalse(hasattr(self.tagged_trait, 'dcc_review'))
        # The pk session variable is not unset.
        session = self.client.session
        self.assertIn('tagged_trait_review_by_tag_and_study_info', session)
        session_info = session['tagged_trait_review_by_tag_and_study_info']
        self.assertIn('pk', session_info)
        self.assertEqual(session_info['pk'], self.tagged_trait.pk)
        # No messages.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 0)

    def test_skip_tagged_trait(self):
        """Skipping a TaggedTrait unsets pk and redirects to the next view."""
        form_data = {forms.DCCReviewByTagAndStudyForm.SUBMIT_SKIP: 'Skip'}
        response = self.client.post(self.get_url(), form_data)
        # Does not create a DCCReview for this TaggedTrait.
        self.assertFalse(hasattr(self.tagged_trait, 'dcc_review'))
        # Session variables are properly set/unset.
        session = self.client.session
        self.assertIn('tagged_trait_review_by_tag_and_study_info', session)
        session_info = session['tagged_trait_review_by_tag_and_study_info']
        self.assertNotIn('pk', session_info)
        self.assertIn('tagged_trait_pks', session_info)
        self.assertNotIn(self.tagged_trait.pk, session_info['tagged_trait_pks'])
        # The redirect view unsets some session variables, so check it at the end.
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:next'), target_status_code=302)

    def test_non_existent_tagged_trait(self):
        """Returns 404 if the tagged trait for the session variable pk doesn't exist."""
        self.tagged_trait.delete()
        response = self.client.get(self.get_url())
        self.assertEqual(response.status_code, 404)

    def test_already_reviewed_tagged_trait(self):
        """Shows warning message and does not save review if TaggedTrait is already reviewed."""
        dcc_review = factories.DCCReviewFactory.create(
            tagged_trait=self.tagged_trait,
            status=models.DCCReview.STATUS_FOLLOWUP
        )
        # Now try to review it through the web interface.
        form_data = {forms.DCCReviewByTagAndStudyForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''}
        response = self.client.post(self.get_url(), form_data)
        # Check session variables.
        self.assertIn('tagged_trait_review_by_tag_and_study_info', self.client.session)
        session_info = self.client.session['tagged_trait_review_by_tag_and_study_info']
        self.assertNotIn('pk', session_info)
        self.assertIn('tagged_trait_pks', session_info)
        self.assertNotIn(self.tagged_trait.pk, session_info['tagged_trait_pks'])
        # Check for warning message.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('already been reviewed', str(messages[0]))
        # The previous DCCReview was not updated.
        self.assertEqual(self.tagged_trait.dcc_review, dcc_review)
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:next'), target_status_code=302)

    def test_archived_tagged_trait(self):
        """Shows warning message and does not save review if TaggedTrait is archived."""
        self.tagged_trait.archive()
        # Now try to review it through the web interface.
        form_data = {forms.DCCReviewByTagAndStudyForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''}
        response = self.client.post(self.get_url(), form_data)
        # Check session variables.
        self.assertIn('tagged_trait_review_by_tag_and_study_info', self.client.session)
        session_info = self.client.session['tagged_trait_review_by_tag_and_study_info']
        self.assertNotIn('pk', session_info)
        self.assertIn('tagged_trait_pks', session_info)
        self.assertNotIn(self.tagged_trait.pk, session_info['tagged_trait_pks'])
        # Check for warning message.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('been archived', str(messages[0]))
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:next'), target_status_code=302)

    def test_deprecated_trait(self):
        """Shows warning message and does not save review if SourceTrait is deprecated."""
        study_version = self.tagged_trait.trait.source_dataset.source_study_version
        study_version.i_is_deprecated = True
        study_version.save()
        # Now try to review it through the web interface.
        form_data = {forms.DCCReviewByTagAndStudyForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''}
        response = self.client.post(self.get_url(), form_data)
        # Check session variables.
        self.assertIn('tagged_trait_review_by_tag_and_study_info', self.client.session)
        session_info = self.client.session['tagged_trait_review_by_tag_and_study_info']
        self.assertNotIn('pk', session_info)
        self.assertIn('tagged_trait_pks', session_info)
        self.assertNotIn(self.tagged_trait.pk, session_info['tagged_trait_pks'])
        self.tagged_trait.refresh_from_db()
        self.assertFalse(hasattr(self.tagged_trait, 'dcc_review'))
        # Check for warning message.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('newer version', str(messages[0]))
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:next'), target_status_code=302)

    def test_already_reviewed_tagged_trait_with_form_error(self):
        """Shows warning message and redirects if TaggedTrait is already reviewed, even if there's a form error."""
        dcc_review = factories.DCCReviewFactory.create(
            tagged_trait=self.tagged_trait,
            status=models.DCCReview.STATUS_CONFIRMED,
            comment=''
        )
        # Now try to review it through the web interface.
        # The blank comment with SUBMIT_FOLLOWUP would normally be a form error.
        form_data = {forms.DCCReviewByTagAndStudyForm.SUBMIT_FOLLOWUP: 'Require study followup', 'comment': ''}
        response = self.client.post(self.get_url(), form_data)
        # Check session variables.
        self.assertIn('tagged_trait_review_by_tag_and_study_info', self.client.session)
        session_info = self.client.session['tagged_trait_review_by_tag_and_study_info']
        self.assertNotIn('pk', session_info)
        self.assertIn('tagged_trait_pks', session_info)
        self.assertNotIn(self.tagged_trait.pk, session_info['tagged_trait_pks'])
        # Check for warning message.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('already been reviewed', str(messages[0]))
        # The previous DCCReview was not updated.
        self.assertEqual(self.tagged_trait.dcc_review, dcc_review)
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:next'), target_status_code=302)

    def test_archived_tagged_trait_with_form_error(self):
        """Shows warning message and redirects if TaggedTrait is archived, even if there's a form error."""
        self.tagged_trait.archive()
        # Now try to review it through the web interface.
        # The blank comment with SUBMIT_FOLLOWUP would normally be a form error.
        form_data = {forms.DCCReviewByTagAndStudyForm.SUBMIT_FOLLOWUP: 'Require study followup', 'comment': ''}
        response = self.client.post(self.get_url(), form_data)
        # Check session variables.
        self.assertIn('tagged_trait_review_by_tag_and_study_info', self.client.session)
        session_info = self.client.session['tagged_trait_review_by_tag_and_study_info']
        self.assertNotIn('pk', session_info)
        self.assertIn('tagged_trait_pks', session_info)
        self.assertNotIn(self.tagged_trait.pk, session_info['tagged_trait_pks'])
        # Check for warning message.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('been archived', str(messages[0]))
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:next'), target_status_code=302)

    def test_can_skip_already_reviewed_tagged_trait(self):
        """Redirects without a message if an already-reviewed tagged trait is skipped."""
        dcc_review = factories.DCCReviewFactory.create(
            tagged_trait=self.tagged_trait,
            status=models.DCCReview.STATUS_CONFIRMED,
            comment=''
        )
        # Now try to review it through the web interface.
        form_data = {forms.DCCReviewByTagAndStudyForm.SUBMIT_SKIP: 'Skip', 'comment': ''}
        response = self.client.post(self.get_url(), form_data)
        # Check session variables.
        self.assertIn('tagged_trait_review_by_tag_and_study_info', self.client.session)
        session_info = self.client.session['tagged_trait_review_by_tag_and_study_info']
        self.assertNotIn('pk', session_info)
        self.assertIn('tagged_trait_pks', session_info)
        self.assertNotIn(self.tagged_trait.pk, session_info['tagged_trait_pks'])
        # Check that no message was generated.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 0)
        # The previous DCCReview was not updated.
        self.assertEqual(self.tagged_trait.dcc_review, dcc_review)
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:next'), target_status_code=302)

    def test_get_session_variables_are_not_properly_set(self):
        """Redirects to select view if expected session variable is not set."""
        session = self.client.session
        del session['tagged_trait_review_by_tag_and_study_info']
        session.save()
        response = self.client.get(self.get_url())
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:select'))

    def test_post_session_variables_are_not_properly_set(self):
        """Redirects to select view if expected session variable is not set."""
        session = self.client.session
        del session['tagged_trait_review_by_tag_and_study_info']
        session.save()
        response = self.client.post(self.get_url(), {})
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:select'))

    def test_get_session_variable_missing_key_tag_pk(self):
        """Redirects to select view if tag_pk is missing from session variable keys."""
        session = self.client.session
        session['tagged_trait_review_by_tag_and_study_info'].pop('tag_pk')
        session.save()
        response = self.client.get(self.get_url())
        self.assertNotIn('tagged_trait_review_by_tag_and_study_info', self.client.session)
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:select'))

    def test_get_session_variable_missing_key_study_pk(self):
        """Redirects to select view if study_pk is missing from session variable keys."""
        session = self.client.session
        session['tagged_trait_review_by_tag_and_study_info'].pop('study_pk')
        session.save()
        response = self.client.get(self.get_url())
        self.assertNotIn('tagged_trait_review_by_tag_and_study_info', self.client.session)
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:select'))

    def test_get_session_variable_missing_key_tagged_trait_pks(self):
        """Redirects to select view if tagged_trait_pks is missing from session variable keys."""
        session = self.client.session
        session['tagged_trait_review_by_tag_and_study_info'].pop('tagged_trait_pks')
        session.save()
        response = self.client.get(self.get_url())
        self.assertNotIn('tagged_trait_review_by_tag_and_study_info', self.client.session)
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:select'))

    def test_get_session_variable_missing_key_pk(self):
        """Redirects to next view if pk is missing from session variable keys."""
        session = self.client.session
        session['tagged_trait_review_by_tag_and_study_info'].pop('pk')
        session.save()
        response = self.client.get(self.get_url())
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:next'), target_status_code=302)

    def test_post_session_variable_missing_key_tag_pk(self):
        """Redirects to select view if tag_pk is missing from session variable keys."""
        session = self.client.session
        session['tagged_trait_review_by_tag_and_study_info'].pop('tag_pk')
        session.save()
        response = self.client.post(self.get_url(), {})
        self.assertNotIn('tagged_trait_review_by_tag_and_study_info', self.client.session)
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:select'))

    def test_post_session_variable_missing_key_study_pk(self):
        """Redirects to select view if study_pk is missing from session variable keys."""
        session = self.client.session
        session['tagged_trait_review_by_tag_and_study_info'].pop('study_pk')
        session.save()
        response = self.client.post(self.get_url(), {})
        self.assertNotIn('tagged_trait_review_by_tag_and_study_info', self.client.session)
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:select'))

    def test_post_session_variable_missing_key_tagged_trait_pks(self):
        """Redirects to select view if tagged_trait_pks is missing from session variable keys."""
        session = self.client.session
        session['tagged_trait_review_by_tag_and_study_info'].pop('tagged_trait_pks')
        session.save()
        response = self.client.post(self.get_url(), {})
        self.assertNotIn('tagged_trait_review_by_tag_and_study_info', self.client.session)
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:select'))

    def test_post_session_variable_missing_key_pk(self):
        """Redirects to next view if pk is missing from session variable keys."""
        session = self.client.session
        session['tagged_trait_review_by_tag_and_study_info'].pop('pk')
        session.save()
        response = self.client.post(self.get_url(), {})
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-review:next'), target_status_code=302)

    def test_shows_other_tags(self):
        """Other tags linked to the same trait are included in the page."""
        another_tagged_trait = factories.TaggedTraitFactory.create(trait=self.tagged_trait.trait)
        response = self.client.get(self.get_url())
        context = response.context
        self.assertTrue(context['show_other_tags'])
        content = str(response.content)
        self.assertIn(another_tagged_trait.tag.title, content)
        self.assertIn(self.tagged_trait.tag.title, content)

    def test_shows_archived_other_tags(self):
        """Archived tags linked to the same trait are included in the page."""
        another_tagged_trait = factories.TaggedTraitFactory.create(trait=self.tagged_trait.trait, archived=True)
        response = self.client.get(self.get_url())
        context = response.context
        self.assertTrue(context['show_other_tags'])
        content = str(response.content)
        self.assertIn(another_tagged_trait.tag.title, content)
        self.assertIn(self.tagged_trait.tag.title, content)

    def test_shows_tag_only_once_when_it_is_archived(self):
        """The tag is only shown once, even when the tagged variable is archived."""
        self.tagged_trait.archive()
        response = self.client.get(self.get_url())
        context = response.context
        self.assertTrue(context['show_other_tags'])
        content = str(response.content)
        self.assertNotIn(self.tagged_trait.tag, context['other_tags'])
        self.assertNotIn(self.tagged_trait.tag, context['archived_other_tags'])
class DCCReviewByTagAndStudyDCCAnalystTest(DCCReviewByTagAndStudyDCCTestsMixin, DCCAnalystLoginTestCase):
    """Run all tests in DCCReviewByTagAndStudyDCCTestsMixin, as a DCC analyst."""

    pass
class DCCReviewByTagAndStudyDCCDeveloperTest(DCCReviewByTagAndStudyDCCTestsMixin, DCCDeveloperLoginTestCase):
    """Run all tests in DCCReviewByTagAndStudyDCCTestsMixin, as a DCC developer."""

    pass
class DCCReviewByTagAndStudyOtherUserTest(UserLoginTestCase):
    """Tests of the tag-and-study review view as a regular (non-DCC) user."""

    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:tagged-traits:dcc-review:review', args=args)

    def test_forbidden_get_request(self):
        """Get returns forbidden status code for non-DCC users."""
        self.assertEqual(self.client.get(self.get_url()).status_code, 403)

    def test_forbidden_post_request(self):
        """Post returns forbidden status code for non-DCC users."""
        self.assertEqual(self.client.post(self.get_url(), {}).status_code, 403)

    def test_link_not_in_navbar(self):
        """The link to continue reviewing traits doesn't appear on the home page for non-DCC users."""
        response = self.client.get(reverse('home'))
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, '<a href="{}">'.format(self.get_url()))
class DCCReviewCreateDCCTestsMixin(object):
    """Tests of the DCCReview create view, to run for both DCC analysts and DCC developers.

    Mix in with a login test case that provides a DCC user; the view takes the
    TaggedTrait pk from the URL.
    """

    def setUp(self):
        """Create a tagged trait to review."""
        super().setUp()
        self.tagged_trait = factories.TaggedTraitFactory.create()

    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:tagged-traits:pk:dcc-review:new', args=args)

    def test_view_success_code(self):
        """Returns successful response code."""
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        self.assertEqual(response.status_code, 200)

    def test_context_data(self):
        """View has appropriate data in the context."""
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        context = response.context
        self.assertIn('form', context)
        self.assertIsInstance(context['form'], forms.DCCReviewForm)
        self.assertIn('tagged_trait', context)
        self.assertEqual(context['tagged_trait'], self.tagged_trait)

    def test_successful_post_with_confirmed_tagged_trait(self):
        """Posting valid data to the form correctly creates a confirmed DCCReview."""
        form_data = {forms.DCCReviewForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''}
        response = self.client.post(self.get_url(self.tagged_trait.pk), form_data)
        self.assertRedirects(response, self.tagged_trait.get_absolute_url())
        # Correctly creates a DCCReview for this TaggedTrait.
        dcc_review = models.DCCReview.objects.latest('created')
        self.assertEqual(self.tagged_trait.dcc_review, dcc_review)
        # Check for success message.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('Successfully reviewed', str(messages[0]))

    def test_successful_post_with_needs_followup_tagged_trait(self):
        """Posting valid data to the form correctly creates a 'need followup' DCCReview."""
        form_data = {forms.DCCReviewForm.SUBMIT_FOLLOWUP: 'Require study followup', 'comment': 'foo'}
        response = self.client.post(self.get_url(self.tagged_trait.pk), form_data)
        self.assertRedirects(response, self.tagged_trait.get_absolute_url())
        # Correctly creates a DCCReview for this TaggedTrait.
        dcc_review = models.DCCReview.objects.latest('created')
        self.assertEqual(self.tagged_trait.dcc_review, dcc_review)
        # Check for success message.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('Successfully reviewed', str(messages[0]))

    def test_form_error_with_missing_comment(self):
        """Posting bad data to the form shows a form error."""
        form_data = {forms.DCCReviewForm.SUBMIT_FOLLOWUP: 'Require study followup', 'comment': ''}
        response = self.client.post(self.get_url(self.tagged_trait.pk), form_data)
        self.assertEqual(response.status_code, 200)
        self.assertFormError(
            response, 'form', 'comment',
            'Comment cannot be blank for tagged variables that require followup.')
        # Does not create a DCCReview for this TaggedTrait.
        self.assertFalse(hasattr(self.tagged_trait, 'dcc_review'))
        # No messages.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 0)

    def test_get_non_existent_tagged_trait(self):
        """Get returns 404 if the tagged trait doesn't exist."""
        url = self.get_url(self.tagged_trait.pk)
        self.tagged_trait.delete()
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)

    def test_post_non_existent_tagged_trait(self):
        """Post returns 404 if the tagged trait for the URL pk doesn't exist."""
        url = self.get_url(self.tagged_trait.pk)
        self.tagged_trait.delete()
        form_data = {forms.DCCReviewForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''}
        response = self.client.post(url, form_data)
        self.assertEqual(response.status_code, 404)

    def test_get_already_reviewed_tagged_trait(self):
        """Shows warning message and redirects to update page if TaggedTrait is already reviewed."""
        dcc_review = factories.DCCReviewFactory.create(
            tagged_trait=self.tagged_trait,
            status=models.DCCReview.STATUS_FOLLOWUP
        )
        # Now try to review it through the web interface.
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        self.assertRedirects(response, reverse('tags:tagged-traits:pk:dcc-review:update', args=[self.tagged_trait.pk]))
        # Check for warning message.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('already been reviewed', str(messages[0]))
        # The previous DCCReview was not updated.
        self.assertEqual(self.tagged_trait.dcc_review, dcc_review)

    def test_post_already_reviewed_tagged_trait(self):
        """Shows warning message and does not save review if TaggedTrait is already reviewed."""
        dcc_review = factories.DCCReviewFactory.create(
            tagged_trait=self.tagged_trait,
            status=models.DCCReview.STATUS_FOLLOWUP
        )
        # Now try to review it through the web interface.
        form_data = {forms.DCCReviewForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''}
        response = self.client.post(self.get_url(self.tagged_trait.pk), form_data)
        self.assertRedirects(response, reverse('tags:tagged-traits:pk:dcc-review:update', args=[self.tagged_trait.pk]))
        # Check for warning message.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('already been reviewed', str(messages[0]))
        # The previous DCCReview was not updated.
        self.assertEqual(self.tagged_trait.dcc_review, dcc_review)

    def test_post_already_reviewed_tagged_trait_with_form_error(self):
        """Shows warning message and redirects if TaggedTrait is already reviewed, even if there's a form error."""
        dcc_review = factories.DCCReviewFactory.create(
            tagged_trait=self.tagged_trait,
            status=models.DCCReview.STATUS_FOLLOWUP
        )
        # Now try to review it through the web interface.
        # The blank comment with SUBMIT_FOLLOWUP would normally be a form error.
        form_data = {forms.DCCReviewForm.SUBMIT_FOLLOWUP: 'Confirm', 'comment': ''}
        response = self.client.post(self.get_url(self.tagged_trait.pk), form_data)
        self.assertRedirects(response, reverse('tags:tagged-traits:pk:dcc-review:update', args=[self.tagged_trait.pk]))
        # Check for warning message.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('already been reviewed', str(messages[0]))
        # The previous DCCReview was not updated.
        self.assertEqual(self.tagged_trait.dcc_review, dcc_review)

    def test_get_archived_tagged_trait(self):
        """Get redirects with an error message if the tagged trait is archived."""
        self.tagged_trait.archive()
        url = self.get_url(self.tagged_trait.pk)
        response = self.client.get(url)
        self.assertRedirects(response, self.tagged_trait.get_absolute_url())
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('been archived', str(messages[0]))

    def test_post_archived_tagged_trait(self):
        """Post redirects with an error message if the tagged trait is archived."""
        self.tagged_trait.archive()
        url = self.get_url(self.tagged_trait.pk)
        form_data = {forms.DCCReviewForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''}
        response = self.client.post(url, form_data)
        self.assertRedirects(response, self.tagged_trait.get_absolute_url())
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('been archived', str(messages[0]))

    def test_get_deprecated_tagged_trait(self):
        """Get redirects with an error message if the trait is deprecated."""
        study_version = self.tagged_trait.trait.source_dataset.source_study_version
        study_version.i_is_deprecated = True
        study_version.save()
        url = self.get_url(self.tagged_trait.pk)
        response = self.client.get(url)
        self.tagged_trait.refresh_from_db()
        self.assertRedirects(response, self.tagged_trait.get_absolute_url())
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('newer version', str(messages[0]))

    def test_post_deprecated_tagged_trait(self):
        """Post redirects with an error message if the trait is deprecated."""
        study_version = self.tagged_trait.trait.source_dataset.source_study_version
        study_version.i_is_deprecated = True
        study_version.save()
        url = self.get_url(self.tagged_trait.pk)
        form_data = {forms.DCCReviewForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''}
        response = self.client.post(url, form_data)
        self.assertFalse(hasattr(self.tagged_trait, 'dcc_review'))
        self.assertRedirects(response, self.tagged_trait.get_absolute_url())
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('newer version', str(messages[0]))

    def test_shows_other_tags(self):
        """Other tags linked to the same trait are included in the page."""
        another_tagged_trait = factories.TaggedTraitFactory.create(trait=self.tagged_trait.trait)
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        context = response.context
        self.assertTrue(context['show_other_tags'])
        content = str(response.content)
        self.assertIn(another_tagged_trait.tag.title, content)
        self.assertIn(self.tagged_trait.tag.title, content)

    def test_shows_archived_other_tags(self):
        """Archived tags linked to the same trait are included in the page."""
        another_tagged_trait = factories.TaggedTraitFactory.create(trait=self.tagged_trait.trait, archived=True)
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        context = response.context
        self.assertTrue(context['show_other_tags'])
        content = str(response.content)
        self.assertIn(another_tagged_trait.tag.title, content)
        self.assertIn(self.tagged_trait.tag.title, content)
class DCCReviewCreateDCCAnalystTest(DCCReviewCreateDCCTestsMixin, DCCAnalystLoginTestCase):
    """Run all tests in DCCReviewCreateDCCTestsMixin, as a DCC analyst."""

    pass
class DCCReviewCreateDCCDeveloperTest(DCCReviewCreateDCCTestsMixin, DCCDeveloperLoginTestCase):
    """Run all tests in DCCReviewCreateDCCTestsMixin, as a DCC developer."""

    pass
class DCCReviewCreateOtherUserTest(UserLoginTestCase):
    """Tests of the DCCReview create view as a regular (non-DCC) user."""

    def setUp(self):
        super().setUp()
        self.tagged_trait = factories.TaggedTraitFactory.create()

    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:tagged-traits:pk:dcc-review:new', args=args)

    def test_forbidden_get_request(self):
        """Get returns forbidden status code for non-DCC users."""
        url = self.get_url(self.tagged_trait.pk)
        self.assertEqual(self.client.get(url).status_code, 403)

    def test_forbidden_post_request(self):
        """Post returns forbidden status code for non-DCC users."""
        url = self.get_url(self.tagged_trait.pk)
        self.assertEqual(self.client.post(url, {}).status_code, 403)

    def test_forbidden_get_request_with_existing_review(self):
        """Get returns forbidden status code for non-DCC user when review exists."""
        factories.DCCReviewFactory.create(
            tagged_trait=self.tagged_trait, status=models.DCCReview.STATUS_CONFIRMED)
        url = self.get_url(self.tagged_trait.pk)
        self.assertEqual(self.client.get(url).status_code, 403)

    def test_forbidden_post_request_with_existing_review(self):
        """Post returns forbidden status code for non-DCC user when review exists."""
        factories.DCCReviewFactory.create(
            tagged_trait=self.tagged_trait, status=models.DCCReview.STATUS_CONFIRMED)
        url = self.get_url(self.tagged_trait.pk)
        self.assertEqual(self.client.post(url, {}).status_code, 403)
class DCCReviewUpdateDCCTestsMixin(object):
    """Tests of the DCCReview update view to run for all DCC user types.

    Mix in with a login test case that logs in a DCC user (analyst or developer).
    setUp creates one TaggedTrait with an existing DCCReview for each test.
    """

    def setUp(self):
        super().setUp()
        self.tagged_trait = factories.TaggedTraitFactory.create()
        factories.DCCReviewFactory.create(tagged_trait=self.tagged_trait)

    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:tagged-traits:pk:dcc-review:update', args=args)

    def test_view_success_code(self):
        """Returns successful response code."""
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        self.assertEqual(response.status_code, 200)

    def test_context_data(self):
        """View has appropriate data in the context."""
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        context = response.context
        self.assertIn('form', context)
        self.assertIsInstance(context['form'], forms.DCCReviewForm)
        self.assertIn('tagged_trait', context)
        self.assertEqual(context['tagged_trait'], self.tagged_trait)

    def test_successful_post_with_confirmed_tagged_trait(self):
        """Posting valid data to the form correctly updates an existing DCCReview."""
        # Replace the setUp review with a followup review, then confirm it through the view.
        self.tagged_trait.dcc_review.delete()
        factories.DCCReviewFactory.create(tagged_trait=self.tagged_trait, status=models.DCCReview.STATUS_FOLLOWUP)
        form_data = {forms.DCCReviewForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''}
        response = self.client.post(self.get_url(self.tagged_trait.pk), form_data)
        self.assertRedirects(response, self.tagged_trait.get_absolute_url())
        # Correctly updates the DCCReview for this TaggedTrait.
        self.tagged_trait.dcc_review.refresh_from_db()
        self.assertEqual(self.tagged_trait.dcc_review.status, models.DCCReview.STATUS_CONFIRMED)
        self.assertEqual(self.tagged_trait.dcc_review.comment, '')
        # Check for success message.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('Successfully updated', str(messages[0]))

    def test_successful_post_with_needs_followup_tagged_trait(self):
        """Posting valid data to the form correctly updates a DCCReview."""
        comment = 'a new comment'
        form_data = {forms.DCCReviewForm.SUBMIT_FOLLOWUP: 'Confirm', 'comment': comment}
        response = self.client.post(self.get_url(self.tagged_trait.pk), form_data)
        self.assertRedirects(response, self.tagged_trait.get_absolute_url())
        # Correctly updates the DCCReview for this TaggedTrait.
        self.tagged_trait.dcc_review.refresh_from_db()
        self.assertEqual(self.tagged_trait.dcc_review.status, models.DCCReview.STATUS_FOLLOWUP)
        self.assertEqual(self.tagged_trait.dcc_review.comment, comment)
        # Check for success message.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('Successfully updated', str(messages[0]))

    def test_form_error_with_missing_comment(self):
        """Posting bad data to the form shows a form error."""
        existing_review = self.tagged_trait.dcc_review
        form_data = {forms.DCCReviewForm.SUBMIT_FOLLOWUP: 'Require study followup', 'comment': ''}
        response = self.client.post(self.get_url(self.tagged_trait.pk), form_data)
        self.assertEqual(response.status_code, 200)
        self.assertFormError(
            response, 'form', 'comment',
            'Comment cannot be blank for tagged variables that require followup.')
        # Does not update the DCCReview for this TaggedTrait.
        self.tagged_trait.dcc_review.refresh_from_db()
        self.assertEqual(self.tagged_trait.dcc_review, existing_review)
        # No messages.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 0)

    def test_get_non_existent_tagged_trait(self):
        """Get returns a 404 page if the tagged trait doesn't exist."""
        url = self.get_url(self.tagged_trait.pk)
        self.tagged_trait.hard_delete()
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)

    def test_post_non_existent_tagged_trait(self):
        """Post returns a 404 page if the tagged trait doesn't exist."""
        url = self.get_url(self.tagged_trait.pk)
        self.tagged_trait.hard_delete()
        form_data = {forms.DCCReviewForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''}
        response = self.client.post(url, form_data)
        self.assertEqual(response.status_code, 404)

    def test_get_nonexistent_dcc_review(self):
        """Get redirects to the create view with a warning if the DCCReview doesn't exist."""
        self.tagged_trait.dcc_review.delete()
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        self.assertRedirects(response, reverse('tags:tagged-traits:pk:dcc-review:new', args=[self.tagged_trait.pk]))
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('has not been reviewed yet', str(messages[0]))

    def test_post_nonexistent_dcc_review(self):
        """Post redirects to the create view with a warning if the DCCReview doesn't exist."""
        self.tagged_trait.dcc_review.delete()
        # Bug fix: this test previously sent a GET request, so the POST path it documents
        # was never exercised. Send a POST with valid form data instead.
        form_data = {forms.DCCReviewForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''}
        response = self.client.post(self.get_url(self.tagged_trait.pk), form_data)
        self.assertRedirects(response, reverse('tags:tagged-traits:pk:dcc-review:new', args=[self.tagged_trait.pk]))
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('has not been reviewed yet', str(messages[0]))

    def test_get_archived_tagged_trait(self):
        """Get redirects to detail page if the tagged trait is archived."""
        self.tagged_trait.archive()
        url = self.get_url(self.tagged_trait.pk)
        response = self.client.get(url)
        self.assertRedirects(response, self.tagged_trait.get_absolute_url())
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('archived', str(messages[0]))

    def test_post_archived_tagged_trait(self):
        """Post redirects to detail page if the tagged trait is archived."""
        self.tagged_trait.archive()
        url = self.get_url(self.tagged_trait.pk)
        form_data = {forms.DCCReviewForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''}
        response = self.client.post(url, form_data)
        self.assertRedirects(response, self.tagged_trait.get_absolute_url())
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('archived', str(messages[0]))

    def test_get_deprecated_tagged_trait(self):
        """Get redirects to detail page if the tagged trait is deprecated."""
        study_version = self.tagged_trait.trait.source_dataset.source_study_version
        study_version.i_is_deprecated = True
        study_version.save()
        url = self.get_url(self.tagged_trait.pk)
        response = self.client.get(url)
        self.assertRedirects(response, self.tagged_trait.get_absolute_url())
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('newer version', str(messages[0]))

    def test_post_deprecated_tagged_trait(self):
        """Post redirects to detail page if the tagged trait is deprecated."""
        study_version = self.tagged_trait.trait.source_dataset.source_study_version
        study_version.i_is_deprecated = True
        study_version.save()
        url = self.get_url(self.tagged_trait.pk)
        form_data = {forms.DCCReviewForm.SUBMIT_FOLLOWUP: 'Require study followup', 'comment': 'new test comment'}
        response = self.client.post(url, form_data)
        self.tagged_trait.refresh_from_db()
        # The posted comment must not have been saved to the existing review.
        self.assertTrue(self.tagged_trait.dcc_review.comment != 'new test comment')
        self.assertRedirects(response, self.tagged_trait.get_absolute_url())
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('newer version', str(messages[0]))

    def test_cant_update_dcc_review_if_study_has_responded(self):
        """Post redirects with a message if the study has responded."""
        self.tagged_trait.dcc_review.delete()
        dcc_review = factories.DCCReviewFactory.create(tagged_trait=self.tagged_trait,
                                                       status=models.DCCReview.STATUS_FOLLOWUP)
        factories.StudyResponseFactory.create(dcc_review=dcc_review)
        form_data = {forms.DCCReviewForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''}
        response = self.client.post(self.get_url(self.tagged_trait.pk), form_data)
        self.assertRedirects(response, self.tagged_trait.get_absolute_url())
        # Did not update the DCCReview for this TaggedTrait.
        self.tagged_trait.dcc_review.refresh_from_db()
        self.assertEqual(self.tagged_trait.dcc_review.status, models.DCCReview.STATUS_FOLLOWUP)
        # Check for error message.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('Oops!', str(messages[0]))

    def test_get_redirect_if_study_has_responded(self):
        """Redirects with a message if the study has responded."""
        self.tagged_trait.dcc_review.delete()
        dcc_review = factories.DCCReviewFactory.create(tagged_trait=self.tagged_trait,
                                                       status=models.DCCReview.STATUS_FOLLOWUP)
        factories.StudyResponseFactory.create(dcc_review=dcc_review)
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        self.assertRedirects(response, self.tagged_trait.get_absolute_url())
        # Check for error message.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('Oops!', str(messages[0]))

    def test_cant_update_dcc_review_if_dcc_decision_exists(self):
        """Posting data redirects with a message if a dcc decision exists."""
        self.tagged_trait.dcc_review.delete()
        dcc_review = factories.DCCReviewFactory.create(tagged_trait=self.tagged_trait,
                                                       status=models.DCCReview.STATUS_FOLLOWUP)
        factories.DCCDecisionFactory.create(dcc_review=dcc_review)
        form_data = {forms.DCCReviewForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''}
        response = self.client.post(self.get_url(self.tagged_trait.pk), form_data)
        self.assertRedirects(response, self.tagged_trait.get_absolute_url())
        # Did not update the DCCReview for this TaggedTrait.
        self.tagged_trait.dcc_review.refresh_from_db()
        self.assertEqual(self.tagged_trait.dcc_review.status, models.DCCReview.STATUS_FOLLOWUP)
        # Check for error message.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('Oops!', str(messages[0]))

    def test_get_redirect_if_dcc_decision_exists(self):
        """Loading the page redirects with a message if a dcc decision exists."""
        self.tagged_trait.dcc_review.delete()
        dcc_review = factories.DCCReviewFactory.create(tagged_trait=self.tagged_trait,
                                                       status=models.DCCReview.STATUS_FOLLOWUP)
        factories.DCCDecisionFactory.create(dcc_review=dcc_review)
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        self.assertRedirects(response, self.tagged_trait.get_absolute_url())
        # Check for error message.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('Oops!', str(messages[0]))

    def test_shows_other_tags(self):
        """Other tags linked to the same trait are included in the page."""
        another_tagged_trait = factories.TaggedTraitFactory.create(trait=self.tagged_trait.trait)
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        context = response.context
        self.assertTrue(context['show_other_tags'])
        content = str(response.content)
        self.assertIn(another_tagged_trait.tag.title, content)
        self.assertIn(self.tagged_trait.tag.title, content)

    def test_shows_archived_other_tags(self):
        """Archived tags linked to the same trait are included in the page."""
        another_tagged_trait = factories.TaggedTraitFactory.create(trait=self.tagged_trait.trait, archived=True)
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        context = response.context
        self.assertTrue(context['show_other_tags'])
        content = str(response.content)
        self.assertIn(another_tagged_trait.tag.title, content)
        self.assertIn(self.tagged_trait.tag.title, content)
class DCCReviewUpdateDCCAnalystTest(DCCReviewUpdateDCCTestsMixin, DCCAnalystLoginTestCase):
    # Run all tests in DCCReviewUpdateDCCTestsMixin, as a DCC analyst.
    pass
class DCCReviewUpdateDCCDeveloperTest(DCCReviewUpdateDCCTestsMixin, DCCDeveloperLoginTestCase):
    # Run all tests in DCCReviewUpdateDCCTestsMixin, as a DCC developer.
    pass
class DCCReviewUpdateOtherUserTest(UserLoginTestCase):
    """Tests of the DCCReview update view as a regular (non-DCC) user."""

    def setUp(self):
        super().setUp()
        self.tagged_trait = factories.TaggedTraitFactory.create()
        factories.DCCReviewFactory.create(tagged_trait=self.tagged_trait)

    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:tagged-traits:pk:dcc-review:update', args=args)

    def test_forbidden_get_request(self):
        """Get returns forbidden status code for non-DCC users."""
        url = self.get_url(self.tagged_trait.pk)
        self.assertEqual(self.client.get(url).status_code, 403)

    def test_forbidden_post_request(self):
        """Post returns forbidden status code for non-DCC users."""
        url = self.get_url(self.tagged_trait.pk)
        self.assertEqual(self.client.post(url, {}).status_code, 403)
class TaggedTraitsNeedStudyResponseSummaryPhenotypeTaggerTest(PhenotypeTaggerLoginTestCase):
    """Tests of the quality review summary view as a phenotype tagger.

    The view's context contains 'grouped_study_tag_counts': a sequence of
    (study_info, tag_count_list) pairs, where each tag count dict has keys
    'tag_pk', 'tt_remaining_count', and 'tt_completed_count'.
    """
    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:tagged-traits:quality-review', args=args)
    def test_view_success(self):
        """Returns successful response code."""
        response = self.client.get(self.get_url())
        self.assertEqual(response.status_code, 200)
    def test_get_context_data_one_study_with_no_need_followup_traits(self):
        """Counts are correct with no TaggedTraits."""
        response = self.client.get(self.get_url())
        context = response.context
        self.assertIn('grouped_study_tag_counts', context)
        counts = context['grouped_study_tag_counts']
        self.assertEqual(len(counts), 0)
    def test_get_context_data_one_study_one_tagged_trait_with_study_response(self):
        """Count include TaggedTraits that have a study response in the tt_completed_count field."""
        tag = factories.TagFactory.create()
        factories.StudyResponseFactory.create(
            dcc_review__tagged_trait__tag=tag,
            dcc_review__status=models.DCCReview.STATUS_FOLLOWUP,
            dcc_review__tagged_trait__trait__source_dataset__source_study_version__study=self.study
        )
        response = self.client.get(self.get_url())
        context = response.context
        self.assertIn('grouped_study_tag_counts', context)
        counts = context['grouped_study_tag_counts']
        self.assertEqual(len(counts), 1)
        self.assertEqual(counts[0][0]['study_pk'], self.study.pk)
        self.assertEqual(len(counts[0][1]), 1)
        self.assertEqual(counts[0][1][0]['tag_pk'], tag.pk)
        self.assertEqual(counts[0][1][0]['tt_remaining_count'], 0)
        self.assertEqual(counts[0][1][0]['tt_completed_count'], 1)
    def test_get_context_data_one_study_with_one_need_followup_tagged_trait(self):
        """Counts are correct with one TaggedTrait that needs followup."""
        tag = factories.TagFactory.create()
        factories.DCCReviewFactory.create(
            tagged_trait__tag=tag,
            tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.DCCReview.STATUS_FOLLOWUP
        )
        response = self.client.get(self.get_url())
        context = response.context
        self.assertIn('grouped_study_tag_counts', context)
        counts = context['grouped_study_tag_counts']
        self.assertEqual(len(counts), 1)
        self.assertEqual(counts[0][0]['study_pk'], self.study.pk)
        self.assertEqual(len(counts[0][1]), 1)
        self.assertEqual(counts[0][1][0]['tag_pk'], tag.pk)
        self.assertEqual(counts[0][1][0]['tt_remaining_count'], 1)
        self.assertEqual(counts[0][1][0]['tt_completed_count'], 0)
    def test_get_context_data_one_study_with_two_need_followup_tagged_traits(self):
        """Counts are correct with two TaggedTraits that need followup."""
        tag = factories.TagFactory.create()
        factories.DCCReviewFactory.create_batch(
            2,
            tagged_trait__tag=tag,
            tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.DCCReview.STATUS_FOLLOWUP
        )
        response = self.client.get(self.get_url())
        context = response.context
        self.assertIn('grouped_study_tag_counts', context)
        counts = context['grouped_study_tag_counts']
        self.assertEqual(len(counts), 1)
        self.assertEqual(counts[0][0]['study_pk'], self.study.pk)
        self.assertEqual(len(counts[0][1]), 1)
        self.assertEqual(counts[0][1][0]['tag_pk'], tag.pk)
        self.assertEqual(counts[0][1][0]['tt_remaining_count'], 2)
        self.assertEqual(counts[0][1][0]['tt_completed_count'], 0)
    def test_get_context_data_archived_in_tt_completed_and_not_tt_remaining_counts(self):
        """Archived tagged traits are counted as completed rather than remaining."""
        tag = factories.TagFactory.create()
        dcc_reviews = factories.DCCReviewFactory.create_batch(
            3, tagged_trait__tag=tag,
            tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.DCCReview.STATUS_FOLLOWUP)
        archived_tagged_trait = dcc_reviews[0].tagged_trait
        archived_tagged_trait.archive()
        response = self.client.get(self.get_url())
        context = response.context
        self.assertIn('grouped_study_tag_counts', context)
        counts = context['grouped_study_tag_counts']
        self.assertEqual(len(counts), 1)
        self.assertEqual(counts[0][0]['study_pk'], self.study.pk)
        self.assertEqual(len(counts[0][1]), 1)
        self.assertEqual(counts[0][1][0]['tag_pk'], tag.pk)
        self.assertEqual(counts[0][1][0]['tt_remaining_count'], 2)
        self.assertEqual(counts[0][1][0]['tt_completed_count'], 1)
    def test_get_context_data_one_study_two_tags(self):
        """Counts are correct with one study and two tags."""
        tag1 = factories.TagFactory.create(title='tag1')
        factories.DCCReviewFactory.create_batch(
            2,
            tagged_trait__tag=tag1,
            tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.DCCReview.STATUS_FOLLOWUP
        )
        tag2 = factories.TagFactory.create(title='tag2')
        factories.DCCReviewFactory.create(
            tagged_trait__tag=tag2,
            tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.DCCReview.STATUS_FOLLOWUP
        )
        response = self.client.get(self.get_url())
        context = response.context
        self.assertIn('grouped_study_tag_counts', context)
        counts = context['grouped_study_tag_counts']
        self.assertEqual(len(counts), 1)
        self.assertEqual(counts[0][0]['study_pk'], self.study.pk)
        self.assertEqual(len(counts[0][1]), 2)
        self.assertEqual(counts[0][1][0]['tag_pk'], tag1.pk)
        self.assertEqual(counts[0][1][0]['tt_remaining_count'], 2)
        self.assertEqual(counts[0][1][0]['tt_completed_count'], 0)
        self.assertEqual(counts[0][1][1]['tag_pk'], tag2.pk)
        self.assertEqual(counts[0][1][1]['tt_remaining_count'], 1)
        self.assertEqual(counts[0][1][1]['tt_completed_count'], 0)
    def test_get_context_data_two_studies_same_tag(self):
        """Counts are correct with two studies and one tag."""
        # Make sure the second study comes last by appending zzz to the name.
        other_study = StudyFactory.create(i_study_name=self.study.i_study_name + 'zzz')
        self.user.profile.taggable_studies.add(other_study)
        tag = factories.TagFactory.create()
        factories.DCCReviewFactory.create_batch(
            2,
            tagged_trait__tag=tag,
            tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.DCCReview.STATUS_FOLLOWUP
        )
        factories.DCCReviewFactory.create(
            tagged_trait__tag=tag,
            tagged_trait__trait__source_dataset__source_study_version__study=other_study,
            status=models.DCCReview.STATUS_FOLLOWUP
        )
        response = self.client.get(self.get_url())
        context = response.context
        self.assertIn('grouped_study_tag_counts', context)
        counts = context['grouped_study_tag_counts']
        self.assertEqual(len(counts), 2)
        # Check first study.
        self.assertEqual(counts[0][0]['study_pk'], self.study.pk)
        self.assertEqual(len(counts[0][1]), 1)
        self.assertEqual(counts[0][1][0]['tag_pk'], tag.pk)
        self.assertEqual(counts[0][1][0]['tt_remaining_count'], 2)
        self.assertEqual(counts[0][1][0]['tt_completed_count'], 0)
        # Check second study.
        self.assertEqual(counts[1][0]['study_pk'], other_study.pk)
        self.assertEqual(len(counts[1][1]), 1)
        self.assertEqual(counts[1][1][0]['tag_pk'], tag.pk)
        self.assertEqual(counts[1][1][0]['tt_remaining_count'], 1)
        self.assertEqual(counts[1][1][0]['tt_completed_count'], 0)
    def test_context_excludes_confirmed_trait(self):
        """Count does not include a TaggedTrait that is confirmed."""
        factories.DCCReviewFactory.create(
            tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.DCCReview.STATUS_CONFIRMED
        )
        response = self.client.get(self.get_url())
        context = response.context
        self.assertIn('grouped_study_tag_counts', context)
        counts = context['grouped_study_tag_counts']
        self.assertEqual(len(counts), 0)
    def test_context_includes_taggedtrait_with_dccdecision_confirm_no_studyresponse(self):
        """Count does not include a TaggedTrait that has a confirm DCCDecision but no StudyResponse."""
        # NOTE(review): the method name says "includes" but the assertion expects an empty
        # count (exclusion); the docstring matches the assertion. Consider renaming.
        dcc_review = factories.DCCReviewFactory.create(
            tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.DCCReview.STATUS_FOLLOWUP
        )
        dcc_decision = factories.DCCDecisionFactory.create(
            dcc_review=dcc_review, decision=models.DCCDecision.DECISION_CONFIRM)
        response = self.client.get(self.get_url())
        context = response.context
        self.assertIn('grouped_study_tag_counts', context)
        counts = context['grouped_study_tag_counts']
        self.assertEqual(len(counts), 0)
    def test_context_includes_taggedtrait_with_dccdecision_remove_no_studyresponse(self):
        """Count does not include a TaggedTrait that has a remove DCCDecision but no StudyResponse."""
        # NOTE(review): the method name says "includes" but the assertion expects an empty
        # count (exclusion); the docstring matches the assertion. Consider renaming.
        dcc_review = factories.DCCReviewFactory.create(
            tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.DCCReview.STATUS_FOLLOWUP
        )
        dcc_decision = factories.DCCDecisionFactory.create(
            dcc_review=dcc_review, decision=models.DCCDecision.DECISION_REMOVE)
        response = self.client.get(self.get_url())
        context = response.context
        self.assertIn('grouped_study_tag_counts', context)
        counts = context['grouped_study_tag_counts']
        self.assertEqual(len(counts), 0)
    def test_context_includes_taggedtrait_with_dccdecision_confirm_studyresponse_disagree(self):
        """Count includes a TaggedTrait that has a confirm DCCDecision and a disagree StudyResponse."""
        dcc_review = factories.DCCReviewFactory.create(
            tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.DCCReview.STATUS_FOLLOWUP
        )
        study_response = factories.StudyResponseFactory.create(
            dcc_review=dcc_review, status=models.StudyResponse.STATUS_DISAGREE)
        dcc_decision = factories.DCCDecisionFactory.create(
            dcc_review=dcc_review, decision=models.DCCDecision.DECISION_CONFIRM)
        response = self.client.get(self.get_url())
        context = response.context
        self.assertIn('grouped_study_tag_counts', context)
        counts = context['grouped_study_tag_counts']
        self.assertEqual(len(counts), 1)
    def test_context_includes_taggedtrait_with_dccdecision_remove_studyresponse_disagree(self):
        """Count includes a TaggedTrait that has a remove DCCDecision and a disagree StudyResponse."""
        dcc_review = factories.DCCReviewFactory.create(
            tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.DCCReview.STATUS_FOLLOWUP
        )
        study_response = factories.StudyResponseFactory.create(
            dcc_review=dcc_review, status=models.StudyResponse.STATUS_DISAGREE)
        dcc_decision = factories.DCCDecisionFactory.create(
            dcc_review=dcc_review, decision=models.DCCDecision.DECISION_REMOVE)
        response = self.client.get(self.get_url())
        context = response.context
        self.assertIn('grouped_study_tag_counts', context)
        counts = context['grouped_study_tag_counts']
        self.assertEqual(len(counts), 1)
    def test_includes_only_taggable_studies(self):
        """Only studies that the user can tag are included."""
        other_study = StudyFactory.create()
        factories.DCCReviewFactory.create(
            tagged_trait__trait__source_dataset__source_study_version__study=other_study,
            status=models.DCCReview.STATUS_FOLLOWUP
        )
        response = self.client.get(self.get_url())
        context = response.context
        self.assertIn('grouped_study_tag_counts', context)
        counts = context['grouped_study_tag_counts']
        self.assertEqual(len(counts), 0)
    def test_context_does_not_include_tags_with_no_followup_traits(self):
        """Tags are not in context data when they have no TaggedTraits that need followup."""
        tag = factories.TagFactory.create()
        factories.DCCReviewFactory.create(
            tagged_trait__tag=tag,
            tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.DCCReview.STATUS_FOLLOWUP
        )
        tag2 = factories.TagFactory.create()
        factories.DCCReviewFactory.create(
            tagged_trait__tag=tag2,
            tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.DCCReview.STATUS_CONFIRMED
        )
        response = self.client.get(self.get_url())
        context = response.context
        self.assertIn('grouped_study_tag_counts', context)
        counts = context['grouped_study_tag_counts']
        self.assertEqual(len(counts), 1)
        self.assertEqual(counts[0][0]['study_pk'], self.study.pk)
        self.assertEqual(len(counts[0][1]), 1)
        self.assertEqual(counts[0][1][0]['tag_pk'], tag.pk)
        self.assertEqual(counts[0][1][0]['tt_remaining_count'], 1)
        self.assertEqual(counts[0][1][0]['tt_completed_count'], 0)
    def test_context_does_not_include_tags_with_no_followup_traits_different_studies(self):
        """Tag for one study is not in context data when they have no TaggedTraits that need followup."""
        other_study = StudyFactory.create(i_study_name=self.study.i_study_name + 'zzz')
        self.user.profile.taggable_studies.add(other_study)
        tag1 = factories.TagFactory.create()
        tag2 = factories.TagFactory.create()
        factories.DCCReviewFactory.create(
            tagged_trait__tag=tag1,
            tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.DCCReview.STATUS_FOLLOWUP
        )
        factories.DCCReviewFactory.create_batch(
            2,
            tagged_trait__tag=tag2,
            tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.DCCReview.STATUS_CONFIRMED
        )
        factories.DCCReviewFactory.create(
            tagged_trait__tag=tag1,
            tagged_trait__trait__source_dataset__source_study_version__study=other_study,
            status=models.DCCReview.STATUS_CONFIRMED
        )
        factories.DCCReviewFactory.create_batch(
            2,
            tagged_trait__tag=tag2,
            tagged_trait__trait__source_dataset__source_study_version__study=other_study,
            status=models.DCCReview.STATUS_FOLLOWUP
        )
        response = self.client.get(self.get_url())
        context = response.context
        self.assertIn('grouped_study_tag_counts', context)
        counts = context['grouped_study_tag_counts']
        self.assertEqual(len(counts), 2)
        self.assertEqual(counts[0][0]['study_pk'], self.study.pk)
        self.assertEqual(len(counts[0][1]), 1)
        self.assertEqual(counts[0][1][0]['tag_pk'], tag1.pk)
        self.assertEqual(counts[0][1][0]['tt_remaining_count'], 1)
        self.assertEqual(counts[0][1][0]['tt_completed_count'], 0)
        # Check second study.
        self.assertEqual(counts[1][0]['study_pk'], other_study.pk)
        self.assertEqual(len(counts[1][1]), 1)
        self.assertEqual(counts[1][1][0]['tag_pk'], tag2.pk)
        self.assertEqual(counts[1][1][0]['tt_remaining_count'], 2)
        self.assertEqual(counts[1][1][0]['tt_completed_count'], 0)
    def test_context_with_tagged_traits_with_and_without_responses(self):
        """Counts are correct with a mix of tagged traits that are reviewed or require review."""
        n_confirmed = 15
        n_need_review = 20
        n_review_completed = 32
        tag = factories.TagFactory.create()
        factories.DCCReviewFactory.create_batch(
            n_confirmed,
            tagged_trait__tag=tag,
            tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.DCCReview.STATUS_CONFIRMED
        )
        factories.DCCReviewFactory.create_batch(
            n_need_review,
            tagged_trait__tag=tag,
            tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.DCCReview.STATUS_FOLLOWUP
        )
        factories.StudyResponseFactory.create_batch(
            n_review_completed,
            dcc_review__tagged_trait__tag=tag,
            dcc_review__tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            dcc_review__status=models.DCCReview.STATUS_FOLLOWUP
        )
        response = self.client.get(self.get_url())
        context = response.context
        self.assertIn('grouped_study_tag_counts', context)
        counts = context['grouped_study_tag_counts']
        self.assertEqual(len(counts), 1)
        self.assertEqual(counts[0][0]['study_pk'], self.study.pk)
        self.assertEqual(len(counts[0][1]), 1)
        self.assertEqual(counts[0][1][0]['tag_pk'], tag.pk)
        self.assertEqual(counts[0][1][0]['tt_remaining_count'], n_need_review)
        self.assertEqual(counts[0][1][0]['tt_completed_count'], n_review_completed)
    def test_link_button_says_begin_if_no_tagged_traits_need_review(self):
        """Link button to tag+study study response table says 'view' if responses are completed."""
        # NOTE(review): the method name says "begin" but the assertions expect the
        # 'View quality review' button when nothing needs review. Consider renaming.
        tag = factories.TagFactory.create()
        factories.StudyResponseFactory.create_batch(
            2,
            dcc_review__tagged_trait__tag=tag,
            dcc_review__tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            dcc_review__status=models.DCCReview.STATUS_FOLLOWUP
        )
        response = self.client.get(self.get_url())
        self.assertNotContains(response, 'Begin quality review')
        self.assertContains(response, 'View quality review')
    def test_begin_review_button_is_not_present_if_all_tagged_traits_are_archived_without_study_response(self):
        """Link button to tag+study study response table says 'view' if all tagged traits are archived."""
        tag = factories.TagFactory.create()
        factories.DCCReviewFactory.create_batch(
            2, tagged_trait__tag=tag,
            tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.DCCReview.STATUS_FOLLOWUP
        )
        models.TaggedTrait.objects.update(archived=True)
        response = self.client.get(self.get_url())
        self.assertNotContains(response, 'Begin quality review')
        self.assertContains(response, 'View quality review')
    def test_begin_review_button_is_present_if_some_tagged_traits_need_review(self):
        """Link button to tag+study study response table says 'begin' if some responses need to be completed still."""
        tag = factories.TagFactory.create()
        factories.DCCReviewFactory.create_batch(
            2,
            tagged_trait__tag=tag,
            tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.DCCReview.STATUS_FOLLOWUP
        )
        response = self.client.get(self.get_url())
        self.assertContains(response, 'Begin quality review')
        self.assertNotContains(response, 'View quality review')
    def test_navbar_does_not_contain_link(self):
        """Phenotype taggers do see a link to the main quality review page in the navbar."""
        # NOTE(review): the method name says "does_not_contain" but the assertion is
        # assertContains; the docstring matches the assertion. Consider renaming.
        response = self.client.get(reverse('home'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, """<a href="{}">""".format(self.get_url()))
    def test_no_deprecated_traits(self):
        """Count does not include TaggedTraits whose SourceTrait has been deprecated."""
        tag = factories.TagFactory.create()
        study_version = SourceStudyVersionFactory.create(study=self.study)
        factories.StudyResponseFactory.create(
            dcc_review__tagged_trait__tag=tag,
            dcc_review__status=models.DCCReview.STATUS_FOLLOWUP,
            dcc_review__tagged_trait__trait__source_dataset__source_study_version=study_version
        )
        study_version.i_is_deprecated = True
        study_version.save()
        response = self.client.get(self.get_url())
        context = response.context
        self.assertIn('grouped_study_tag_counts', context)
        counts = context['grouped_study_tag_counts']
        self.assertEqual(len(counts), 0)
class TaggedTraitsNeedStudyResponseSummaryDCCAnalystTest(DCCAnalystLoginTestCase):
    """Tests of the quality review summary view as a DCC analyst."""

    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:tagged-traits:quality-review', args=args)

    def test_get_forbidden(self):
        """Get returns forbidden status code."""
        self.assertEqual(self.client.get(self.get_url()).status_code, 403)

    def test_navbar_does_not_contain_link(self):
        """DCC analysts do not see a link to the main quality review page in the navbar."""
        home_response = self.client.get(reverse('home'))
        self.assertEqual(home_response.status_code, 200)
        quality_review_link = """<a href="{}">""".format(self.get_url())
        self.assertNotContains(home_response, quality_review_link)
class TaggedTraitsNeedStudyResponseSummaryOtherUserTest(UserLoginTestCase):
    """Tests of the quality review summary view as a regular user."""

    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:tagged-traits:quality-review', args=args)

    def test_get_forbidden(self):
        """Get returns forbidden status code."""
        self.assertEqual(self.client.get(self.get_url()).status_code, 403)

    def test_navbar_does_not_contain_link(self):
        """Regular users do not see a link to the main quality review page in the navbar."""
        home_response = self.client.get(reverse('home'))
        self.assertEqual(home_response.status_code, 200)
        quality_review_link = """<a href="{}">""".format(self.get_url())
        self.assertNotContains(home_response, quality_review_link)
class TaggedTraitsNeedStudyResponseByTagAndStudyListTestsMixin(object):
    """Tests to include in all user type test cases for this view.

    The inheriting test case's setUp must provide:
      - self.tag: the Tag whose pk is passed to the view url
      - self.study: the Study whose pk is passed to the view url
      - self.dcc_reviews: the DCCReviews whose tagged traits are expected
        to appear in the view's table
    plus a get_url(tag_pk, study_pk) method returning the view's url.
    """

    def test_view_with_invalid_study_pk(self):
        """Returns 404 response code when the study pk doesn't exist."""
        # self.study.pk + 1 is presumed not to be an existing study pk here.
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk + 1))
        self.assertEqual(response.status_code, 404)

    def test_view_with_invalid_tag_pk(self):
        """Returns 404 response code when the pk doesn't exist."""
        response = self.client.get(self.get_url(self.tag.pk + 1, self.study.pk))
        self.assertEqual(response.status_code, 404)

    def test_context_data(self):
        """View has appropriate data in the context."""
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        context = response.context
        self.assertIn('study', context)
        self.assertIn('tag', context)
        self.assertIn('tagged_trait_table', context)
        self.assertEqual(context['study'], self.study)
        self.assertEqual(context['tag'], self.tag)

    def test_table_class(self):
        """The table class is appropriate."""
        # assertIsInstance, so subclasses of TaggedTraitDCCReviewTable also pass;
        # subclasses pin the exact class in their own test_table_class.
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        context = response.context
        self.assertIsInstance(context['tagged_trait_table'], tables.TaggedTraitDCCReviewTable)

    def test_includes_tagged_traits_that_need_followup(self):
        """Table includes TaggedTraits that need followup."""
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        context = response.context
        table = context['tagged_trait_table']
        self.assertEqual(len(table.data), len(self.dcc_reviews))
        for dcc_review in self.dcc_reviews:
            self.assertIn(dcc_review.tagged_trait, table.data,
                          msg='tagged_trait_table does not contain {}'.format(dcc_review.tagged_trait))

    def test_excludes_unreviewed_tagged_trait(self):
        """Table excludes unreviewed TaggedTrait."""
        # A tagged trait with no DCCReview at all should not be listed.
        unreviewed_tagged_trait = factories.TaggedTraitFactory.create(
            tag=self.tag,
            trait__source_dataset__source_study_version__study=self.study
        )
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        context = response.context
        table = context['tagged_trait_table']
        self.assertNotIn(unreviewed_tagged_trait, table.data)
        # The expected tagged traits from setUp are still all present.
        for dcc_review in self.dcc_reviews:
            self.assertIn(dcc_review.tagged_trait, table.data,
                          msg='tagged_trait_table does not contain {}'.format(dcc_review.tagged_trait))
        self.assertEqual(len(table.data), len(self.dcc_reviews))

    def test_excludes_tagged_trait_with_confirm_dccdecision_but_no_studyresponse(self):
        """Table excludes a TaggedTrait with no StudyResponse and a confirm DCCDecision."""
        tagged_trait = factories.TaggedTraitFactory.create(
            tag=self.tag, trait__source_dataset__source_study_version__study=self.study)
        # The factory creates the intermediate DCCReview via dcc_review__tagged_trait.
        dcc_decision = factories.DCCDecisionFactory.create(
            dcc_review__tagged_trait=tagged_trait, decision=models.DCCDecision.DECISION_CONFIRM)
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        context = response.context
        table = context['tagged_trait_table']
        self.assertNotIn(tagged_trait, table.data)
        for dcc_review in self.dcc_reviews:
            self.assertIn(dcc_review.tagged_trait, table.data,
                          msg='tagged_trait_table does not contain {}'.format(dcc_review.tagged_trait))
        self.assertEqual(len(table.data), len(self.dcc_reviews))

    def test_excludes_tagged_trait_with_remove_dccdecision_but_no_studyresponse(self):
        """Table excludes a TaggedTrait with no StudyResponse and a remove DCCDecision."""
        tagged_trait = factories.TaggedTraitFactory.create(
            tag=self.tag, trait__source_dataset__source_study_version__study=self.study)
        dcc_decision = factories.DCCDecisionFactory.create(
            dcc_review__tagged_trait=tagged_trait, decision=models.DCCDecision.DECISION_REMOVE)
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        context = response.context
        table = context['tagged_trait_table']
        self.assertNotIn(tagged_trait, table.data)
        for dcc_review in self.dcc_reviews:
            self.assertIn(dcc_review.tagged_trait, table.data,
                          msg='tagged_trait_table does not contain {}'.format(dcc_review.tagged_trait))
        self.assertEqual(len(table.data), len(self.dcc_reviews))

    def test_includes_tagged_trait_with_confirm_dccdecision_with_studyresponse(self):
        """Table includes a TaggedTrait with disagree StudyResponse and a confirm DCCDecision."""
        tagged_trait = factories.TaggedTraitFactory.create(
            tag=self.tag, trait__source_dataset__source_study_version__study=self.study)
        study_response = factories.StudyResponseFactory.create(
            dcc_review__tagged_trait=tagged_trait, status=models.StudyResponse.STATUS_DISAGREE)
        dcc_decision = factories.DCCDecisionFactory.create(
            dcc_review=study_response.dcc_review, decision=models.DCCDecision.DECISION_CONFIRM)
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        context = response.context
        table = context['tagged_trait_table']
        self.assertIn(tagged_trait, table.data)
        for dcc_review in self.dcc_reviews:
            self.assertIn(dcc_review.tagged_trait, table.data,
                          msg='tagged_trait_table does not contain {}'.format(dcc_review.tagged_trait))
        # One more row than setUp's reviews: the tagged trait created above.
        self.assertEqual(len(table.data), len(self.dcc_reviews) + 1)

    def test_includes_tagged_trait_with_remove_dccdecision_with_studyresponse(self):
        """Table includes a TaggedTrait with disagree StudyResponse and a remove DCCDecision."""
        tagged_trait = factories.TaggedTraitFactory.create(
            tag=self.tag, trait__source_dataset__source_study_version__study=self.study)
        study_response = factories.StudyResponseFactory.create(
            dcc_review__tagged_trait=tagged_trait, status=models.StudyResponse.STATUS_DISAGREE)
        dcc_decision = factories.DCCDecisionFactory.create(
            dcc_review=study_response.dcc_review, decision=models.DCCDecision.DECISION_REMOVE)
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        context = response.context
        table = context['tagged_trait_table']
        self.assertIn(tagged_trait, table.data)
        for dcc_review in self.dcc_reviews:
            self.assertIn(dcc_review.tagged_trait, table.data,
                          msg='tagged_trait_table does not contain {}'.format(dcc_review.tagged_trait))
        self.assertEqual(len(table.data), len(self.dcc_reviews) + 1)

    def test_success_with_no_matching_tagged_traits(self):
        """Successful response code when there are no TaggedTraits to include."""
        # A brand-new tag has no tagged traits, so the table should be empty.
        other_tag = factories.TagFactory.create()
        response = self.client.get(self.get_url(other_tag.pk, self.study.pk))
        self.assertEqual(response.status_code, 200)
        context = response.context
        self.assertEqual(len(context['tagged_trait_table'].data), 0)

    def test_excludes_tagged_traits_from_a_different_study(self):
        """Table does not include TaggedTraits from a different study."""
        other_study = StudyFactory.create()
        other_tagged_trait = factories.TaggedTraitFactory.create(
            tag=self.tag, trait__source_dataset__source_study_version__study=other_study)
        factories.DCCReviewFactory.create(tagged_trait=other_tagged_trait, status=models.DCCReview.STATUS_FOLLOWUP)
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        context = response.context
        self.assertNotIn(other_tagged_trait, context['tagged_trait_table'].data)

    def test_excludes_tagged_traits_from_a_different_tag(self):
        """Table does not contain TaggedTraits from a different tag."""
        other_tag = factories.TagFactory.create()
        other_tagged_trait = factories.TaggedTraitFactory.create(
            tag=other_tag, trait__source_dataset__source_study_version__study=self.study)
        factories.DCCReviewFactory.create(tagged_trait=other_tagged_trait, status=models.DCCReview.STATUS_FOLLOWUP)
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        context = response.context
        self.assertNotIn(other_tagged_trait, context['tagged_trait_table'].data)

    def test_excludes_deprecated_tagged_trait(self):
        """Table excludes deprecated TaggedTrait."""
        study_version = SourceStudyVersionFactory.create(study=self.study)
        deprecated_tagged_trait = factories.TaggedTraitFactory.create(
            tag=self.tag,
            trait__source_dataset__source_study_version=study_version
        )
        dcc_review = factories.DCCReviewFactory.create(
            tagged_trait=deprecated_tagged_trait, status=models.DCCReview.STATUS_FOLLOWUP)
        # Deprecate the study version only after the tagged trait and review exist.
        study_version.i_is_deprecated = True
        study_version.save()
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        context = response.context
        table = context['tagged_trait_table']
        self.assertNotIn(deprecated_tagged_trait, table.data)
        for dcc_review in self.dcc_reviews:
            self.assertIn(dcc_review.tagged_trait, table.data,
                          msg='tagged_trait_table does not contain {}'.format(dcc_review.tagged_trait))
        self.assertEqual(len(table.data), len(self.dcc_reviews))
class TaggedTraitsNeedStudyResponseByTagAndStudyListPhenotypeTaggerTest(
        TaggedTraitsNeedStudyResponseByTagAndStudyListTestsMixin, PhenotypeTaggerLoginTestCase):
    """Run the by-tag-and-study quality review list tests as a phenotype tagger.

    Inherits the shared tests from the mixin and adds tagger-specific tests
    for the response buttons shown in the table.
    """

    def setUp(self):
        super().setUp()
        self.tag = factories.TagFactory.create()
        # Ten need-followup DCCReviews whose tagged traits should fill the table.
        self.dcc_reviews = factories.DCCReviewFactory.create_batch(
            10,
            tagged_trait__tag=self.tag,
            tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.DCCReview.STATUS_FOLLOWUP
        )

    def get_url(self, *args):
        """Return the url of the view under test."""
        return reverse('tags:tag:study:quality-review', args=args)

    def test_view_success_code(self):
        """Returns successful response code."""
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        self.assertEqual(response.status_code, 200)

    def test_table_class(self):
        """Table class is correct."""
        # Taggers get the table variant with study response buttons.
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        self.assertIs(type(response.context['tagged_trait_table']),
                      tables.TaggedTraitDCCReviewStudyResponseButtonTable)

    def test_forbidden_for_other_study(self):
        """Returns forbidden response code for a study that the user can't tag."""
        other_study = StudyFactory.create()
        response = self.client.get(self.get_url(self.tag.pk, other_study.pk))
        self.assertEqual(response.status_code, 403)

    def test_csrf_token(self):
        """Response contains a csrf token when study response buttons are present."""
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        self.assertContains(response, "name='csrfmiddlewaretoken'")

    def test_buttons_for_need_followup_tagged_trait(self):
        """Buttons are shown for TaggedTraits that need followup and have no StudyResponse."""
        # Remove the tagged traits made in setUp so only the one created below is listed.
        models.TaggedTrait.objects.hard_delete()
        dcc_review = factories.DCCReviewFactory.create(
            tagged_trait__tag=self.tag,
            tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.DCCReview.STATUS_FOLLOWUP
        )
        tagged_trait = dcc_review.tagged_trait
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        # Both the agree ("remove") and disagree ("explain") button urls should appear.
        expected_url = reverse('tags:tagged-traits:pk:quality-review:remove', args=[tagged_trait.pk])
        self.assertContains(response, expected_url)
        expected_url = reverse('tags:tagged-traits:pk:quality-review:explain', args=[tagged_trait.pk])
        self.assertContains(response, expected_url)

    def test_no_buttons_for_need_followup_tagged_trait_with_agree_response(self):
        """Buttons are not shown for TaggedTraits that need followup and have an "agree" StudyResponse."""
        models.TaggedTrait.objects.hard_delete()
        dcc_review = factories.DCCReviewFactory.create(
            tagged_trait__tag=self.tag,
            tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.DCCReview.STATUS_FOLLOWUP
        )
        study_response = factories.StudyResponseFactory.create(
            dcc_review=dcc_review,
            status=models.StudyResponse.STATUS_AGREE
        )
        tagged_trait = dcc_review.tagged_trait
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        expected_url = reverse('tags:tagged-traits:pk:quality-review:remove', args=[tagged_trait.pk])
        self.assertNotContains(response, expected_url)
        expected_url = reverse('tags:tagged-traits:pk:quality-review:explain', args=[tagged_trait.pk])
        self.assertNotContains(response, expected_url)

    def test_no_buttons_for_need_followup_tagged_trait_with_disagree_response(self):
        """Buttons are not shown for TaggedTraits that need followup and have a "disagree" StudyResponse."""
        models.TaggedTrait.objects.hard_delete()
        dcc_review = factories.DCCReviewFactory.create(
            tagged_trait__tag=self.tag,
            tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.DCCReview.STATUS_FOLLOWUP
        )
        study_response = factories.StudyResponseFactory.create(
            dcc_review=dcc_review,
            status=models.StudyResponse.STATUS_DISAGREE
        )
        tagged_trait = dcc_review.tagged_trait
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        expected_url = reverse('tags:tagged-traits:pk:quality-review:remove', args=[tagged_trait.pk])
        self.assertNotContains(response, expected_url)
        expected_url = reverse('tags:tagged-traits:pk:quality-review:explain', args=[tagged_trait.pk])
        self.assertNotContains(response, expected_url)

    def test_no_buttons_for_need_followup_tagged_trait_no_response_and_archived(self):
        """Buttons are not shown for TaggedTraits that need followup, have no StudyResponse, and are archived."""
        models.TaggedTrait.objects.hard_delete()
        dcc_review = factories.DCCReviewFactory.create(
            tagged_trait__tag=self.tag,
            tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.DCCReview.STATUS_FOLLOWUP
        )
        tagged_trait = dcc_review.tagged_trait
        tagged_trait.archive()
        tagged_trait.refresh_from_db()
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        expected_url = reverse('tags:tagged-traits:pk:quality-review:remove', args=[tagged_trait.pk])
        self.assertNotContains(response, expected_url)
        expected_url = reverse('tags:tagged-traits:pk:quality-review:explain', args=[tagged_trait.pk])
        self.assertNotContains(response, expected_url)

    def test_no_buttons_for_need_followup_tagged_trait_with_response_and_archived(self):
        """Buttons are not shown for TaggedTraits that need followup, have an agree StudyResponse, and are archived."""
        models.TaggedTrait.objects.hard_delete()
        dcc_review = factories.DCCReviewFactory.create(
            tagged_trait__tag=self.tag,
            tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.DCCReview.STATUS_FOLLOWUP)
        study_response = factories.StudyResponseFactory.create(
            dcc_review=dcc_review,
            status=models.StudyResponse.STATUS_AGREE)
        tagged_trait = dcc_review.tagged_trait
        tagged_trait.archive()
        tagged_trait.refresh_from_db()
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        expected_url = reverse('tags:tagged-traits:pk:quality-review:remove', args=[tagged_trait.pk])
        self.assertNotContains(response, expected_url)
        expected_url = reverse('tags:tagged-traits:pk:quality-review:explain', args=[tagged_trait.pk])
        self.assertNotContains(response, expected_url)

    def test_table_includes_archived_tagged_trait(self):
        """An archived tagged trait that needs followup is included in the table."""
        archived_tagged_trait = models.TaggedTrait.objects.first()
        archived_tagged_trait.archive()
        archived_tagged_trait.refresh_from_db()
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        table = response.context['tagged_trait_table']
        self.assertIn(archived_tagged_trait, table.data)
class TaggedTraitsNeedStudyResponseByTagAndStudyListDCCAnalystTest(
        TaggedTraitsNeedStudyResponseByTagAndStudyListTestsMixin, DCCAnalystLoginTestCase):
    """Run the by-tag-and-study quality review list tests as a DCC analyst."""

    def setUp(self):
        super().setUp()
        self.study = StudyFactory.create()
        self.tag = factories.TagFactory.create()
        review_kwargs = {
            'tagged_trait__tag': self.tag,
            'tagged_trait__trait__source_dataset__source_study_version__study': self.study,
            'status': models.DCCReview.STATUS_FOLLOWUP,
        }
        self.dcc_reviews = factories.DCCReviewFactory.create_batch(10, **review_kwargs)

    def get_url(self, *args):
        """Return the url of the view under test."""
        return reverse('tags:tag:study:quality-review', args=args)

    def test_table_class(self):
        """Table class is correct."""
        context = self.client.get(self.get_url(self.tag.pk, self.study.pk)).context
        self.assertIs(type(context['tagged_trait_table']), tables.TaggedTraitDCCReviewTable)

    def test_view_success_code(self):
        """Returns successful response code."""
        success_response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        self.assertEqual(success_response.status_code, 200)
class TaggedTraitsNeedStudyResponseByTagAndStudyListOtherUserTest(UserLoginTestCase):
    """The by-tag-and-study quality review list view, accessed by a regular user."""

    def setUp(self):
        super().setUp()
        self.study = StudyFactory.create()
        self.tag = factories.TagFactory.create()
        batch_kwargs = {
            'tagged_trait__tag': self.tag,
            'tagged_trait__trait__source_dataset__source_study_version__study': self.study,
            'status': models.DCCReview.STATUS_FOLLOWUP,
        }
        self.dcc_reviews = factories.DCCReviewFactory.create_batch(10, **batch_kwargs)

    def get_url(self, *args):
        """Return the url of the view under test."""
        return reverse('tags:tag:study:quality-review', args=args)

    def test_get_forbidden(self):
        """Get returns forbidden response code for non-taggers and non-staff."""
        status = self.client.get(self.get_url(self.tag.pk, self.study.pk)).status_code
        self.assertEqual(status, 403)
class StudyResponseCreateAgreeOtherUserTest(UserLoginTestCase):
    """The agree-response ('remove') view is forbidden to regular users."""

    def setUp(self):
        super().setUp()
        self.tag = factories.TagFactory.create()
        self.tagged_trait = factories.TaggedTraitFactory.create(tag=self.tag)
        factories.DCCReviewFactory.create(
            tagged_trait=self.tagged_trait, status=models.DCCReview.STATUS_FOLLOWUP)

    def get_url(self, *args):
        """Return the url of the view under test."""
        return reverse('tags:tagged-traits:pk:quality-review:remove', args=args)

    def test_post_forbidden(self):
        """Post returns a 403 forbidden status code for non-taggers."""
        post_response = self.client.post(self.get_url(self.tagged_trait.pk), {})
        self.assertEqual(post_response.status_code, 403)

    def test_get_forbidden(self):
        """Get returns a 403 forbidden status code for non-taggers."""
        get_response = self.client.get(self.get_url(self.tagged_trait.pk))
        self.assertEqual(get_response.status_code, 403)
class StudyResponseCreateAgreePhenotypeTaggerTest(PhenotypeTaggerLoginTestCase):
    """Tests of the view that records an "agree" StudyResponse, as a phenotype tagger.

    The view is mapped to the quality review "remove" url and is POST-only.
    """

    def setUp(self):
        super().setUp()
        self.tag = factories.TagFactory.create()
        self.tagged_trait = factories.TaggedTraitFactory.create(
            tag=self.tag,
            trait__source_dataset__source_study_version__study=self.study
        )
        factories.DCCReviewFactory.create(tagged_trait=self.tagged_trait, status=models.DCCReview.STATUS_FOLLOWUP)

    def get_url(self, *args):
        """Return the url of the view under test."""
        return reverse('tags:tagged-traits:pk:quality-review:remove', args=args)

    def test_get_method_not_allowed(self):
        """Returns a method not allowed status code for get requests."""
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        self.assertEqual(response.status_code, 405)

    def test_can_create_study_response(self):
        """Creates a study response as expected."""
        # An empty POST body is enough: an agree response needs no comment.
        response = self.client.post(self.get_url(self.tagged_trait.pk), {})
        self.assertTrue(hasattr(self.tagged_trait.dcc_review, 'study_response'))
        study_response = self.tagged_trait.dcc_review.study_response
        self.assertEqual(study_response.status, models.StudyResponse.STATUS_AGREE)
        self.assertEqual(study_response.comment, '')
        self.assertRedirects(response, reverse('tags:tag:study:quality-review',
                                               args=[self.tag.pk, self.study.pk]))
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertFalse('Oops!' in str(messages[0]))

    def test_missing_tagged_trait(self):
        """Returns 404 status with missing tagged trait."""
        # self.tagged_trait.pk + 1 is presumed not to be an existing pk here.
        response = self.client.post(self.get_url(self.tagged_trait.pk + 1), {})
        self.assertEqual(response.status_code, 404)

    def test_missing_dcc_review(self):
        """Redirects with warning message if DCCReview doesn't exist."""
        self.tagged_trait.dcc_review.delete()
        response = self.client.post(self.get_url(self.tagged_trait.pk), {})
        self.assertRedirects(response, reverse('tags:tag:study:quality-review',
                                               args=[self.tag.pk, self.study.pk]))
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))
        self.assertTrue('has not been reviewed' in str(messages[0]))

    def test_archived_tagged_trait(self):
        """Redirects with warning message if the tagged trait has been archived."""
        self.tagged_trait.archive()
        response = self.client.post(self.get_url(self.tagged_trait.pk), {})
        self.assertRedirects(response, reverse('tags:tag:study:quality-review',
                                               args=[self.tag.pk, self.study.pk]))
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))
        self.assertTrue('removed' in str(messages[0]))

    def test_confirmed_dcc_review(self):
        """Redirects with warning message if DCCReview status is confirmed."""
        # Replace the followup review from setUp with a confirmed one.
        # NOTE(review): self.tagged_trait still caches the deleted DCCReview in its
        # related-object cache, so the hasattr check below goes through that stale
        # instance — confirm this is the intended behavior of the assertion.
        self.tagged_trait.dcc_review.delete()
        factories.DCCReviewFactory.create(tagged_trait=self.tagged_trait, status=models.DCCReview.STATUS_CONFIRMED)
        response = self.client.post(self.get_url(self.tagged_trait.pk), {})
        self.assertRedirects(response, reverse('tags:tag:study:quality-review',
                                               args=[self.tag.pk, self.study.pk]))
        self.assertFalse(hasattr(self.tagged_trait.dcc_review, 'study_response'))
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))
        self.assertTrue('has been confirmed' in str(messages[0]))

    def test_studyresponse_exists(self):
        """Redirects with warning message if a StudyResponse already exists."""
        factories.StudyResponseFactory.create(dcc_review=self.tagged_trait.dcc_review,
                                              status=models.StudyResponse.STATUS_DISAGREE)
        response = self.client.post(self.get_url(self.tagged_trait.pk), {})
        self.assertTrue(hasattr(self.tagged_trait.dcc_review, 'study_response'))
        study_response = self.tagged_trait.dcc_review.study_response
        # Make sure it was not updated.
        self.assertEqual(study_response.status, models.StudyResponse.STATUS_DISAGREE)
        self.assertRedirects(response, reverse('tags:tag:study:quality-review',
                                               args=[self.tag.pk, self.study.pk]))
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))

    def test_cant_create_study_response_for_other_study_tagged_trait(self):
        """Can't review tagged traits from a different study."""
        # This is a suggested test, but we need to decide on the expected behavior.
        other_tagged_trait = factories.TaggedTraitFactory.create()
        factories.DCCReviewFactory.create(tagged_trait=other_tagged_trait, status=models.DCCReview.STATUS_FOLLOWUP)
        response = self.client.post(self.get_url(other_tagged_trait.pk), {})
        self.assertFalse(hasattr(other_tagged_trait.dcc_review, 'study_response'))
        self.assertEqual(response.status_code, 403)

    def test_cant_create_study_response_for_tagged_trait_with_dcc_decision_confirm(self):
        """Redirects with warning message if the tagged trait has a confirm dcc decision."""
        self.tagged_trait.dcc_review.delete()
        dcc_review = factories.DCCReviewFactory.create(
            tagged_trait=self.tagged_trait, status=models.DCCReview.STATUS_FOLLOWUP)
        dcc_decision = factories.DCCDecisionFactory.create(
            dcc_review=dcc_review, decision=models.DCCDecision.DECISION_CONFIRM)
        response = self.client.post(self.get_url(self.tagged_trait.pk), {})
        self.assertRedirects(response, reverse('tags:tag:study:quality-review',
                                               args=[self.tag.pk, self.study.pk]))
        self.assertFalse(hasattr(self.tagged_trait.dcc_review, 'study_response'))
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))
        self.assertTrue('already has a dcc decision' in str(messages[0]))

    def test_cant_create_study_response_for_tagged_trait_with_dcc_decision_remove(self):
        """Redirects with warning message if the tagged trait has a remove dcc decision."""
        self.tagged_trait.dcc_review.delete()
        dcc_review = factories.DCCReviewFactory.create(
            tagged_trait=self.tagged_trait, status=models.DCCReview.STATUS_FOLLOWUP)
        dcc_decision = factories.DCCDecisionFactory.create(
            dcc_review=dcc_review, decision=models.DCCDecision.DECISION_REMOVE)
        response = self.client.post(self.get_url(self.tagged_trait.pk), {})
        self.assertRedirects(response, reverse('tags:tag:study:quality-review',
                                               args=[self.tag.pk, self.study.pk]))
        self.assertFalse(hasattr(self.tagged_trait.dcc_review, 'study_response'))
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))
        self.assertTrue('already has a dcc decision' in str(messages[0]))

    def test_adds_user(self):
        """When a StudyResponse is successfully created, it has the appropriate creator."""
        response = self.client.post(self.get_url(self.tagged_trait.pk), {})
        self.assertEqual(self.tagged_trait.dcc_review.study_response.creator, self.user)

    def test_archives_tagged_trait_after_response(self):
        """When a StudyResponse is successfully created, the tagged trait is archived."""
        response = self.client.post(self.get_url(self.tagged_trait.pk), {})
        self.tagged_trait.refresh_from_db()
        self.assertTrue(self.tagged_trait.archived)

    def test_get_deprecated_trait(self):
        """Redirects with warning message if the trait has been deprecated."""
        study_version = self.tagged_trait.trait.source_dataset.source_study_version
        study_version.i_is_deprecated = True
        study_version.save()
        # GET redirects here (rather than 405) because the deprecation check fires first.
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        self.assertRedirects(response, reverse('tags:tag:study:quality-review',
                                               args=[self.tag.pk, self.study.pk]))
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))
        self.assertTrue('newer version' in str(messages[0]))

    def test_post_deprecated_trait(self):
        """Redirects with warning message if the trait has been deprecated."""
        study_version = self.tagged_trait.trait.source_dataset.source_study_version
        study_version.i_is_deprecated = True
        study_version.save()
        response = self.client.post(self.get_url(self.tagged_trait.pk), {})
        self.tagged_trait.refresh_from_db()
        self.assertFalse(hasattr(self.tagged_trait.dcc_review, 'study_response'))
        self.assertRedirects(response, reverse('tags:tag:study:quality-review',
                                               args=[self.tag.pk, self.study.pk]))
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertTrue('Oops!' in str(messages[0]))
        self.assertTrue('newer version' in str(messages[0]))
class StudyResponseCreateAgreeDCCAnalystTest(DCCAnalystLoginTestCase):
    """The agree-response ('remove') view is forbidden to DCC analysts."""

    def setUp(self):
        super().setUp()
        self.tag = factories.TagFactory.create()
        self.tagged_trait = factories.TaggedTraitFactory.create(tag=self.tag)
        factories.DCCReviewFactory.create(
            tagged_trait=self.tagged_trait, status=models.DCCReview.STATUS_FOLLOWUP)

    def get_url(self, *args):
        """Return the url of the view under test."""
        return reverse('tags:tagged-traits:pk:quality-review:remove', args=args)

    def test_post_forbidden(self):
        """Post returns a 403 forbidden status code for non-taggers."""
        post_response = self.client.post(self.get_url(self.tagged_trait.pk), {})
        self.assertEqual(post_response.status_code, 403)

    def test_get_forbidden(self):
        """Get returns a 403 forbidden status code for non-taggers."""
        get_response = self.client.get(self.get_url(self.tagged_trait.pk))
        self.assertEqual(get_response.status_code, 403)
class StudyResponseCreateDisagreeOtherUserTest(UserLoginTestCase):
    """The disagree-response ('explain') view is forbidden to regular users."""

    def setUp(self):
        super().setUp()
        self.tag = factories.TagFactory.create()
        self.tagged_trait = factories.TaggedTraitFactory.create(tag=self.tag)
        factories.DCCReviewFactory.create(
            tagged_trait=self.tagged_trait, status=models.DCCReview.STATUS_FOLLOWUP)

    def get_url(self, *args):
        """Return the url of the view under test."""
        return reverse('tags:tagged-traits:pk:quality-review:explain', args=args)

    def test_post_forbidden(self):
        """Post returns a 403 forbidden status code for non-taggers."""
        post_response = self.client.post(self.get_url(self.tagged_trait.pk), {})
        self.assertEqual(post_response.status_code, 403)

    def test_get_forbidden(self):
        """Get returns a 403 forbidden status code for non-taggers."""
        get_response = self.client.get(self.get_url(self.tagged_trait.pk))
        self.assertEqual(get_response.status_code, 403)
class StudyResponseCreateDisagreePhenotypeTaggerTest(PhenotypeTaggerLoginTestCase):
def setUp(self):
super().setUp()
self.tag = factories.TagFactory.create()
self.tagged_trait = factories.TaggedTraitFactory.create(
tag=self.tag,
trait__source_dataset__source_study_version__study=self.study
)
factories.DCCReviewFactory.create(tagged_trait=self.tagged_trait, status=models.DCCReview.STATUS_FOLLOWUP)
def get_url(self, *args):
return reverse('tags:tagged-traits:pk:quality-review:explain', args=args)
def test_view_success(self):
"""View loads correctly."""
response = self.client.get(self.get_url(self.tagged_trait.pk))
self.assertEqual(response.status_code, 200)
def test_context_data(self):
"""Context contains the correct values."""
response = self.client.get(self.get_url(self.tagged_trait.pk))
context = response.context
self.assertIn('tagged_trait', context)
self.assertEqual(context['tagged_trait'], self.tagged_trait)
self.assertIn('form', context)
self.assertIsInstance(context['form'], forms.StudyResponseDisagreeForm)
self.assertFalse(context['form'].is_bound)
def test_can_create_study_response(self):
"""Creates a study response as expected."""
comment = 'a comment'
response = self.client.post(self.get_url(self.tagged_trait.pk), {'comment': comment})
self.assertTrue(hasattr(self.tagged_trait.dcc_review, 'study_response'))
study_response = self.tagged_trait.dcc_review.study_response
self.assertEqual(study_response.status, models.StudyResponse.STATUS_DISAGREE)
self.assertEqual(study_response.comment, comment)
self.assertRedirects(response, reverse('tags:tag:study:quality-review',
args=[self.tag.pk, self.study.pk]))
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertFalse('Oops!' in str(messages[0]))
def test_post_invalid_form(self):
"""Posting an invalid form does not create a study response."""
response = self.client.post(self.get_url(self.tagged_trait.pk), {'comment': ''})
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'comment', 'This field is required.')
self.tagged_trait.refresh_from_db()
self.assertFalse(hasattr(self.tagged_trait.dcc_review, 'study_response'))
form = response.context['form']
self.assertTrue(form.has_error('comment'))
def test_get_missing_tagged_trait(self):
"""Returns 404 status with missing tagged trait."""
response = self.client.get(self.get_url(self.tagged_trait.pk + 1))
self.assertEqual(response.status_code, 404)
def test_post_missing_tagged_trait(self):
"""Returns 404 status with missing tagged trait."""
response = self.client.post(self.get_url(self.tagged_trait.pk + 1), {'comment': 'a comment'})
self.assertEqual(response.status_code, 404)
def test_get_missing_dcc_review(self):
"""Get redirects with warning message if DCCReview doesn't exist."""
self.tagged_trait.dcc_review.delete()
response = self.client.get(self.get_url(self.tagged_trait.pk))
self.assertRedirects(response, reverse('tags:tag:study:quality-review',
args=[self.tag.pk, self.study.pk]))
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertTrue('Oops!' in str(messages[0]))
self.assertTrue('has not been reviewed' in str(messages[0]))
def test_post_missing_dcc_review(self):
"""Post redirects with warning message if a DCCReview doesn't exist."""
self.tagged_trait.dcc_review.delete()
response = self.client.post(self.get_url(self.tagged_trait.pk), {'comment': 'a comment'})
self.assertRedirects(response, reverse('tags:tag:study:quality-review',
args=[self.tag.pk, self.study.pk]))
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertTrue('Oops!' in str(messages[0]))
def test_get_archived_tagged_trait(self):
"""Get redirects with warning message if the tagged trait has been archived."""
self.tagged_trait.archive()
response = self.client.get(self.get_url(self.tagged_trait.pk))
self.assertRedirects(response, reverse('tags:tag:study:quality-review',
args=[self.tag.pk, self.study.pk]))
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertTrue('Oops!' in str(messages[0]))
self.assertTrue('removed' in str(messages[0]))
def test_post_archived_tagged_trait(self):
"""Post redirects with warning message if the tagged trait has been archived."""
self.tagged_trait.archive()
response = self.client.post(self.get_url(self.tagged_trait.pk), {'comment': 'a comment'})
self.assertRedirects(response, reverse('tags:tag:study:quality-review',
args=[self.tag.pk, self.study.pk]))
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertTrue('Oops!' in str(messages[0]))
def test_confirmed_dcc_review(self):
"""Redirects with warning message if DCCReview status is confirmed."""
self.tagged_trait.dcc_review.delete()
factories.DCCReviewFactory.create(tagged_trait=self.tagged_trait, status=models.DCCReview.STATUS_CONFIRMED)
response = self.client.post(self.get_url(self.tagged_trait.pk), {'comment': 'a comment'})
self.assertRedirects(response, reverse('tags:tag:study:quality-review',
args=[self.tag.pk, self.study.pk]))
self.assertFalse(hasattr(self.tagged_trait.dcc_review, 'study_response'))
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertTrue('Oops!' in str(messages[0]))
self.assertTrue('has been confirmed' in str(messages[0]))
def test_cant_create_study_response_for_tagged_trait_with_dcc_decision_confirm(self):
"""Redirects to quality review page with warning message if the tagged trait has a confirm dcc decision."""
self.tagged_trait.dcc_review.delete()
dcc_review = factories.DCCReviewFactory.create(
tagged_trait=self.tagged_trait, status=models.DCCReview.STATUS_FOLLOWUP)
dcc_decision = factories.DCCDecisionFactory.create(
dcc_review=dcc_review, decision=models.DCCDecision.DECISION_CONFIRM)
response = self.client.post(self.get_url(self.tagged_trait.pk), {'comment': 'a comment'})
self.assertRedirects(response, reverse('tags:tag:study:quality-review',
args=[self.tag.pk, self.study.pk]))
self.assertFalse(hasattr(self.tagged_trait.dcc_review, 'study_response'))
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertTrue('Oops!' in str(messages[0]))
self.assertTrue('already has a dcc decision' in str(messages[0]))
def test_cant_create_study_response_for_tagged_trait_with_dcc_decision_remove(self):
"""Redirects to quality review page with warning message if tagged trait has a remove dcc decision."""
self.tagged_trait.dcc_review.delete()
dcc_review = factories.DCCReviewFactory.create(
tagged_trait=self.tagged_trait, status=models.DCCReview.STATUS_FOLLOWUP)
dcc_decision = factories.DCCDecisionFactory.create(
dcc_review=dcc_review, decision=models.DCCDecision.DECISION_REMOVE)
response = self.client.post(self.get_url(self.tagged_trait.pk), {'comment': 'a comment'})
self.assertRedirects(response, reverse('tags:tag:study:quality-review',
args=[self.tag.pk, self.study.pk]))
self.assertFalse(hasattr(self.tagged_trait.dcc_review, 'study_response'))
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertTrue('Oops!' in str(messages[0]))
self.assertTrue('already has a dcc decision' in str(messages[0]))
def test_get_studyresponse_exists(self):
"""Get redirects with warning message if a StudyResponse already exists."""
factories.StudyResponseFactory.create(dcc_review=self.tagged_trait.dcc_review,
status=models.StudyResponse.STATUS_AGREE)
response = self.client.get(self.get_url(self.tagged_trait.pk))
self.assertTrue(hasattr(self.tagged_trait.dcc_review, 'study_response'))
self.assertRedirects(response, reverse('tags:tag:study:quality-review',
args=[self.tag.pk, self.study.pk]))
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertTrue('Oops!' in str(messages[0]))
def test_post_studyresponse_exists(self):
"""Post redirects with warning message if a StudyResponse already exists."""
factories.StudyResponseFactory.create(dcc_review=self.tagged_trait.dcc_review,
status=models.StudyResponse.STATUS_AGREE)
response = self.client.post(self.get_url(self.tagged_trait.pk), {'comment': 'a comment'})
self.assertTrue(hasattr(self.tagged_trait.dcc_review, 'study_response'))
study_response = self.tagged_trait.dcc_review.study_response
# Make sure it was not updated.
self.assertEqual(study_response.status, models.StudyResponse.STATUS_AGREE)
self.assertEqual(study_response.comment, '')
self.assertRedirects(response, reverse('tags:tag:study:quality-review',
args=[self.tag.pk, self.study.pk]))
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertTrue('Oops!' in str(messages[0]))
def test_get_cant_create_study_response_for_other_study_tagged_trait(self):
"""Get returns forbidden status for tagged trait from a different study."""
other_tagged_trait = factories.TaggedTraitFactory.create()
factories.DCCReviewFactory.create(tagged_trait=other_tagged_trait, status=models.DCCReview.STATUS_FOLLOWUP)
response = self.client.get(self.get_url(other_tagged_trait.pk))
self.assertFalse(hasattr(other_tagged_trait.dcc_review, 'study_response'))
self.assertEqual(response.status_code, 403)
def test_post_cant_create_study_response_for_other_study_tagged_trait(self):
"""Post returns forbidden status for tagged trait from a different study."""
other_tagged_trait = factories.TaggedTraitFactory.create()
factories.DCCReviewFactory.create(tagged_trait=other_tagged_trait, status=models.DCCReview.STATUS_FOLLOWUP)
response = self.client.post(self.get_url(other_tagged_trait.pk), {})
self.assertFalse(hasattr(other_tagged_trait.dcc_review, 'study_response'))
self.assertEqual(response.status_code, 403)
def test_adds_user(self):
"""When a StudyResponse is successfully created, it has the appropriate creator."""
response = self.client.post(self.get_url(self.tagged_trait.pk), {'comment': 'a comment', })
self.assertEqual(self.tagged_trait.dcc_review.study_response.creator, self.user)
def test_does_not_archive_tagged_trait(self):
"""When a disagree StudyResponse is successfully created, the tagged trait is not archived."""
response = self.client.post(self.get_url(self.tagged_trait.pk), {'comment': 'a comment', })
self.tagged_trait.refresh_from_db()
self.assertFalse(self.tagged_trait.archived)
def test_no_other_tags(self):
"""Other tags linked to the same trait are not included in the page."""
another_tagged_trait = factories.TaggedTraitFactory.create(trait=self.tagged_trait.trait)
response = self.client.get(self.get_url(self.tagged_trait.pk))
context = response.context
self.assertNotIn('show_other_tags', context)
content = str(response.content)
self.assertNotIn(another_tagged_trait.tag.title, content)
self.assertIn(self.tagged_trait.tag.title, content)
def test_get_deprecated_trait(self):
"""Redirects a get request with warning message if the trait has been deprecated."""
study_version = self.tagged_trait.trait.source_dataset.source_study_version
study_version.i_is_deprecated = True
study_version.save()
response = self.client.get(self.get_url(self.tagged_trait.pk))
self.assertRedirects(response, reverse('tags:tag:study:quality-review',
args=[self.tag.pk, self.study.pk]))
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertTrue('Oops!' in str(messages[0]))
self.assertTrue('newer version' in str(messages[0]))
def test_post_deprecated_trait(self):
"""Redirects a post request with warning message if the trait has been deprecated."""
study_version = self.tagged_trait.trait.source_dataset.source_study_version
study_version.i_is_deprecated = True
study_version.save()
response = self.client.post(self.get_url(self.tagged_trait.pk), {'comment': 'foo'})
self.tagged_trait.refresh_from_db()
self.assertFalse(hasattr(self.tagged_trait.dcc_review, 'study_response'))
self.assertRedirects(response, reverse('tags:tag:study:quality-review',
args=[self.tag.pk, self.study.pk]))
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertTrue('Oops!' in str(messages[0]))
self.assertTrue('newer version' in str(messages[0]))
class StudyResponseCreateDisagreeDCCAnalystTest(DCCAnalystLoginTestCase):
    """Tests of the disagree StudyResponse create view as a DCC analyst (a non-tagger)."""

    def setUp(self):
        super().setUp()
        self.tag = factories.TagFactory.create()
        self.tagged_trait = factories.TaggedTraitFactory.create(tag=self.tag)
        factories.DCCReviewFactory.create(tagged_trait=self.tagged_trait, status=models.DCCReview.STATUS_FOLLOWUP)

    def get_url(self, *args):
        """Return the url for the view being tested."""
        return reverse('tags:tagged-traits:pk:quality-review:explain', args=args)

    def test_get_forbidden(self):
        """Get returns a 403 forbidden status code for non-taggers."""
        target_url = self.get_url(self.tagged_trait.pk)
        self.assertEqual(self.client.get(target_url).status_code, 403)

    def test_post_forbidden(self):
        """Post returns a 403 forbidden status code for non-taggers."""
        target_url = self.get_url(self.tagged_trait.pk)
        self.assertEqual(self.client.post(target_url, {}).status_code, 403)
class TaggedTraitsNeedDCCDecisionSummaryTestMixin(object):
    """Tests of the need-decision summary view, shared by the DCC user test cases below."""

    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:tagged-traits:need-decision', args=args)

    def _get_grouped_counts(self, response):
        """Assert that the grouped counts are in the response context, and return them."""
        context = response.context
        self.assertIn('grouped_study_tag_counts', context)
        return context['grouped_study_tag_counts']

    def test_view_success(self):
        """Returns successful response code."""
        response = self.client.get(self.get_url())
        self.assertEqual(response.status_code, 200)

    def test_get_context_data(self):
        """Context contains expected variables."""
        response = self.client.get(self.get_url())
        self._get_grouped_counts(response)

    def test_counts_blank_with_zero_tagged_traits(self):
        """Correct count when there are no tagged traits."""
        response = self.client.get(self.get_url())
        counts = self._get_grouped_counts(response)
        self.assertEqual(len(counts), 0)

    def test_counts_exclude_confirmed_tagged_trait(self):
        """Count does not include tagged trait with DCCReview status confirmed."""
        factories.DCCReviewFactory.create(status=models.DCCReview.STATUS_CONFIRMED)
        response = self.client.get(self.get_url())
        counts = self._get_grouped_counts(response)
        self.assertEqual(len(counts), 0)

    def test_counts_exclude_deprecated_tagged_trait(self):
        """Count does not include a tagged trait whose source study version is deprecated."""
        factories.StudyResponseFactory.create(
            status=models.StudyResponse.STATUS_DISAGREE,
            dcc_review__tagged_trait__trait__source_dataset__source_study_version__i_is_deprecated=True
        )
        response = self.client.get(self.get_url())
        counts = self._get_grouped_counts(response)
        self.assertEqual(len(counts), 0)

    def test_counts_exclude_needfollowup_noresponse_tagged_trait(self):
        """Count does not include tagged trait of status need followup with no study response."""
        factories.DCCReviewFactory.create(status=models.DCCReview.STATUS_FOLLOWUP)
        response = self.client.get(self.get_url())
        counts = self._get_grouped_counts(response)
        self.assertEqual(len(counts), 0)

    def test_counts_exclude_needfollowup_agree_tagged_trait(self):
        """Count does not include tagged trait of status need followup with agree study response."""
        factories.StudyResponseFactory.create(status=models.StudyResponse.STATUS_AGREE)
        response = self.client.get(self.get_url())
        counts = self._get_grouped_counts(response)
        self.assertEqual(len(counts), 0)

    def test_counts_for_needfollowup_disagree_tagged_trait(self):
        """Correct counts when only one tagged trait needs a decision."""
        factories.StudyResponseFactory.create(status=models.StudyResponse.STATUS_DISAGREE)
        response = self.client.get(self.get_url())
        counts = self._get_grouped_counts(response)
        self.assertEqual(len(counts), 1)  # One study.
        study1 = counts[0]
        self.assertEqual(study1[1][0]['tt_total'], 1)
        self.assertEqual(study1[1][0]['tt_decision_required_count'], 1)

    def test_counts_for_needfollowup_disagree_tagged_trait_with_remove_decision(self):
        """Correct counts when only one tagged trait needed a decision, and it's been decided to remove."""
        study_response = factories.StudyResponseFactory.create(status=models.StudyResponse.STATUS_DISAGREE)
        factories.DCCDecisionFactory.create(
            dcc_review=study_response.dcc_review, decision=models.DCCDecision.DECISION_REMOVE)
        response = self.client.get(self.get_url())
        counts = self._get_grouped_counts(response)
        self.assertEqual(len(counts), 1)  # One study.
        study1 = counts[0]
        self.assertEqual(study1[1][0]['tt_total'], 1)
        self.assertEqual(study1[1][0]['tt_decision_required_count'], 0)  # Decision has been made.

    def test_counts_for_needfollowup_disagree_tagged_trait_with_confirm_decision(self):
        """Correct counts when only one tagged trait needed a decision, and it's been decided to confirm."""
        study_response = factories.StudyResponseFactory.create(status=models.StudyResponse.STATUS_DISAGREE)
        factories.DCCDecisionFactory.create(
            dcc_review=study_response.dcc_review, decision=models.DCCDecision.DECISION_CONFIRM)
        response = self.client.get(self.get_url())
        counts = self._get_grouped_counts(response)
        self.assertEqual(len(counts), 1)  # One study.
        study1 = counts[0]
        self.assertEqual(study1[1][0]['tt_total'], 1)
        self.assertEqual(study1[1][0]['tt_decision_required_count'], 0)  # Decision has been made.

    def test_counts_for_needfollowup_disagree_tagged_trait_with_remove_decision_archived(self):
        """Correct counts when only one tagged trait needed a decision, and it's been decided to remove, and it's archived."""  # noqa
        study_response = factories.StudyResponseFactory.create(status=models.StudyResponse.STATUS_DISAGREE)
        factories.DCCDecisionFactory.create(
            dcc_review=study_response.dcc_review, decision=models.DCCDecision.DECISION_REMOVE)
        study_response.dcc_review.tagged_trait.archive()
        response = self.client.get(self.get_url())
        counts = self._get_grouped_counts(response)
        self.assertEqual(len(counts), 1)  # One study.
        study1 = counts[0]
        self.assertEqual(study1[1][0]['tt_total'], 1)
        self.assertEqual(study1[1][0]['tt_decision_required_count'], 0)  # Decision has been made.

    def test_counts_for_needfollowup_disagree_tagged_traits_from_two_studies(self):
        """Correct counts when tagged traits from two studies need decisions."""
        factories.StudyResponseFactory.create(status=models.StudyResponse.STATUS_DISAGREE)
        factories.StudyResponseFactory.create(status=models.StudyResponse.STATUS_DISAGREE)
        response = self.client.get(self.get_url())
        counts = self._get_grouped_counts(response)
        self.assertEqual(len(counts), 2)  # Two studies.
        # Each study has a single tag group with a single tagged trait needing a decision.
        for study in counts:
            self.assertEqual(study[1][0]['tt_total'], 1)
            self.assertEqual(study[1][0]['tt_decision_required_count'], 1)

    def test_counts_for_two_needfollowup_disagree_tagged_traits_from_same_study_and_tag(self):
        """Correct counts when two tagged traits from the same study and tag need decisions."""
        study_version = SourceStudyVersionFactory.create()
        tag = factories.TagFactory.create()
        factories.StudyResponseFactory.create_batch(
            2, status=models.StudyResponse.STATUS_DISAGREE,
            dcc_review__tagged_trait__trait__source_dataset__source_study_version=study_version,
            dcc_review__tagged_trait__tag=tag)
        response = self.client.get(self.get_url())
        counts = self._get_grouped_counts(response)
        self.assertEqual(len(counts), 1)  # One study.
        study1 = counts[0]
        self.assertEqual(study1[1][0]['tt_total'], 2)
        self.assertEqual(study1[1][0]['tt_decision_required_count'], 2)

    def test_counts_for_two_needfollowup_disagree_tagged_traits_with_remove_decision_from_same_study_and_tag(self):
        """Correct counts when two tagged traits from the same study and tag already have decisions to remove."""
        study_version = SourceStudyVersionFactory.create()
        tag = factories.TagFactory.create()
        study_responses = factories.StudyResponseFactory.create_batch(
            2, status=models.StudyResponse.STATUS_DISAGREE,
            dcc_review__tagged_trait__trait__source_dataset__source_study_version=study_version,
            dcc_review__tagged_trait__tag=tag)
        # Plain loop rather than a throwaway list comprehension: only the side effect matters.
        for sr in study_responses:
            factories.DCCDecisionFactory.create(dcc_review=sr.dcc_review)
        response = self.client.get(self.get_url())
        counts = self._get_grouped_counts(response)
        self.assertEqual(len(counts), 1)  # One study.
        study1 = counts[0]
        self.assertEqual(study1[1][0]['tt_total'], 2)
        self.assertEqual(study1[1][0]['tt_decision_required_count'], 0)  # Decision has been made.

    def test_counts_for_two_studies_and_two_tags(self):
        """Correct counts in a complex set of tagged traits that need decisions and already have decisions."""
        # There are two studies, two tags, and some decided and some not decided for each tag+study.
        study_versions_of_two_studies = SourceStudyVersionFactory.create_batch(2)
        tags = factories.TagFactory.create_batch(2)
        # Start with making three tagged traits need a decision (one without decision, and one of each decision type).
        to_decide = 3
        counts_to_match = []
        for sv in study_versions_of_two_studies:
            study_dict = {'study_name': sv.study.i_study_name, 'study_pk': sv.study.pk}
            tag_list = []
            for t in tags:
                # Make tagged traits for each tag + study.
                factories.TaggedTraitFactory.create_batch(
                    5, tag=t, trait__source_dataset__source_study_version=sv)
                # Make some that need decisions.
                study_responses = factories.StudyResponseFactory.create_batch(
                    to_decide, status=models.StudyResponse.STATUS_DISAGREE,
                    dcc_review__tagged_trait__trait__source_dataset__source_study_version=sv,
                    dcc_review__tagged_trait__tag=t)
                # Make one that has each decision type.
                factories.DCCDecisionFactory.create(
                    dcc_review=study_responses[0].dcc_review, decision=models.DCCDecision.DECISION_REMOVE)
                factories.DCCDecisionFactory.create(
                    dcc_review=study_responses[1].dcc_review, decision=models.DCCDecision.DECISION_CONFIRM)
                tag_dict = {'tt_total': to_decide, 'tag_name': t.title, 'study_pk': sv.study.pk, 'tag_pk': t.pk,
                            'study_name': sv.study.i_study_name, 'tt_decision_required_count': to_decide - 2}
                tag_list.append(tag_dict)
                to_decide += 1  # Increment this every time so the counts are distinguishable.
            counts_to_match.append((study_dict, tag_list))
        response = self.client.get(self.get_url())
        counts = self._get_grouped_counts(response)
        # Sometimes the order of the studies wasn't quite right, and sorting didn't work due to dicts.
        # So that's why this study-by-study matching was needed.
        for study in counts:
            study_to_match = [el for el in counts_to_match if el[0] == study[0]][0]
            self.assertEqual(study, study_to_match)

    def test_counts_exclude_tag_without_decisions_needed(self):
        """Counts exclude a tag that lacks tagged traits requiring decisions."""
        # An extra tag and an extra tagged trait that should not show up in the counts.
        factories.TagFactory.create()
        factories.TaggedTraitFactory.create()
        factories.StudyResponseFactory.create(status=models.StudyResponse.STATUS_DISAGREE)
        response = self.client.get(self.get_url())
        counts = self._get_grouped_counts(response)
        self.assertEqual(len(counts), 1)  # One study.
        study1 = counts[0]
        self.assertEqual(study1[1][0]['tt_total'], 1)
        self.assertEqual(study1[1][0]['tt_decision_required_count'], 1)
        self.assertEqual(len(study1[1]), 1)  # Only one tag.

    def test_make_final_decisions_button_present(self):
        """Button to make final decisions (start the deciding loop) is present when some decisions remain unmade."""
        study_response = factories.StudyResponseFactory.create(status=models.StudyResponse.STATUS_DISAGREE)
        response = self.client.get(self.get_url())
        study_tag_pks = [study_response.dcc_review.tagged_trait.tag.pk,
                         study_response.dcc_review.tagged_trait.trait.source_dataset.source_study_version.study.pk]
        self.assertContains(response, reverse('tags:tag:study:begin-dcc-decision', args=study_tag_pks))

    def test_make_final_decisions_button_not_present(self):
        """Button to make final decisions (start the deciding loop) is not present when all decisions are done."""
        study_response = factories.StudyResponseFactory.create(status=models.StudyResponse.STATUS_DISAGREE)
        factories.DCCDecisionFactory.create(
            dcc_review=study_response.dcc_review, decision=models.DCCDecision.DECISION_REMOVE)
        response = self.client.get(self.get_url())
        study_tag_pks = [study_response.dcc_review.tagged_trait.tag.pk,
                         study_response.dcc_review.tagged_trait.trait.source_dataset.source_study_version.study.pk]
        self.assertNotContains(response, reverse('tags:tag:study:begin-dcc-decision', args=study_tag_pks))

    def test_navbar_does_contain_link(self):
        """DCC users do see a link to the dcc decisions summary page in the navbar."""
        response = self.client.get(reverse('home'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, self.get_url())

    def test_counts_for_needfollowup_disagree_tagged_trait_deprecated(self):
        """No counts for a deprecated tagged trait that needs a decision."""
        study_response = factories.StudyResponseFactory.create(status=models.StudyResponse.STATUS_DISAGREE)
        study_version = study_response.dcc_review.tagged_trait.trait.source_dataset.source_study_version
        study_version.i_is_deprecated = True
        study_version.save()
        response = self.client.get(self.get_url())
        counts = self._get_grouped_counts(response)
        self.assertEqual(len(counts), 0)
class TaggedTraitsNeedDCCDecisionSummaryDCCAnalystTest(TaggedTraitsNeedDCCDecisionSummaryTestMixin,
                                                       DCCAnalystLoginTestCase):
    """Run the shared need-decision summary tests as a DCC analyst user."""

    pass
class TaggedTraitsNeedDCCDecisionSummaryDCCDeveloperTest(TaggedTraitsNeedDCCDecisionSummaryTestMixin,
                                                         DCCDeveloperLoginTestCase):
    """Run the shared need-decision summary tests as a DCC developer user."""

    pass
class TaggedTraitsNeedDCCDecisionSummaryOtherUserTest(UserLoginTestCase):
    """Tests that groupless users cannot access the dcc decisions summary view."""

    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:tagged-traits:need-decision', args=args)

    def test_get_forbidden(self):
        """Get returns a 403 forbidden status code for regular users."""
        self.assertEqual(self.client.get(self.get_url()).status_code, 403)

    def test_navbar_does_not_contain_link(self):
        """Groupless users do not see a link to the dcc decisions summary page."""
        home_response = self.client.get(reverse('home'))
        self.assertEqual(home_response.status_code, 200)
        self.assertNotContains(home_response, self.get_url())
class TaggedTraitsNeedDCCDecisionSummaryPhenotypeTaggerTest(PhenotypeTaggerLoginTestCase):
    """Tests that phenotype taggers cannot access the dcc decisions summary view."""

    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:tagged-traits:need-decision', args=args)

    def test_get_forbidden(self):
        """Get returns a 403 forbidden status code for regular users."""
        self.assertEqual(self.client.get(self.get_url()).status_code, 403)

    def test_navbar_does_not_contain_link(self):
        """Phenotype taggers do not see a link to the dcc decisions summary page."""
        home_response = self.client.get(reverse('home'))
        self.assertEqual(home_response.status_code, 200)
        self.assertNotContains(home_response, self.get_url())
class TaggedTraitsNeedDCCDecisionByTagAndStudyListMixin(object):
    """Tests of the need-decision list view for one tag + study, shared by DCC user test cases."""

    def setUp(self):
        super().setUp()
        self.tag = factories.TagFactory.create()
        self.study = StudyFactory.create()
        # Three disagree study responses for this tag + study; all of their tagged traits need a dcc decision.
        self.study_responses = factories.StudyResponseFactory.create_batch(
            3, dcc_review__tagged_trait__tag=self.tag,
            dcc_review__tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.StudyResponse.STATUS_DISAGREE)
        self.tagged_traits = list(models.TaggedTrait.objects.all())

    def get_url(self, *args):
        return reverse('tags:tag:study:need-decision', args=args)

    def _assert_table_has_exactly_setup_tagged_traits(self, table):
        """Assert that the table contains the tagged traits from setUp and nothing more."""
        for study_response in self.study_responses:
            self.assertIn(study_response.dcc_review.tagged_trait, table.data,
                          msg='tagged_trait_table does not contain {}'.format(study_response.dcc_review.tagged_trait))
        self.assertEqual(len(table.data), len(self.study_responses))

    def test_view_success_code(self):
        """Returns successful response code."""
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        self.assertEqual(response.status_code, 200)

    def test_view_with_invalid_study_pk(self):
        """Returns 404 response code when the study pk doesn't exist."""
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk + 1))
        self.assertEqual(response.status_code, 404)

    def test_view_with_invalid_tag_pk(self):
        """Returns 404 response code when the pk doesn't exist."""
        response = self.client.get(self.get_url(self.tag.pk + 1, self.study.pk))
        self.assertEqual(response.status_code, 404)

    def test_context_data(self):
        """View has appropriate data in the context."""
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        context = response.context
        self.assertIn('study', context)
        self.assertIn('tag', context)
        self.assertIn('tagged_trait_table', context)
        self.assertEqual(context['study'], self.study)
        self.assertEqual(context['tag'], self.tag)

    def test_table_class(self):
        """The table class is appropriate."""
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        self.assertIsInstance(response.context['tagged_trait_table'], tables.TaggedTraitDCCDecisionTable)

    def test_view_contains_tagged_traits_that_need_decision(self):
        """Table contains TaggedTraits that need dcc decisions."""
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        self._assert_table_has_exactly_setup_tagged_traits(response.context['tagged_trait_table'])

    def test_view_table_does_not_contain_unreviewed_tagged_traits(self):
        """Table does not contain unreviewed TaggedTraits."""
        unreviewed_tagged_trait = factories.TaggedTraitFactory.create(
            tag=self.tag, trait__source_dataset__source_study_version__study=self.study)
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        table = response.context['tagged_trait_table']
        self.assertNotIn(unreviewed_tagged_trait, table.data)
        self._assert_table_has_exactly_setup_tagged_traits(table)

    def test_view_table_does_not_contain_tagged_trait_with_no_study_response(self):
        """Table does not contain TaggedTrait without a study response."""
        no_response_dcc_review = factories.DCCReviewFactory.create(
            tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            tagged_trait__tag=self.tag,
            status=models.DCCReview.STATUS_FOLLOWUP)
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        table = response.context['tagged_trait_table']
        self.assertNotIn(no_response_dcc_review.tagged_trait, table.data)
        self._assert_table_has_exactly_setup_tagged_traits(table)

    def test_view_table_does_not_contain_tagged_trait_with_agree_response(self):
        """Table does not contain tagged trait with agree study response."""
        agree_response = factories.StudyResponseFactory.create(
            dcc_review__tagged_trait__tag=self.tag,
            dcc_review__tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.StudyResponse.STATUS_AGREE)
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        table = response.context['tagged_trait_table']
        self.assertNotIn(agree_response.dcc_review.tagged_trait, table.data)
        self._assert_table_has_exactly_setup_tagged_traits(table)

    def test_view_table_does_not_contain_deprecated_tagged_traits(self):
        """Table does not contain deprecated TaggedTraits."""
        deprecated_response = factories.StudyResponseFactory.create(
            dcc_review__tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            dcc_review__tagged_trait__trait__source_dataset__source_study_version__i_is_deprecated=True,
            dcc_review__tagged_trait__tag=self.tag,
            status=models.StudyResponse.STATUS_DISAGREE)
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        table = response.context['tagged_trait_table']
        self.assertNotIn(deprecated_response.dcc_review.tagged_trait, table.data)
        self._assert_table_has_exactly_setup_tagged_traits(table)

    def test_view_works_with_no_matching_tagged_traits(self):
        """Successful response code when there are no TaggedTraits to include."""
        other_tag = factories.TagFactory.create()
        response = self.client.get(self.get_url(other_tag.pk, self.study.pk))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['tagged_trait_table'].data), 0)

    def test_view_does_not_show_tagged_traits_from_a_different_study(self):
        """Table does not include TaggedTraits from a different study."""
        other_study = StudyFactory.create()
        other_study_response = factories.StudyResponseFactory.create(
            status=models.StudyResponse.STATUS_DISAGREE,
            dcc_review__tagged_trait__trait__source_dataset__source_study_version__study=other_study)
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        self.assertNotIn(other_study_response.dcc_review.tagged_trait, response.context['tagged_trait_table'].data)

    def test_view_does_not_show_tagged_traits_from_a_different_tag(self):
        """Table does not contain TaggedTraits from a different tag."""
        other_tag = factories.TagFactory.create()
        other_study_response = factories.StudyResponseFactory.create(
            status=models.StudyResponse.STATUS_DISAGREE,
            dcc_review__tagged_trait__tag=other_tag)
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        self.assertNotIn(other_study_response.dcc_review.tagged_trait, response.context['tagged_trait_table'].data)

    def test_decision_links_present_for_nodecision_tagged_traits(self):
        """Decision buttons are shown for tagged traits without decision."""
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        content = str(response.content)
        for study_response in self.study_responses:
            self.assertIn(
                reverse('tags:tagged-traits:pk:dcc-decision:new', args=[study_response.dcc_review.tagged_trait.pk]),
                content,
                msg='View is missing DCCDecisionCreate link for {}'.format(study_response.dcc_review.tagged_trait)
            )

    def test_update_link_present_for_decision_confirm_tagged_traits(self):
        """Update button is shown for tagged trait with confirm."""
        dcc_decision = factories.DCCDecisionFactory.create(
            dcc_review=self.study_responses[0].dcc_review, decision=models.DCCDecision.DECISION_CONFIRM)
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        self.assertIn(
            reverse('tags:tagged-traits:pk:dcc-decision:update', args=[dcc_decision.dcc_review.tagged_trait.pk]),
            str(response.content)
        )

    def test_update_link_present_for_decision_remove_tagged_traits(self):
        """Update button is shown for tagged trait with remove."""
        dcc_decision = factories.DCCDecisionFactory.create(
            dcc_review=self.study_responses[0].dcc_review, decision=models.DCCDecision.DECISION_REMOVE)
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        self.assertIn(
            reverse('tags:tagged-traits:pk:dcc-decision:update', args=[dcc_decision.dcc_review.tagged_trait.pk]),
            str(response.content)
        )
class TaggedTraitsNeedDCCDecisionByTagAndStudyListDCCAnalystTest(TaggedTraitsNeedDCCDecisionByTagAndStudyListMixin,
                                                                 DCCAnalystLoginTestCase):
    """Run the shared need-decision list tests as a DCC analyst user."""

    pass
class TaggedTraitsNeedDCCDecisionByTagAndStudyListDCCDeveloperTest(TaggedTraitsNeedDCCDecisionByTagAndStudyListMixin,
                                                                   DCCDeveloperLoginTestCase):
    """Run the shared need-decision list tests as a DCC developer user."""

    pass
class TaggedTraitsNeedDCCDecisionByTagAndStudyListOtherUserTest(UserLoginTestCase):
    """Tests that regular users cannot access the need-decision list view."""

    def setUp(self):
        super().setUp()
        self.tag = factories.TagFactory.create()
        self.study = StudyFactory.create()
        # Disagree study responses so the view would have content if it were accessible.
        self.study_responses = factories.StudyResponseFactory.create_batch(
            3, dcc_review__tagged_trait__tag=self.tag,
            dcc_review__tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.StudyResponse.STATUS_DISAGREE)
        self.tagged_traits = list(models.TaggedTrait.objects.all())

    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:tag:study:need-decision', args=args)

    def test_get_forbidden(self):
        """Get returns a 403 forbidden status code for regular users."""
        forbidden_response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        self.assertEqual(forbidden_response.status_code, 403)
class TaggedTraitsNeedDCCDecisionByTagAndStudyListPhenotypeTaggerTest(PhenotypeTaggerLoginTestCase):
    """Tests that phenotype taggers cannot access the need-decision list view."""

    def setUp(self):
        super().setUp()
        self.tag = factories.TagFactory.create()
        self.study = StudyFactory.create()
        # Disagree study responses so the view would have content if it were accessible.
        self.study_responses = factories.StudyResponseFactory.create_batch(
            3, dcc_review__tagged_trait__tag=self.tag,
            dcc_review__tagged_trait__trait__source_dataset__source_study_version__study=self.study,
            status=models.StudyResponse.STATUS_DISAGREE)
        self.tagged_traits = list(models.TaggedTrait.objects.all())

    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:tag:study:need-decision', args=args)

    def test_get_forbidden(self):
        """Get returns a 403 forbidden status code for regular users."""
        forbidden_response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        self.assertEqual(forbidden_response.status_code, 403)
class DCCDecisionByTagAndStudySelectFromURLDCCTestsMixin(object):
    """Tests of the begin-dcc-decision view, to be run as both a DCC analyst and a DCC developer.

    The view collects the pks of tagged traits needing a DCC decision for the
    given tag + study, stores them in a session variable, and redirects into
    the decision loop.
    """

    def setUp(self):
        super().setUp()
        self.tag = factories.TagFactory.create()
        self.study = StudyFactory.create()
        self.study_responses = factories.StudyResponseFactory.create_batch(
            10, status=models.StudyResponse.STATUS_DISAGREE, dcc_review__tagged_trait__tag=self.tag,
            dcc_review__tagged_trait__trait__source_dataset__source_study_version__study=self.study)
        self.tagged_traits = list(models.TaggedTrait.objects.all())

    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:tag:study:begin-dcc-decision', args=args)

    def test_view_success_code(self):
        """Returns successful response code."""
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk), follow=False)
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), fetch_redirect_response=False)

    def test_nonexistent_study_404(self):
        """Returns 404 if study does not exist."""
        study_pk = self.study.pk
        self.study.delete()
        response = self.client.get(self.get_url(self.tag.pk, study_pk), follow=False)
        self.assertEqual(response.status_code, 404)

    def test_nonexistent_tag_404(self):
        """Returns 404 if tag does not exist."""
        tag_pk = self.tag.pk
        self.tag.delete()
        response = self.client.get(self.get_url(tag_pk, self.study.pk), follow=False)
        self.assertEqual(response.status_code, 404)

    def test_sets_session_variables(self):
        """View has appropriate data in the context and session variables."""
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk), follow=False)
        session = self.client.session
        self.assertIn('tagged_trait_decision_by_tag_and_study_info', session)
        session_info = session['tagged_trait_decision_by_tag_and_study_info']
        self.assertIn('study_pk', session_info)
        self.assertEqual(session_info['study_pk'], self.study.pk)
        self.assertIn('tag_pk', session_info)
        self.assertEqual(session_info['tag_pk'], self.tag.pk)
        self.assertIn('tagged_trait_pks', session_info)
        self.assertEqual(len(session_info['tagged_trait_pks']), len(self.tagged_traits))
        for tt in self.tagged_traits:
            self.assertIn(tt.pk, session_info['tagged_trait_pks'],
                          msg='TaggedTrait {} not in session tagged_trait_pks'.format(tt.pk))
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), fetch_redirect_response=False)

    def test_excludes_other_tag(self):
        """List of tagged trait pks does not include a second tag."""
        other_tag = factories.TagFactory.create()
        other_study_response = factories.StudyResponseFactory.create(
            status=models.StudyResponse.STATUS_DISAGREE, dcc_review__tagged_trait__tag=other_tag,
            dcc_review__tagged_trait__trait__source_dataset__source_study_version__study=self.study
        )
        other_tagged_trait = other_study_response.dcc_review.tagged_trait
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk), follow=False)
        session = self.client.session
        self.assertIn('tagged_trait_decision_by_tag_and_study_info', session)
        session_info = session['tagged_trait_decision_by_tag_and_study_info']
        self.assertIn('tagged_trait_pks', session_info)
        self.assertEqual(len(session_info['tagged_trait_pks']), len(self.tagged_traits))
        for tt in self.tagged_traits:
            self.assertIn(tt.pk, session_info['tagged_trait_pks'],
                          msg='TaggedTrait {} unexpectedly not in session tagged_trait_pks'.format(tt.pk))
        # Compare by pk: the session stores pks, so checking for the model
        # instance itself would vacuously pass.
        self.assertNotIn(other_tagged_trait.pk, session_info['tagged_trait_pks'],
                         msg='TaggedTrait {} unexpectedly in session tagged_trait_pks'.format(other_tagged_trait.pk))
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), fetch_redirect_response=False)

    def test_excludes_other_study(self):
        """List of tagged trait pks does not include a different study."""
        other_study = StudyFactory.create()
        other_study_response = factories.StudyResponseFactory.create(
            status=models.StudyResponse.STATUS_DISAGREE, dcc_review__tagged_trait__tag=self.tag,
            dcc_review__tagged_trait__trait__source_dataset__source_study_version__study=other_study
        )
        other_tagged_trait = other_study_response.dcc_review.tagged_trait
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk), follow=False)
        session = self.client.session
        self.assertIn('tagged_trait_decision_by_tag_and_study_info', session)
        session_info = session['tagged_trait_decision_by_tag_and_study_info']
        self.assertIn('tagged_trait_pks', session_info)
        self.assertEqual(len(session_info['tagged_trait_pks']), len(self.tagged_traits))
        for tt in self.tagged_traits:
            self.assertIn(tt.pk, session_info['tagged_trait_pks'],
                          msg='TaggedTrait {} unexpectedly not in session tagged_trait_pks'.format(tt.pk))
        # Compare by pk: the session stores pks, so checking for the model
        # instance itself would vacuously pass.
        self.assertNotIn(other_tagged_trait.pk, session_info['tagged_trait_pks'],
                         msg='TaggedTrait {} unexpectedly in session tagged_trait_pks'.format(other_tagged_trait.pk))
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), fetch_redirect_response=False)

    def test_excludes_other_study_and_tag(self):
        """List of tagged trait pks does not include tagged traits from another study and tag."""
        other_tag = factories.TagFactory.create()
        other_study = StudyFactory.create()
        other_study_response = factories.StudyResponseFactory.create(
            status=models.StudyResponse.STATUS_DISAGREE, dcc_review__tagged_trait__tag=other_tag,
            dcc_review__tagged_trait__trait__source_dataset__source_study_version__study=other_study
        )
        other_tagged_trait = other_study_response.dcc_review.tagged_trait
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        session = self.client.session
        self.assertIn('tagged_trait_decision_by_tag_and_study_info', session)
        session_info = session['tagged_trait_decision_by_tag_and_study_info']
        for tt in self.tagged_traits:
            self.assertIn(tt.pk, session_info['tagged_trait_pks'],
                          msg='TaggedTrait {} unexpectedly not in session tagged_trait_pks'.format(tt.pk))
        # Compare by pk: the session stores pks, so checking for the model
        # instance itself would vacuously pass.
        self.assertNotIn(other_tagged_trait.pk, session_info['tagged_trait_pks'],
                         msg='TaggedTrait {} unexpectedly in session tagged_trait_pks'.format(other_tagged_trait.pk))
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), fetch_redirect_response=False)

    def test_resets_session_variables(self):
        """Correctly overwrites a preexisting session variable with new data."""
        # Store the session in a variable before modifying it: each access of
        # self.client.session creates a new SessionStore, so assigning through
        # the property directly would silently be lost.
        session = self.client.session
        session['tagged_trait_decision_by_tag_and_study_info'] = {
            'study_pk': self.study.pk + 1,
            'tag_pk': self.tag.pk + 1,
            'tagged_trait_pks': [],
        }
        session.save()
        self.client.get(self.get_url(self.tag.pk, self.study.pk))
        session = self.client.session
        self.assertIn('tagged_trait_decision_by_tag_and_study_info', session)
        session_info = session['tagged_trait_decision_by_tag_and_study_info']
        self.assertIn('study_pk', session_info)
        self.assertEqual(session_info['study_pk'], self.study.pk)
        self.assertIn('tag_pk', session_info)
        self.assertEqual(session_info['tag_pk'], self.tag.pk)
        self.assertIn('tagged_trait_pks', session_info)
        self.assertEqual(len(session_info['tagged_trait_pks']), len(self.tagged_traits))
        for tt in self.tagged_traits:
            self.assertIn(tt.pk, session_info['tagged_trait_pks'],
                          msg='TaggedTrait {} not in session tagged_trait_pks'.format(tt.pk))

    def test_no_tagged_traits_remaining_to_decide_on(self):
        """Redirects properly and displays message when there are no tagged traits to decide on for the tag+study."""
        models.TaggedTrait.objects.all().hard_delete()
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        self.assertEqual(response.status_code, 302)
        # Check for message.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('No tagged variables to decide on', str(messages[0]))

    def test_no_archived_taggedtraits_in_session_variable(self):
        """Does not include archived tagged traits in session variables."""
        archived_tagged_trait = self.tagged_traits[0]
        archived_tagged_trait.archive()
        archived_tagged_trait.refresh_from_db()
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        # Check session variables.
        session = self.client.session
        self.assertIn('tagged_trait_decision_by_tag_and_study_info', session)
        session_info = session['tagged_trait_decision_by_tag_and_study_info']
        self.assertIn('study_pk', session_info)
        self.assertEqual(session_info['study_pk'], self.study.pk)
        self.assertIn('tag_pk', session_info)
        self.assertEqual(session_info['tag_pk'], self.tag.pk)
        self.assertIn('tagged_trait_pks', session_info)
        for tt in self.tagged_traits[1:]:
            self.assertIn(tt.pk, session_info['tagged_trait_pks'],
                          msg='TaggedTrait {} not in session tagged_trait_pks'.format(tt.pk))
        self.assertNotIn(archived_tagged_trait.pk, session_info['tagged_trait_pks'])
        # The success url redirects again to a new page, so include the target_status_code argument.
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), target_status_code=302)

    def test_deprecated_taggedtraits_in_session_variable(self):
        """Does not include deprecated tagged traits in session variables."""
        deprecated_tagged_trait = self.tagged_traits[0]
        study_version = deprecated_tagged_trait.trait.source_dataset.source_study_version
        study_version.i_is_deprecated = True
        study_version.save()
        deprecated_tagged_trait.refresh_from_db()
        response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        # Check session variables.
        session = self.client.session
        self.assertIn('tagged_trait_decision_by_tag_and_study_info', session)
        session_info = session['tagged_trait_decision_by_tag_and_study_info']
        self.assertIn('study_pk', session_info)
        self.assertEqual(session_info['study_pk'], self.study.pk)
        self.assertIn('tag_pk', session_info)
        self.assertEqual(session_info['tag_pk'], self.tag.pk)
        self.assertIn('tagged_trait_pks', session_info)
        for tt in self.tagged_traits[1:]:
            self.assertIn(tt.pk, session_info['tagged_trait_pks'],
                          msg='TaggedTrait {} not in session tagged_trait_pks'.format(tt.pk))
        self.assertNotIn(deprecated_tagged_trait.pk, session_info['tagged_trait_pks'])
        # The success url redirects again to a new page, so include the target_status_code argument.
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), target_status_code=302)
class DCCDecisionByTagAndStudySelectFromURLDCCAnalystTest(DCCDecisionByTagAndStudySelectFromURLDCCTestsMixin,
                                                          DCCAnalystLoginTestCase):
    """Run all tests in DCCDecisionByTagAndStudySelectFromURLDCCTestsMixin as a DCC analyst."""
    pass
class DCCDecisionByTagAndStudySelectFromURLDCCDeveloperTest(DCCDecisionByTagAndStudySelectFromURLDCCTestsMixin,
                                                            DCCDeveloperLoginTestCase):
    """Run all tests in DCCDecisionByTagAndStudySelectFromURLDCCTestsMixin as a DCC developer."""
    pass
class DCCDecisionByTagAndStudySelectFromURLOtherUserTest(UserLoginTestCase):
    """Tests that a regular logged-in user cannot access the begin-dcc-decision view."""

    def setUp(self):
        super().setUp()
        self.tag = factories.TagFactory.create()
        self.study = StudyFactory.create()
        self.study_responses = factories.StudyResponseFactory.create_batch(
            10,
            status=models.StudyResponse.STATUS_DISAGREE,
            dcc_review__tagged_trait__tag=self.tag,
            dcc_review__tagged_trait__trait__source_dataset__source_study_version__study=self.study)
        self.tagged_traits = list(models.TaggedTrait.objects.all())

    def get_url(self, *args):
        """Build the url of the begin-dcc-decision view for the given tag and study pks."""
        return reverse('tags:tag:study:begin-dcc-decision', args=args)

    def test_forbidden_get_request(self):
        """Get returns forbidden status code for non-DCC users."""
        forbidden_response = self.client.get(self.get_url(self.tag.pk, self.study.pk))
        self.assertEqual(forbidden_response.status_code, 403)
class DCCDecisionByTagAndStudyNextDCCTestsMixin(object):
    """Tests of the dcc-decision "next" redirect view, to be run as both DCC analyst and developer.

    The view pops the next tagged trait pk off the session queue, skipping any
    tagged trait that no longer needs a decision, and redirects either to the
    decide view or to a summary page when the queue is exhausted.
    """

    def setUp(self):
        super().setUp()
        self.tag = factories.TagFactory.create()
        self.study = StudyFactory.create()
        self.study_responses = factories.StudyResponseFactory.create_batch(
            10, status=models.StudyResponse.STATUS_DISAGREE, dcc_review__tagged_trait__tag=self.tag,
            dcc_review__tagged_trait__trait__source_dataset__source_study_version__study=self.study)
        self.tagged_traits = list(models.TaggedTrait.objects.all())

    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:tagged-traits:dcc-decision:next', args=args)

    def _set_session_info(self, tagged_trait_pks):
        """Set the session variable the view expects, with the given queue of tagged trait pks."""
        session = self.client.session
        session['tagged_trait_decision_by_tag_and_study_info'] = {
            'tag_pk': self.tag.pk,
            'study_pk': self.study.pk,
            'tagged_trait_pks': tagged_trait_pks,
        }
        session.save()

    def _assert_skipped(self, tagged_trait, response, check_next_pk=True):
        """Assert that tagged_trait was dropped from the session queue without being presented.

        With check_next_pk=True, also check that the queue now starts with the
        second of the originally created tagged traits.
        """
        self.assertIn('tagged_trait_decision_by_tag_and_study_info', self.client.session)
        session_info = self.client.session['tagged_trait_decision_by_tag_and_study_info']
        self.assertIn('tagged_trait_pks', session_info)
        self.assertNotIn(tagged_trait.pk, session_info['tagged_trait_pks'])
        if check_next_pk:
            self.assertEqual(self.tagged_traits[1].pk, session_info['tagged_trait_pks'][0])
        self.assertNotIn('pk', session_info)
        # The view redirects to itself to process the next pk, so the redirect
        # target is itself a 302.
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), target_status_code=302)

    def test_view_success_with_no_session_variables(self):
        """Redirects to need_decision summary page when no session variables are set."""
        response = self.client.get(self.get_url())
        self.assertRedirects(response, reverse('tags:tagged-traits:need-decision'))

    def test_view_success_with_tagged_traits_to_decision(self):
        """Redirects to decision loop when there are tagged traits to decide on."""
        tagged_trait = self.tagged_traits[0]
        self._set_session_info([tagged_trait.pk])
        response = self.client.get(self.get_url())
        # Make sure a pk session variable was set.
        session = self.client.session
        self.assertIn('tagged_trait_decision_by_tag_and_study_info', session)
        self.assertIn('pk', session['tagged_trait_decision_by_tag_and_study_info'])
        self.assertEqual(session['tagged_trait_decision_by_tag_and_study_info']['pk'], tagged_trait.pk)
        self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:decide'))
        # Check messages.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 1)
        self.assertIn('You have 1 tagged variable left to decide on.', str(messages[0]))

    def test_view_success_with_no_tagged_traits_left(self):
        """Redirects to need_decision summary by tag and study when no tagged traits are left to decide on."""
        self._set_session_info([])
        response = self.client.get(self.get_url())
        self.assertRedirects(response, reverse('tags:tag:study:need-decision', args=[self.tag.pk, self.study.pk]))
        # Check that there are no messages.
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 0)

    def test_session_variables_are_unset_when_decisions_completed(self):
        """Unsets session variables when no tagged traits are left to decide on."""
        # Use a different tag and study from setUp's, so nothing is left to decide on.
        tag = factories.TagFactory.create()
        study = StudyFactory.create()
        session = self.client.session
        session['tagged_trait_decision_by_tag_and_study_info'] = {
            'tag_pk': tag.pk,
            'study_pk': study.pk,
            'tagged_trait_pks': [],
        }
        session.save()
        self.client.get(self.get_url())
        self.assertNotIn('tagged_trait_decision_by_tag_and_study_info', self.client.session)

    def test_skips_tagged_trait_with_decision(self):
        """Skips a tagged trait that has been decided on after starting the loop."""
        first_tagged_trait = self.tagged_traits[0]
        self._set_session_info([x.pk for x in self.tagged_traits])
        factories.DCCDecisionFactory.create(dcc_review=first_tagged_trait.dcc_review)
        response = self.client.get(self.get_url())
        self._assert_skipped(first_tagged_trait, response, check_next_pk=False)
        messages = list(response.wsgi_request._messages)
        self.assertEqual(len(messages), 0)

    def test_skips_deleted_tagged_trait(self):
        """Skips a tagged trait that has been deleted after starting the loop."""
        first_tagged_trait = self.tagged_traits[0]
        self._set_session_info([x.pk for x in self.tagged_traits])
        first_tagged_trait.hard_delete()
        response = self.client.get(self.get_url())
        self._assert_skipped(first_tagged_trait, response)

    def test_skips_archived_tagged_trait(self):
        """Skips a tagged trait that has been archived after starting the loop."""
        first_tagged_trait = self.tagged_traits[0]
        self._set_session_info([x.pk for x in self.tagged_traits])
        first_tagged_trait.archive()
        response = self.client.get(self.get_url())
        self._assert_skipped(first_tagged_trait, response)

    def test_skips_no_review_tagged_trait(self):
        """Skips a tagged trait that has no dcc review after starting the loop."""
        first_tagged_trait = self.tagged_traits[0]
        self._set_session_info([x.pk for x in self.tagged_traits])
        first_tagged_trait.dcc_review.hard_delete()
        response = self.client.get(self.get_url())
        self._assert_skipped(first_tagged_trait, response)

    def test_skips_review_confirmed_tagged_trait(self):
        """Skips a tagged trait that has been reviewed as confirmed after starting the loop."""
        first_tagged_trait = self.tagged_traits[0]
        self._set_session_info([x.pk for x in self.tagged_traits])
        first_dcc_review = first_tagged_trait.dcc_review
        first_dcc_review.status = models.DCCReview.STATUS_CONFIRMED
        first_dcc_review.save()
        response = self.client.get(self.get_url())
        self._assert_skipped(first_tagged_trait, response)

    def test_skips_no_response_tagged_trait(self):
        """Skips a tagged trait that has no study response after starting the loop."""
        first_tagged_trait = self.tagged_traits[0]
        self._set_session_info([x.pk for x in self.tagged_traits])
        first_tagged_trait.dcc_review.study_response.delete()
        response = self.client.get(self.get_url())
        self._assert_skipped(first_tagged_trait, response)

    def test_skips_response_agree_tagged_trait(self):
        """Skips a tagged trait that has a study response agree after starting the loop."""
        first_tagged_trait = self.tagged_traits[0]
        self._set_session_info([x.pk for x in self.tagged_traits])
        first_study_response = first_tagged_trait.dcc_review.study_response
        first_study_response.status = models.StudyResponse.STATUS_AGREE
        first_study_response.save()
        response = self.client.get(self.get_url())
        self._assert_skipped(first_tagged_trait, response)

    def test_skips_deprecated_tagged_trait(self):
        """Skips a tagged trait that has been deprecated after starting the loop."""
        deprecated_tagged_trait = self.tagged_traits[0]
        self._set_session_info([x.pk for x in self.tagged_traits])
        study_version = deprecated_tagged_trait.trait.source_dataset.source_study_version
        study_version.i_is_deprecated = True
        study_version.save()
        response = self.client.get(self.get_url())
        self._assert_skipped(deprecated_tagged_trait, response)

    def test_session_variables_are_not_properly_set(self):
        """Redirects to summary view if expected session variable is not set."""
        response = self.client.get(self.get_url())
        self.assertRedirects(response, reverse('tags:tagged-traits:need-decision'))

    def test_session_variable_missing_required_keys(self):
        """Redirects to summary view if expected session variable dictionary keys are missing."""
        template = {
            'study_pk': self.study.pk,
            'tag_pk': self.tag.pk,
            'tagged_trait_pks': [x.pk for x in self.tagged_traits]
        }
        # Drop each required key in turn and check the view bails out cleanly.
        for key in template.keys():
            session_info = copy.copy(template)
            session_info.pop(key)
            session = self.client.session
            session['tagged_trait_decision_by_tag_and_study_info'] = session_info
            session.save()
            response = self.client.get(self.get_url())
            self.assertNotIn('tagged_trait_decision_by_tag_and_study_info', self.client.session)
            self.assertRedirects(response, reverse('tags:tagged-traits:need-decision'),
                                 msg_prefix='did not redirect when missing {} in session'.format(key))
class DCCDecisionByTagAndStudyNextDCCAnalystTest(DCCDecisionByTagAndStudyNextDCCTestsMixin, DCCAnalystLoginTestCase):
    """Run all tests in DCCDecisionByTagAndStudyNextDCCTestsMixin as a DCC analyst."""
    pass
class DCCDecisionByTagAndStudyNextDCCDeveloperTest(DCCDecisionByTagAndStudyNextDCCTestsMixin,
                                                   DCCDeveloperLoginTestCase):
    """Run all tests in DCCDecisionByTagAndStudyNextDCCTestsMixin as a DCC developer."""
    pass
class DCCDecisionByTagAndStudyNextOtherUserTest(UserLoginTestCase):
    """Tests that a regular logged-in user cannot access the dcc-decision "next" view."""

    def setUp(self):
        super().setUp()
        self.tag = factories.TagFactory.create()
        self.study = StudyFactory.create()
        self.study_responses = factories.StudyResponseFactory.create_batch(
            10,
            status=models.StudyResponse.STATUS_DISAGREE,
            dcc_review__tagged_trait__tag=self.tag,
            dcc_review__tagged_trait__trait__source_dataset__source_study_version__study=self.study)
        self.tagged_traits = list(models.TaggedTrait.objects.all())

    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        view_name = 'tags:tagged-traits:dcc-decision:next'
        return reverse(view_name, args=args)

    def test_forbidden_get_request(self):
        """Returns a response with a forbidden status code for non-DCC users."""
        forbidden_response = self.client.get(self.get_url())
        self.assertEqual(forbidden_response.status_code, 403)

    def test_forbidden_post_request(self):
        """Returns a response with a forbidden status code for non-DCC users."""
        forbidden_response = self.client.post(self.get_url(), {})
        self.assertEqual(forbidden_response.status_code, 403)
class DCCDecisionByTagAndStudyDCCTestsMixin(object):
    def setUp(self):
        """Create one study response needing a decision and prime the session for the decide view."""
        super().setUp()
        self.tag = factories.TagFactory.create()
        self.study = StudyFactory.create()
        self.study_response = factories.StudyResponseFactory.create(
            status=models.StudyResponse.STATUS_DISAGREE, dcc_review__tagged_trait__tag=self.tag,
            dcc_review__tagged_trait__trait__source_dataset__source_study_version__study=self.study)
        self.tagged_trait = self.study_response.dcc_review.tagged_trait
        # Set expected session variables, including the 'pk' of the tagged
        # trait currently being decided on.
        session = self.client.session
        session['tagged_trait_decision_by_tag_and_study_info'] = {
            'study_pk': self.study.pk,
            'tag_pk': self.tag.pk,
            'tagged_trait_pks': [self.tagged_trait.pk],
            'pk': self.tagged_trait.pk,
        }
        session.save()
def get_url(self, *args):
"""Get the url for the view this class is supposed to test."""
return reverse('tags:tagged-traits:dcc-decision:decide', args=args)
def test_view_success_code(self):
"""Returns successful response code."""
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
def test_context_data(self):
"""View has appropriate data in the context."""
response = self.client.get(self.get_url())
context = response.context
self.assertIn('form', context)
self.assertIsInstance(context['form'], forms.DCCDecisionByTagAndStudyForm)
self.assertIn('tagged_trait', context)
self.assertEqual(context['tagged_trait'], self.tagged_trait)
self.assertIn('tag', context)
self.assertEqual(context['tag'], self.tag)
self.assertIn('study', context)
self.assertEqual(context['study'], self.study)
self.assertIn('n_tagged_traits_remaining', context)
self.assertEqual(context['n_tagged_traits_remaining'], 1)
def test_context_data_with_multiple_remaining_tagged_traits(self):
"""View has appropriate data in the context if there are multiple tagged traits to decide on."""
more_study_responses = factories.StudyResponseFactory.create_batch(
3, status=models.StudyResponse.STATUS_DISAGREE, dcc_review__tagged_trait__tag=self.tag,
dcc_review__tagged_trait__trait__source_dataset__source_study_version__study=self.study)
session = self.client.session
tagged_trait_list = list(models.TaggedTrait.objects.values_list('pk', flat=True))
session['tagged_trait_decision_by_tag_and_study_info']['tagged_trait_pks'] = tagged_trait_list
session.save()
response = self.client.get(self.get_url())
context = response.context
self.assertIn('form', context)
self.assertIsInstance(context['form'], forms.DCCDecisionByTagAndStudyForm)
self.assertIn('tagged_trait', context)
self.assertEqual(context['tagged_trait'], self.tagged_trait)
self.assertIn('tag', context)
self.assertEqual(context['tag'], self.tag)
self.assertIn('study', context)
self.assertEqual(context['study'], self.study)
self.assertIn('n_tagged_traits_remaining', context)
self.assertEqual(context['n_tagged_traits_remaining'], models.TaggedTrait.objects.count())
def test_successful_post_confirm_decision(self):
"""Posting valid data to the form correctly creates a DCCDecision."""
form_data = {forms.DCCDecisionByTagAndStudyForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'Looks good.'}
response = self.client.post(self.get_url(), form_data)
# Correctly creates a DCCDecision for this TaggedTrait.
dcc_decision = models.DCCDecision.objects.latest('created')
self.assertEqual(self.tagged_trait.dcc_review.dcc_decision, dcc_decision)
# The pk session variable is correctly unset.
session = self.client.session
self.assertIn('tagged_trait_decision_by_tag_and_study_info', session)
session_info = session['tagged_trait_decision_by_tag_and_study_info']
self.assertNotIn('pk', session_info)
# Check for success message.
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Successfully made a final decision', str(messages[0]))
# Correctly redirects to the next view (remembering that it is a redirect view).
self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), target_status_code=302)
def test_successful_post_remove_decision(self):
"""Posting valid data to the form correctly creates a DCCDecision."""
form_data = {forms.DCCDecisionByTagAndStudyForm.SUBMIT_REMOVE: 'Remove', 'comment': 'Definitely remove it.'}
response = self.client.post(self.get_url(), form_data)
# Correctly creates a DCCDecision for this TaggedTrait.
dcc_decision = models.DCCDecision.objects.latest('created')
self.assertEqual(self.tagged_trait.dcc_review.dcc_decision, dcc_decision)
# The pk session variable is correctly unset.
session = self.client.session
self.assertIn('tagged_trait_decision_by_tag_and_study_info', session)
session_info = session['tagged_trait_decision_by_tag_and_study_info']
self.assertNotIn('pk', session_info)
# Check for success message.
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Successfully made a final decision', str(messages[0]))
# Correctly redirects to the next view (remembering that it is a redirect view).
self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), target_status_code=302)
def test_post_invalid_missing_comment(self):
"""Posting bad data to the form shows a form error and doesn't unset session variables."""
form_data = {forms.DCCDecisionByTagAndStudyForm.SUBMIT_REMOVE: 'Remove', 'comment': ''}
response = self.client.post(self.get_url(), form_data)
self.assertEqual(response.status_code, 200)
# Does not create a DCCDecision for this TaggedTrait.
self.assertFalse(hasattr(self.tagged_trait.dcc_review, 'dcc_decision'))
self.assertFormError(response, 'form', 'comment', 'Comment cannot be blank.')
# The pk session variable is not unset.
session = self.client.session
self.assertIn('tagged_trait_decision_by_tag_and_study_info', session)
session_info = session['tagged_trait_decision_by_tag_and_study_info']
self.assertIn('pk', session_info)
self.assertEqual(session_info['pk'], self.tagged_trait.pk)
# No messages.
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 0)
def test_successfully_skip_tagged_trait(self):
"""Skipping a TaggedTrait unsets pk and redirects to the next view."""
form_data = {forms.DCCDecisionByTagAndStudyForm.SUBMIT_SKIP: 'Skip'}
response = self.client.post(self.get_url(), form_data)
# Does not create a DCCDecision for this TaggedTrait.
self.assertFalse(hasattr(self.tagged_trait.dcc_review, 'dcc_decision'))
# Session variables are properly set/unset.
session = self.client.session
self.assertIn('tagged_trait_decision_by_tag_and_study_info', session)
session_info = session['tagged_trait_decision_by_tag_and_study_info']
self.assertNotIn('pk', session_info)
self.assertIn('tagged_trait_pks', session_info)
self.assertNotIn(self.tagged_trait.pk, session_info['tagged_trait_pks'])
# The redirect view unsets some session variables, so check it at the end.
self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), target_status_code=302)
def test_failure_for_non_existent_tagged_trait(self):
    """Returns a 404 page when the tagged trait referenced by the session pk no longer exists."""
    self.tagged_trait.hard_delete()
    self.assertEqual(self.client.get(self.get_url()).status_code, 404)
def test_message_and_redirect_for_archived_tagged_trait(self):
    """Shows warning message and does not save decision if TaggedTrait is archived."""
    self.tagged_trait.archive()
    # Attempt a decision on the archived trait through the web interface.
    response = self.client.post(
        self.get_url(),
        {forms.DCCDecisionByTagAndStudyForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'Looks good.'})
    # The pk is cleared and the trait removed from the session queue.
    self.assertIn('tagged_trait_decision_by_tag_and_study_info', self.client.session)
    info = self.client.session['tagged_trait_decision_by_tag_and_study_info']
    self.assertNotIn('pk', info)
    self.assertIn('tagged_trait_pks', info)
    self.assertNotIn(self.tagged_trait.pk, info['tagged_trait_pks'])
    # Exactly one warning message is produced.
    msgs = list(response.wsgi_request._messages)
    self.assertEqual(1, len(msgs))
    self.assertIn('been archived', str(msgs[0]))
    self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), target_status_code=302)
def test_message_and_redirect_for_archived_tagged_trait_with_form_error(self):
    """Shows warning message and redirects if TaggedTrait is archived, even if there's a form error."""
    self.tagged_trait.archive()
    # Post invalid form data (missing comment) for the archived trait.
    response = self.client.post(
        self.get_url(),
        {forms.DCCDecisionByTagAndStudyForm.SUBMIT_REMOVE: 'Remove', 'comment': ''})
    # The pk is cleared and the trait removed from the session queue.
    self.assertIn('tagged_trait_decision_by_tag_and_study_info', self.client.session)
    info = self.client.session['tagged_trait_decision_by_tag_and_study_info']
    self.assertNotIn('pk', info)
    self.assertIn('tagged_trait_pks', info)
    self.assertNotIn(self.tagged_trait.pk, info['tagged_trait_pks'])
    # Exactly one warning message is produced.
    msgs = list(response.wsgi_request._messages)
    self.assertEqual(1, len(msgs))
    self.assertIn('been archived', str(msgs[0]))
    self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), target_status_code=302)
def test_message_and_redirect_for_no_review_tagged_trait(self):
    """Shows warning message and does not save decision if TaggedTrait is missing dcc review."""
    self.tagged_trait.dcc_review.hard_delete()
    # Attempt a decision through the web interface.
    response = self.client.post(
        self.get_url(),
        {forms.DCCDecisionByTagAndStudyForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'Looks good.'})
    # The pk is cleared and the trait removed from the session queue.
    self.assertIn('tagged_trait_decision_by_tag_and_study_info', self.client.session)
    info = self.client.session['tagged_trait_decision_by_tag_and_study_info']
    self.assertNotIn('pk', info)
    self.assertIn('tagged_trait_pks', info)
    self.assertNotIn(self.tagged_trait.pk, info['tagged_trait_pks'])
    # Exactly one warning message is produced.
    msgs = list(response.wsgi_request._messages)
    self.assertEqual(1, len(msgs))
    self.assertIn('missing a dcc review', str(msgs[0]))
    self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), target_status_code=302)
def test_message_and_redirect_for_no_review_tagged_trait_with_form_error(self):
    """Shows warning message and redirects if TaggedTrait is missing dcc review, even if there's a form error."""
    self.tagged_trait.dcc_review.hard_delete()
    # Post invalid form data (missing comment) for the unreviewed trait.
    response = self.client.post(
        self.get_url(),
        {forms.DCCDecisionByTagAndStudyForm.SUBMIT_REMOVE: 'Remove', 'comment': ''})
    # The pk is cleared and the trait removed from the session queue.
    self.assertIn('tagged_trait_decision_by_tag_and_study_info', self.client.session)
    info = self.client.session['tagged_trait_decision_by_tag_and_study_info']
    self.assertNotIn('pk', info)
    self.assertIn('tagged_trait_pks', info)
    self.assertNotIn(self.tagged_trait.pk, info['tagged_trait_pks'])
    # Exactly one warning message is produced.
    msgs = list(response.wsgi_request._messages)
    self.assertEqual(1, len(msgs))
    self.assertIn('missing a dcc review', str(msgs[0]))
    self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), target_status_code=302)
def test_message_and_redirect_for_confirmed_review_tagged_trait(self):
    """Shows warning message and does not save decision if TaggedTrait has review status confirmed."""
    self.tagged_trait.dcc_review.status = models.DCCReview.STATUS_CONFIRMED
    self.tagged_trait.dcc_review.save()
    # Attempt a decision through the web interface.
    response = self.client.post(
        self.get_url(),
        {forms.DCCDecisionByTagAndStudyForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'Looks good.'})
    # The pk is cleared and the trait removed from the session queue.
    self.assertIn('tagged_trait_decision_by_tag_and_study_info', self.client.session)
    info = self.client.session['tagged_trait_decision_by_tag_and_study_info']
    self.assertNotIn('pk', info)
    self.assertIn('tagged_trait_pks', info)
    self.assertNotIn(self.tagged_trait.pk, info['tagged_trait_pks'])
    # Exactly one warning message is produced.
    msgs = list(response.wsgi_request._messages)
    self.assertEqual(1, len(msgs))
    self.assertIn('dcc review status is "confirmed"', str(msgs[0]))
    self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), target_status_code=302)
def test_message_and_redirect_for_confirmed_review_tagged_trait_with_form_error(self):
    """Shows warning message and redirects if review status confirmed, even if there's a form error."""
    self.tagged_trait.dcc_review.status = models.DCCReview.STATUS_CONFIRMED
    self.tagged_trait.dcc_review.save()
    # Post invalid form data (missing comment) for the confirmed trait.
    response = self.client.post(
        self.get_url(),
        {forms.DCCDecisionByTagAndStudyForm.SUBMIT_REMOVE: 'Remove', 'comment': ''})
    # The pk is cleared and the trait removed from the session queue.
    self.assertIn('tagged_trait_decision_by_tag_and_study_info', self.client.session)
    info = self.client.session['tagged_trait_decision_by_tag_and_study_info']
    self.assertNotIn('pk', info)
    self.assertIn('tagged_trait_pks', info)
    self.assertNotIn(self.tagged_trait.pk, info['tagged_trait_pks'])
    # Exactly one warning message is produced.
    msgs = list(response.wsgi_request._messages)
    self.assertEqual(1, len(msgs))
    self.assertIn('dcc review status is "confirmed"', str(msgs[0]))
    self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), target_status_code=302)
def test_message_and_redirect_for_no_response_tagged_trait(self):
    """Shows warning message and does not save decision if TaggedTrait is missing study response."""
    self.tagged_trait.dcc_review.study_response.delete()
    # Attempt a decision through the web interface.
    response = self.client.post(
        self.get_url(),
        {forms.DCCDecisionByTagAndStudyForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'Looks good.'})
    # The pk is cleared and the trait removed from the session queue.
    self.assertIn('tagged_trait_decision_by_tag_and_study_info', self.client.session)
    info = self.client.session['tagged_trait_decision_by_tag_and_study_info']
    self.assertNotIn('pk', info)
    self.assertIn('tagged_trait_pks', info)
    self.assertNotIn(self.tagged_trait.pk, info['tagged_trait_pks'])
    # Exactly one warning message is produced.
    msgs = list(response.wsgi_request._messages)
    self.assertEqual(1, len(msgs))
    self.assertIn('missing a study response', str(msgs[0]))
    self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), target_status_code=302)
def test_message_and_redirect_for_no_response_tagged_trait_with_form_error(self):
    """Shows warning message and redirects if missing study response, even if there's a form error."""
    self.tagged_trait.dcc_review.study_response.delete()
    # Post invalid form data (missing comment) for the trait without a response.
    response = self.client.post(
        self.get_url(),
        {forms.DCCDecisionByTagAndStudyForm.SUBMIT_REMOVE: 'Remove', 'comment': ''})
    # The pk is cleared and the trait removed from the session queue.
    self.assertIn('tagged_trait_decision_by_tag_and_study_info', self.client.session)
    info = self.client.session['tagged_trait_decision_by_tag_and_study_info']
    self.assertNotIn('pk', info)
    self.assertIn('tagged_trait_pks', info)
    self.assertNotIn(self.tagged_trait.pk, info['tagged_trait_pks'])
    # Exactly one warning message is produced.
    msgs = list(response.wsgi_request._messages)
    self.assertEqual(1, len(msgs))
    self.assertIn('missing a study response', str(msgs[0]))
    self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), target_status_code=302)
def test_message_and_redirect_for_agree_response_tagged_trait(self):
    """Shows warning message and does not save decision if TaggedTrait study response is agree."""
    self.tagged_trait.dcc_review.study_response.status = models.StudyResponse.STATUS_AGREE
    self.tagged_trait.dcc_review.study_response.save()
    # Attempt a decision through the web interface.
    response = self.client.post(
        self.get_url(),
        {forms.DCCDecisionByTagAndStudyForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'Looks good.'})
    # The pk is cleared and the trait removed from the session queue.
    self.assertIn('tagged_trait_decision_by_tag_and_study_info', self.client.session)
    info = self.client.session['tagged_trait_decision_by_tag_and_study_info']
    self.assertNotIn('pk', info)
    self.assertIn('tagged_trait_pks', info)
    self.assertNotIn(self.tagged_trait.pk, info['tagged_trait_pks'])
    # Exactly one warning message is produced.
    msgs = list(response.wsgi_request._messages)
    self.assertEqual(1, len(msgs))
    self.assertIn('study response status is "agree"', str(msgs[0]))
    self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), target_status_code=302)
def test_message_and_redirect_for_agree_response_tagged_trait_with_form_error(self):
    """Shows warning message and redirects if study response is agree, even if there's a form error."""
    self.tagged_trait.dcc_review.study_response.status = models.StudyResponse.STATUS_AGREE
    self.tagged_trait.dcc_review.study_response.save()
    # Post invalid form data (missing comment) for the agreed trait.
    response = self.client.post(
        self.get_url(),
        {forms.DCCDecisionByTagAndStudyForm.SUBMIT_REMOVE: 'Remove', 'comment': ''})
    # The pk is cleared and the trait removed from the session queue.
    self.assertIn('tagged_trait_decision_by_tag_and_study_info', self.client.session)
    info = self.client.session['tagged_trait_decision_by_tag_and_study_info']
    self.assertNotIn('pk', info)
    self.assertIn('tagged_trait_pks', info)
    self.assertNotIn(self.tagged_trait.pk, info['tagged_trait_pks'])
    # Exactly one warning message is produced.
    msgs = list(response.wsgi_request._messages)
    self.assertEqual(1, len(msgs))
    self.assertIn('study response status is "agree"', str(msgs[0]))
    self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), target_status_code=302)
def test_message_and_redirect_for_tagged_trait_with_decision(self):
    """Shows warning message and does not save decision if TaggedTrait is already decided."""
    existing_decision = factories.DCCDecisionFactory.create(
        dcc_review=self.tagged_trait.dcc_review,
        decision=models.DCCDecision.DECISION_REMOVE
    )
    # Attempt a different decision through the web interface.
    response = self.client.post(
        self.get_url(),
        {forms.DCCDecisionByTagAndStudyForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'Looks good.'})
    # The pk is cleared and the trait removed from the session queue.
    self.assertIn('tagged_trait_decision_by_tag_and_study_info', self.client.session)
    info = self.client.session['tagged_trait_decision_by_tag_and_study_info']
    self.assertNotIn('pk', info)
    self.assertIn('tagged_trait_pks', info)
    self.assertNotIn(self.tagged_trait.pk, info['tagged_trait_pks'])
    # Exactly one warning message is produced.
    msgs = list(response.wsgi_request._messages)
    self.assertEqual(1, len(msgs))
    self.assertIn('already has a decision made', str(msgs[0]))
    # The pre-existing DCCDecision is untouched.
    self.assertEqual(self.tagged_trait.dcc_review.dcc_decision, existing_decision)
    self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), target_status_code=302)
def test_message_and_redirect_for_tagged_trait_with_decision_with_form_error(self):
    """Shows warning message and redirects if TaggedTrait already has decision, even if there's a form error."""
    existing_decision = factories.DCCDecisionFactory.create(
        dcc_review=self.tagged_trait.dcc_review,
        decision=models.DCCDecision.DECISION_CONFIRM,
        comment='looks good'
    )
    # Post invalid form data (missing comment) for the already-decided trait.
    response = self.client.post(
        self.get_url(),
        {forms.DCCDecisionByTagAndStudyForm.SUBMIT_REMOVE: 'Remove', 'comment': ''})
    # The pk is cleared and the trait removed from the session queue.
    self.assertIn('tagged_trait_decision_by_tag_and_study_info', self.client.session)
    info = self.client.session['tagged_trait_decision_by_tag_and_study_info']
    self.assertNotIn('pk', info)
    self.assertIn('tagged_trait_pks', info)
    self.assertNotIn(self.tagged_trait.pk, info['tagged_trait_pks'])
    # Exactly one warning message is produced.
    msgs = list(response.wsgi_request._messages)
    self.assertEqual(1, len(msgs))
    self.assertIn('already has a decision made', str(msgs[0]))
    # The pre-existing DCCDecision is untouched.
    self.assertEqual(self.tagged_trait.dcc_review.dcc_decision, existing_decision)
    self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), target_status_code=302)
def test_successful_skip_tagged_trait_with_decision(self):
    """Redirects without a message if an already-decided tagged trait is skipped."""
    existing_decision = factories.DCCDecisionFactory.create(
        dcc_review=self.tagged_trait.dcc_review,
        decision=models.DCCDecision.DECISION_CONFIRM,
        comment='looks good'
    )
    # Skip the already-decided trait through the web interface.
    response = self.client.post(
        self.get_url(),
        {forms.DCCDecisionByTagAndStudyForm.SUBMIT_SKIP: 'Skip', 'comment': ''})
    # The pk is cleared and the trait removed from the session queue.
    self.assertIn('tagged_trait_decision_by_tag_and_study_info', self.client.session)
    info = self.client.session['tagged_trait_decision_by_tag_and_study_info']
    self.assertNotIn('pk', info)
    self.assertIn('tagged_trait_pks', info)
    self.assertNotIn(self.tagged_trait.pk, info['tagged_trait_pks'])
    # Skipping produces no message at all.
    msgs = list(response.wsgi_request._messages)
    self.assertEqual(0, len(msgs))
    # The pre-existing DCCDecision is untouched.
    self.assertEqual(self.tagged_trait.dcc_review.dcc_decision, existing_decision)
    self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), target_status_code=302)
def test_get_redirects_if_session_variables_are_not_properly_set(self):
    """Get redirects to summary view if expected session variable is not set."""
    session = self.client.session
    del session['tagged_trait_decision_by_tag_and_study_info']
    session.save()
    self.assertRedirects(self.client.get(self.get_url()), reverse('tags:tagged-traits:need-decision'))
def test_post_redirects_if_session_variables_are_not_properly_set(self):
    """Post redirects to summary view if expected session variable is not set."""
    session = self.client.session
    del session['tagged_trait_decision_by_tag_and_study_info']
    session.save()
    self.assertRedirects(self.client.post(self.get_url(), {}), reverse('tags:tagged-traits:need-decision'))
def test_get_redirects_if_session_variable_missing_key_tag_pk(self):
    """Get redirects to summary view if the tag_pk key is missing from the session variable."""
    session = self.client.session
    session['tagged_trait_decision_by_tag_and_study_info'].pop('tag_pk')
    session.save()
    response = self.client.get(self.get_url())
    # The broken session variable is removed entirely.
    self.assertNotIn('tagged_trait_decision_by_tag_and_study_info', self.client.session)
    self.assertRedirects(response, reverse('tags:tagged-traits:need-decision'))
def test_get_redirects_if_session_variable_missing_key_study_pk(self):
    """Get redirects to summary view if the study_pk key is missing from the session variable."""
    session = self.client.session
    session['tagged_trait_decision_by_tag_and_study_info'].pop('study_pk')
    session.save()
    response = self.client.get(self.get_url())
    # The broken session variable is removed entirely.
    self.assertNotIn('tagged_trait_decision_by_tag_and_study_info', self.client.session)
    self.assertRedirects(response, reverse('tags:tagged-traits:need-decision'))
def test_get_redirects_if_session_variable_missing_key_tagged_trait_pks(self):
    """Get redirects to summary view if the tagged_trait_pks key is missing from the session variable."""
    session = self.client.session
    session['tagged_trait_decision_by_tag_and_study_info'].pop('tagged_trait_pks')
    session.save()
    response = self.client.get(self.get_url())
    # The broken session variable is removed entirely.
    self.assertNotIn('tagged_trait_decision_by_tag_and_study_info', self.client.session)
    self.assertRedirects(response, reverse('tags:tagged-traits:need-decision'))
def test_get_redirects_if_session_variable_missing_key_pk(self):
    """Get redirects to the next view if the pk key is missing from the session variable."""
    session = self.client.session
    session['tagged_trait_decision_by_tag_and_study_info'].pop('pk')
    session.save()
    response = self.client.get(self.get_url())
    # The next view is itself a redirect view, hence the 302 target status.
    self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), target_status_code=302)
def test_post_redirects_if_session_variable_missing_key_tag_pk(self):
    """Post redirects to summary view if the tag_pk key is missing from the session variable."""
    session = self.client.session
    session['tagged_trait_decision_by_tag_and_study_info'].pop('tag_pk')
    session.save()
    response = self.client.post(self.get_url(), {})
    # The broken session variable is removed entirely.
    self.assertNotIn('tagged_trait_decision_by_tag_and_study_info', self.client.session)
    self.assertRedirects(response, reverse('tags:tagged-traits:need-decision'))
def test_post_redirects_if_session_variable_missing_key_study_pk(self):
    """Post redirects to summary view if the study_pk key is missing from the session variable."""
    session = self.client.session
    session['tagged_trait_decision_by_tag_and_study_info'].pop('study_pk')
    session.save()
    response = self.client.post(self.get_url(), {})
    # The broken session variable is removed entirely.
    self.assertNotIn('tagged_trait_decision_by_tag_and_study_info', self.client.session)
    self.assertRedirects(response, reverse('tags:tagged-traits:need-decision'))
def test_post_redirects_if_session_variable_missing_key_tagged_trait_pks(self):
    """Post redirects to summary view if the tagged_trait_pks key is missing from the session variable."""
    session = self.client.session
    session['tagged_trait_decision_by_tag_and_study_info'].pop('tagged_trait_pks')
    session.save()
    response = self.client.post(self.get_url(), {})
    # The broken session variable is removed entirely.
    self.assertNotIn('tagged_trait_decision_by_tag_and_study_info', self.client.session)
    self.assertRedirects(response, reverse('tags:tagged-traits:need-decision'))
def test_post_redirects_if_session_variable_missing_key_pk(self):
    """Post redirects to the next view if the pk key is missing from the session variable."""
    session = self.client.session
    session['tagged_trait_decision_by_tag_and_study_info'].pop('pk')
    session.save()
    response = self.client.post(self.get_url(), {})
    # The next view is itself a redirect view, hence the 302 target status.
    self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), target_status_code=302)
def test_shows_other_tags(self):
    """Other tags linked to the same trait are shown on the page."""
    other = factories.TaggedTraitFactory.create(trait=self.tagged_trait.trait)
    response = self.client.get(self.get_url())
    self.assertTrue(response.context['show_other_tags'])
    # Both the other tag and this trait's own tag appear in the rendered page.
    page = str(response.content)
    self.assertIn(other.tag.title, page)
    self.assertIn(self.tagged_trait.tag.title, page)
def test_shows_archived_other_tags(self):
    """Archived tags linked to the same trait are also shown on the page."""
    another_tagged_trait = factories.TaggedTraitFactory.create(trait=self.tagged_trait.trait, archived=True)
    response = self.client.get(self.get_url())
    context = response.context
    self.assertTrue(context['show_other_tags'])
    # Both the archived other tag and this trait's own tag appear in the rendered page.
    content = str(response.content)
    self.assertIn(another_tagged_trait.tag.title, content)
    self.assertIn(self.tagged_trait.tag.title, content)
def test_shows_tag_only_once_when_it_is_archived(self):
    """The tag is only shown once, even when the tagged variable is archived."""
    self.tagged_trait.archive()
    response = self.client.get(self.get_url())
    context = response.context
    self.assertTrue(context['show_other_tags'])
    # The trait's own tag must not be duplicated into either other-tags list.
    # (Removed an unused `content = str(response.content)` local; the test only
    # inspects the context, not the rendered page.)
    self.assertNotIn(self.tagged_trait.tag, context['other_tags'])
    self.assertNotIn(self.tagged_trait.tag, context['archived_other_tags'])
def test_archives_tagged_trait_with_dccdecision_remove(self):
    """Creating a remove DCCDecision archives the tagged trait."""
    self.assertFalse(self.tagged_trait.archived)
    form_data = {forms.DCCDecisionByTagAndStudyForm.SUBMIT_REMOVE: 'Remove', 'comment': 'get rid of it'}
    response = self.client.post(self.get_url(), form_data)
    dcc_decision = models.DCCDecision.objects.latest('created')
    self.tagged_trait.refresh_from_db()
    # The new decision is linked to this trait's review (dcc_decision was
    # previously fetched but never checked), and the trait is now archived.
    self.assertEqual(self.tagged_trait.dcc_review.dcc_decision, dcc_decision)
    self.assertTrue(self.tagged_trait.archived)
    self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), target_status_code=302)
def test_tagged_trait_nonarchived_after_dccdecision_confirm(self):
    """Creating a confirm DCCDecision results in the tagged trait being non-archived."""
    self.assertFalse(self.tagged_trait.archived)
    # Use the by-tag-and-study form constants, consistent with every other test
    # in this mixin (previously referenced forms.DCCDecisionForm).
    form_data = {forms.DCCDecisionByTagAndStudyForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'looks good'}
    response = self.client.post(self.get_url(), form_data)
    dcc_decision = models.DCCDecision.objects.latest('created')
    self.tagged_trait.refresh_from_db()
    # The new decision is linked to this trait's review, and a confirm
    # decision leaves the trait non-archived.
    self.assertEqual(self.tagged_trait.dcc_review.dcc_decision, dcc_decision)
    self.assertFalse(self.tagged_trait.archived)
    self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), target_status_code=302)
def test_message_and_redirect_for_deprecated_tagged_trait(self):
    """Shows warning message and does not save decision if TaggedTrait's study version is deprecated."""
    study_version = self.tagged_trait.trait.source_dataset.source_study_version
    study_version.i_is_deprecated = True
    study_version.save()
    # Attempt a decision through the web interface.
    response = self.client.post(
        self.get_url(),
        {forms.DCCDecisionByTagAndStudyForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'Looks good.'})
    self.tagged_trait.refresh_from_db()
    # No DCCDecision was created.
    self.assertFalse(hasattr(self.tagged_trait.dcc_review, 'dcc_decision'))
    # The pk is cleared and the trait removed from the session queue.
    self.assertIn('tagged_trait_decision_by_tag_and_study_info', self.client.session)
    info = self.client.session['tagged_trait_decision_by_tag_and_study_info']
    self.assertNotIn('pk', info)
    self.assertIn('tagged_trait_pks', info)
    self.assertNotIn(self.tagged_trait.pk, info['tagged_trait_pks'])
    # Exactly one warning message is produced.
    msgs = list(response.wsgi_request._messages)
    self.assertEqual(1, len(msgs))
    self.assertIn('newer version', str(msgs[0]))
    self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), target_status_code=302)
def test_message_and_redirect_for_deprecated_tagged_trait_with_form_error(self):
    """Shows warning message and does not save decision if TaggedTrait's study version is deprecated, with form error.""" # noqa
    study_version = self.tagged_trait.trait.source_dataset.source_study_version
    study_version.i_is_deprecated = True
    study_version.save()
    # Post invalid form data (missing comment) for the deprecated trait.
    response = self.client.post(
        self.get_url(),
        {forms.DCCDecisionByTagAndStudyForm.SUBMIT_REMOVE: 'Remove', 'comment': ''})
    self.tagged_trait.refresh_from_db()
    # No DCCDecision was created.
    self.assertFalse(hasattr(self.tagged_trait.dcc_review, 'dcc_decision'))
    # The pk is cleared and the trait removed from the session queue.
    self.assertIn('tagged_trait_decision_by_tag_and_study_info', self.client.session)
    info = self.client.session['tagged_trait_decision_by_tag_and_study_info']
    self.assertNotIn('pk', info)
    self.assertIn('tagged_trait_pks', info)
    self.assertNotIn(self.tagged_trait.pk, info['tagged_trait_pks'])
    # Exactly one warning message is produced.
    msgs = list(response.wsgi_request._messages)
    self.assertEqual(1, len(msgs))
    self.assertIn('newer version', str(msgs[0]))
    self.assertRedirects(response, reverse('tags:tagged-traits:dcc-decision:next'), target_status_code=302)
class DCCDecisionByTagAndStudyDCCAnalystTest(DCCDecisionByTagAndStudyDCCTestsMixin, DCCAnalystLoginTestCase):
    # Run all tests in DCCDecisionByTagAndStudyDCCTestsMixin, logged in as a DCC analyst.
    pass
class DCCDecisionByTagAndStudyDCCDeveloperTest(DCCDecisionByTagAndStudyDCCTestsMixin, DCCDeveloperLoginTestCase):
    # Run all tests in DCCDecisionByTagAndStudyDCCTestsMixin, logged in as a DCC developer.
    pass
class DCCDecisionByTagAndStudyOtherUserTest(UserLoginTestCase):
    """The by-tag-and-study decide view is forbidden to non-DCC users."""

    def setUp(self):
        super().setUp()
        self.tag = factories.TagFactory.create()
        self.study = StudyFactory.create()
        self.study_response = factories.StudyResponseFactory.create(
            status=models.StudyResponse.STATUS_DISAGREE, dcc_review__tagged_trait__tag=self.tag,
            dcc_review__tagged_trait__trait__source_dataset__source_study_version__study=self.study)
        self.tagged_trait = self.study_response.dcc_review.tagged_trait
        # Populate the session variables the view expects, so a permission
        # failure (not a missing-session redirect) is what gets tested.
        session = self.client.session
        session['tagged_trait_decision_by_tag_and_study_info'] = {
            'study_pk': self.study.pk,
            'tag_pk': self.tag.pk,
            'tagged_trait_pks': [self.tagged_trait.pk],
            'pk': self.tagged_trait.pk,
        }
        session.save()

    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:tagged-traits:dcc-decision:decide', args=args)

    def test_forbidden_get_request(self):
        """Returns a response with a forbidden status code for non-DCC users."""
        self.assertEqual(self.client.get(self.get_url()).status_code, 403)

    def test_forbidden_post_request(self):
        """Returns a response with a forbidden status code for non-DCC users."""
        self.assertEqual(self.client.post(self.get_url(), {}).status_code, 403)
class DCCDecisionCreateDCCTestsMixin(object):
def setUp(self):
    super().setUp()
    # A disagree study response brings a dcc review and tagged trait with it.
    self.study_response = factories.StudyResponseFactory.create(status=models.StudyResponse.STATUS_DISAGREE)
    self.tagged_trait = self.study_response.dcc_review.tagged_trait
    study = self.tagged_trait.trait.source_dataset.source_study_version.study
    self.need_decision_url = reverse(
        'tags:tag:study:need-decision', args=[self.tagged_trait.tag.pk, study.pk])
def get_url(self, *args):
    """Build the URL for the DCCDecision create view tested by this class."""
    return reverse('tags:tagged-traits:pk:dcc-decision:new', args=args)
def test_view_success_code(self):
    """Returns successful response code."""
    self.assertEqual(self.client.get(self.get_url(self.tagged_trait.pk)).status_code, 200)
def test_context_data(self):
    """View has appropriate data in the context."""
    context = self.client.get(self.get_url(self.tagged_trait.pk)).context
    # The decision form and the tagged trait under review are in the context.
    self.assertIn('form', context)
    self.assertIsInstance(context['form'], forms.DCCDecisionForm)
    self.assertIn('tagged_trait', context)
    self.assertEqual(context['tagged_trait'], self.tagged_trait)
def test_post_confirm_decision_creates_decision(self):
    """Posting valid data to the form correctly creates a DCCDecision."""
    response = self.client.post(
        self.get_url(self.tagged_trait.pk),
        {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'looks good'})
    self.assertRedirects(response, self.need_decision_url)
    # A DCCDecision linked to this trait's review was created.
    latest_decision = models.DCCDecision.objects.latest('created')
    self.assertEqual(self.tagged_trait.dcc_review.dcc_decision, latest_decision)
    # A single success message is shown.
    msgs = list(response.wsgi_request._messages)
    self.assertEqual(1, len(msgs))
    self.assertIn('Successfully made final decision for', str(msgs[0]))
def test_remove_decision_creates_decision(self):
    """Posting valid data to the form correctly creates a DCCDecision."""
    response = self.client.post(
        self.get_url(self.tagged_trait.pk),
        {forms.DCCDecisionForm.SUBMIT_REMOVE: 'Remove', 'comment': 'foo'})
    self.assertRedirects(response, self.need_decision_url)
    # A DCCDecision linked to this trait's review was created.
    latest_decision = models.DCCDecision.objects.latest('created')
    self.assertEqual(self.tagged_trait.dcc_review.dcc_decision, latest_decision)
    # A single success message is shown.
    msgs = list(response.wsgi_request._messages)
    self.assertEqual(1, len(msgs))
    self.assertIn('Successfully made final decision for', str(msgs[0]))
def test_form_error_missing_comment_for_remove(self):
    """Posting bad data to the form shows a form error."""
    response = self.client.post(
        self.get_url(self.tagged_trait.pk),
        {forms.DCCDecisionForm.SUBMIT_REMOVE: 'Remove', 'comment': ''})
    self.assertEqual(200, response.status_code)
    self.assertFormError(response, 'form', 'comment', 'Comment cannot be blank.')
    # No DCCDecision was created for this tagged trait.
    self.assertFalse(hasattr(self.tagged_trait.dcc_review, 'dcc_decision'))
    # No messages were generated.
    msgs = list(response.wsgi_request._messages)
    self.assertEqual(0, len(msgs))
def test_form_error_missing_comment_for_confirm(self):
    """Posting bad data to the form shows a form error."""
    response = self.client.post(
        self.get_url(self.tagged_trait.pk),
        {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''})
    self.assertEqual(200, response.status_code)
    self.assertFormError(response, 'form', 'comment', 'Comment cannot be blank.')
    # No DCCDecision was created for this tagged trait.
    self.assertFalse(hasattr(self.tagged_trait.dcc_review, 'dcc_decision'))
    # No messages were generated.
    msgs = list(response.wsgi_request._messages)
    self.assertEqual(0, len(msgs))
def test_get_view_with_invalid_tagged_trait_pk(self):
    """Returns a 404 page with a get request if the tagged trait doesn't exist."""
    # pk + 1 is guaranteed not to refer to an existing tagged trait here.
    response = self.client.get(self.get_url(self.tagged_trait.pk + 1))
    self.assertEqual(404, response.status_code)
def test_post_view_with_invalid_tagged_trait_pk(self):
    """Returns a 404 page with a post request if the tagged trait pk in the URL doesn't exist."""
    response = self.client.post(
        self.get_url(self.tagged_trait.pk + 1),
        {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'looks good'})
    self.assertEqual(404, response.status_code)
def test_get_message_and_redirect_archived_tagged_trait(self):
    """Get request gives a warning message and redirects if the tagged trait is archived."""
    self.tagged_trait.archive()
    response = self.client.get(self.get_url(self.tagged_trait.pk))
    self.assertRedirects(response, self.need_decision_url)
    # A single warning message is shown.
    msgs = list(response.wsgi_request._messages)
    self.assertEqual(1, len(msgs))
    self.assertIn('been archived', str(msgs[0]))
def test_post_message_and_redirect_archived_tagged_trait(self):
    """Post request gives a warning message and redirects if the tagged trait is archived."""
    self.tagged_trait.archive()
    response = self.client.post(
        self.get_url(self.tagged_trait.pk),
        {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'looks good'})
    self.assertRedirects(response, self.need_decision_url)
    # A single warning message is shown.
    msgs = list(response.wsgi_request._messages)
    self.assertEqual(1, len(msgs))
    self.assertIn('been archived', str(msgs[0]))
def test_post_message_and_redirect_archived_tagged_trait_with_form_error(self):
    """Post request gives a warning message and redirects if the tagged trait is archived, even with bad data."""
    self.tagged_trait.archive()
    # Invalid form data: a confirm decision with no comment.
    response = self.client.post(
        self.get_url(self.tagged_trait.pk),
        {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''})
    self.assertRedirects(response, self.need_decision_url)
    # A single warning message is shown.
    msgs = list(response.wsgi_request._messages)
    self.assertEqual(1, len(msgs))
    self.assertIn('been archived', str(msgs[0]))
def test_get_message_and_redirect_missing_review_tagged_trait(self):
"""Get request gives a warning message and redirects if the tagged trait has no dcc review."""
self.tagged_trait.dcc_review.hard_delete()
url = self.get_url(self.tagged_trait.pk)
response = self.client.get(url)
self.assertRedirects(response, self.need_decision_url)
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Cannot create', str(messages[0]))
def test_post_message_and_redirect_missing_review_tagged_trait(self):
"""Post request gives a warning message and redirects if the tagged trait has no dcc review."""
self.tagged_trait.dcc_review.hard_delete()
url = self.get_url(self.tagged_trait.pk)
form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'looks good'}
response = self.client.post(url, form_data)
self.assertRedirects(response, self.need_decision_url)
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Cannot create', str(messages[0]))
def test_post_message_and_redirect_missing_review_tagged_trait_with_form_error(self):
"""Post request gives a warning message and redirects if no dcc review, even with bad data."""
self.tagged_trait.dcc_review.hard_delete()
url = self.get_url(self.tagged_trait.pk)
form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''}
response = self.client.post(url, form_data)
self.assertRedirects(response, self.need_decision_url)
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Cannot create', str(messages[0]))
def test_get_message_and_redirect_review_confirmed_tagged_trait(self):
"""Get request gives a warning message and redirects if the tagged trait has dcc review status confirmed."""
self.tagged_trait.dcc_review.status = models.DCCReview.STATUS_CONFIRMED
self.tagged_trait.dcc_review.save()
url = self.get_url(self.tagged_trait.pk)
response = self.client.get(url)
self.assertRedirects(response, self.need_decision_url)
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Cannot create', str(messages[0]))
def test_post_message_and_redirect_review_confirmed_tagged_trait(self):
"""Post request gives a warning message and redirects if the tagged trait has dcc review status confirmed."""
self.tagged_trait.dcc_review.status = models.DCCReview.STATUS_CONFIRMED
self.tagged_trait.dcc_review.save()
url = self.get_url(self.tagged_trait.pk)
form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'looks good'}
response = self.client.post(url, form_data)
self.assertRedirects(response, self.need_decision_url)
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Cannot create', str(messages[0]))
def test_post_message_and_redirect_review_confirmed_tagged_trait_with_form_error(self):
"""Post request gives a warning message and redirects if dcc review status confirmed, even with bad data."""
self.tagged_trait.dcc_review.status = models.DCCReview.STATUS_CONFIRMED
self.tagged_trait.dcc_review.save()
url = self.get_url(self.tagged_trait.pk)
form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''}
response = self.client.post(url, form_data)
self.assertRedirects(response, self.need_decision_url)
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Cannot create', str(messages[0]))
def test_get_message_and_redirect_missing_response_tagged_trait(self):
"""Get request gives a warning message and redirects if the tagged trait has no study response."""
self.tagged_trait.dcc_review.study_response.delete()
url = self.get_url(self.tagged_trait.pk)
response = self.client.get(url)
self.assertRedirects(response, self.need_decision_url)
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Cannot create', str(messages[0]))
def test_post_message_and_redirect_missing_response_tagged_trait(self):
"""Post request gives a warning message and redirects if the tagged trait has no study response."""
self.tagged_trait.dcc_review.study_response.delete()
url = self.get_url(self.tagged_trait.pk)
form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'looks good'}
response = self.client.post(url, form_data)
self.assertRedirects(response, self.need_decision_url)
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Cannot create', str(messages[0]))
def test_post_message_and_redirect_missing_response_tagged_trait_with_form_error(self):
"""Post request gives a warning message and redirects if no study response, even with bad data."""
self.tagged_trait.dcc_review.study_response.delete()
url = self.get_url(self.tagged_trait.pk)
form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''}
response = self.client.post(url, form_data)
self.assertRedirects(response, self.need_decision_url)
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Cannot create', str(messages[0]))
def test_get_message_and_redirect_response_agree_tagged_trait(self):
"""Get request gives a warning message and redirects if the tagged trait has study response status agree."""
self.tagged_trait.dcc_review.study_response.status = models.StudyResponse.STATUS_AGREE
self.tagged_trait.dcc_review.study_response.save()
url = self.get_url(self.tagged_trait.pk)
response = self.client.get(url)
self.assertRedirects(response, self.need_decision_url)
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Cannot create', str(messages[0]))
def test_post_message_and_redirect_response_agree_tagged_trait(self):
"""Post request gives a warning message and redirects if the tagged trait has study response status agree."""
self.tagged_trait.dcc_review.study_response.status = models.StudyResponse.STATUS_AGREE
self.tagged_trait.dcc_review.study_response.save()
url = self.get_url(self.tagged_trait.pk)
form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'looks good'}
response = self.client.post(url, form_data)
self.assertRedirects(response, self.need_decision_url)
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Cannot create', str(messages[0]))
def test_post_message_and_redirect_response_agree_tagged_trait_with_form_error(self):
"""Post request gives a warning message and redirects if study response status agree, even with bad data."""
self.tagged_trait.dcc_review.study_response.status = models.StudyResponse.STATUS_AGREE
self.tagged_trait.dcc_review.study_response.save()
url = self.get_url(self.tagged_trait.pk)
form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''}
response = self.client.post(url, form_data)
self.assertRedirects(response, self.need_decision_url)
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Cannot create', str(messages[0]))
def test_get_message_and_redirect_to_update_for_previous_decision(self):
"""Shows warning message and redirects to update page if TaggedTrait is already decided."""
dcc_decision = factories.DCCDecisionFactory.create(
dcc_review=self.tagged_trait.dcc_review,
decision=models.DCCDecision.DECISION_REMOVE
)
# Now try to review it through the web interface.
response = self.client.get(self.get_url(self.tagged_trait.pk))
self.assertRedirects(
response, reverse('tags:tagged-traits:pk:dcc-decision:update', args=[self.tagged_trait.pk]))
# Check for warning message.
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Switched to updating decision', str(messages[0]))
# The previous DCCDecision was not updated.
self.assertEqual(self.tagged_trait.dcc_review.dcc_decision, dcc_decision)
def test_post_message_and_redirect_to_update_for_previous_decision(self):
"""Shows warning message and does not save decision if TaggedTrait is already decided."""
dcc_decision = factories.DCCDecisionFactory.create(
dcc_review=self.tagged_trait.dcc_review,
decision=models.DCCDecision.DECISION_REMOVE
)
# Now try to review it through the web interface.
form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'looks good'}
response = self.client.post(self.get_url(self.tagged_trait.pk), form_data)
self.assertRedirects(
response, reverse('tags:tagged-traits:pk:dcc-decision:update', args=[self.tagged_trait.pk]))
# Check for warning message.
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Switched to updating', str(messages[0]))
# The previous DCCDecision was not updated.
self.assertEqual(self.tagged_trait.dcc_review.dcc_decision, dcc_decision)
def test_post_message_and_redirect_to_update_for_previous_decision_with_form_error(self):
"""Shows warning message and redirects if TaggedTrait is already decided, with a form error."""
dcc_decision = factories.DCCDecisionFactory.create(
dcc_review=self.tagged_trait.dcc_review,
decision=models.DCCDecision.DECISION_REMOVE
)
# Now try to review it through the web interface.
form_data = {forms.DCCDecisionForm.SUBMIT_REMOVE: 'Confirm', 'comment': ''}
response = self.client.post(self.get_url(self.tagged_trait.pk), form_data)
self.assertRedirects(
response, reverse('tags:tagged-traits:pk:dcc-decision:update', args=[self.tagged_trait.pk]))
# Check for warning message.
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Switched to updating', str(messages[0]))
# The previous DCCDecision was not updated.
self.assertEqual(self.tagged_trait.dcc_review.dcc_decision, dcc_decision)
    def test_shows_other_tags(self):
        """Other tags linked to the same trait are included in the page."""
        another_tagged_trait = factories.TaggedTraitFactory.create(trait=self.tagged_trait.trait)
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        context = response.context
        self.assertTrue(context['show_other_tags'])
        content = str(response.content)
        self.assertIn(another_tagged_trait.tag.title, content)
        self.assertIn(self.tagged_trait.tag.title, content)
    def test_shows_archived_other_tags(self):
        """Archived tags linked to the same trait are also included in the page."""
        another_tagged_trait = factories.TaggedTraitFactory.create(trait=self.tagged_trait.trait, archived=True)
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        context = response.context
        self.assertTrue(context['show_other_tags'])
        content = str(response.content)
        self.assertIn(another_tagged_trait.tag.title, content)
        self.assertIn(self.tagged_trait.tag.title, content)
def test_archives_tagged_trait_with_dccdecision_remove(self):
"""Creating a remove DCCDecision archives the tagged trait."""
self.assertFalse(self.tagged_trait.archived)
form_data = {forms.DCCDecisionForm.SUBMIT_REMOVE: 'Remove', 'comment': 'get rid of it'}
response = self.client.post(self.get_url(self.tagged_trait.pk), form_data)
dcc_decision = models.DCCDecision.objects.latest('created')
self.tagged_trait.refresh_from_db()
self.assertTrue(self.tagged_trait.archived)
def test_tagged_trait_nonarchived_after_dccdecision_confirm(self):
"""Creating a confirm DCCDecision results in the tagged trait being non-archived."""
self.assertFalse(self.tagged_trait.archived)
form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'looks good'}
response = self.client.post(self.get_url(self.tagged_trait.pk), form_data)
dcc_decision = models.DCCDecision.objects.latest('created')
self.tagged_trait.refresh_from_db()
self.assertFalse(self.tagged_trait.archived)
def test_get_message_and_redirect_response_deprecated_tagged_trait(self):
"""Get request gives a warning message and redirects if the tagged trait is deprecated."""
study_version = self.tagged_trait.trait.source_dataset.source_study_version
study_version.i_is_deprecated = True
study_version.save()
url = self.get_url(self.tagged_trait.pk)
response = self.client.get(url)
self.assertRedirects(response, self.need_decision_url)
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Oops!', str(messages[0]))
self.assertIn('newer version', str(messages[0]))
def test_post_message_and_redirect_response_deprecated_tagged_trait(self):
"""Post request gives a warning message and redirects if the tagged trait is deprecated."""
study_version = self.tagged_trait.trait.source_dataset.source_study_version
study_version.i_is_deprecated = True
study_version.save()
url = self.get_url(self.tagged_trait.pk)
form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'looks good'}
response = self.client.post(url, form_data)
self.tagged_trait.refresh_from_db()
self.assertFalse(hasattr(self.tagged_trait.dcc_review, 'dcc_decision'))
self.assertRedirects(response, self.need_decision_url)
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Oops!', str(messages[0]))
self.assertIn('newer version', str(messages[0]))
class DCCDecisionCreateDCCAnalystTest(DCCDecisionCreateDCCTestsMixin, DCCAnalystLoginTestCase):
    """Run all DCCDecisionCreateDCCTestsMixin tests while logged in as a DCC analyst."""

    # Run all tests in DCCDecisionCreateDCCTestsMixin, as a DCC analyst.
    pass
class DCCDecisionCreateDCCDeveloperTest(DCCDecisionCreateDCCTestsMixin, DCCDeveloperLoginTestCase):
    """Run all DCCDecisionCreateDCCTestsMixin tests while logged in as a DCC developer."""

    # Run all tests in DCCDecisionCreateDCCTestsMixin, as a DCC developer.
    pass
class DCCDecisionCreateOtherUserTest(UserLoginTestCase):
    """Non-DCC users are forbidden from the DCCDecision create view."""

    def setUp(self):
        super().setUp()
        self.study_response = factories.StudyResponseFactory.create(status=models.StudyResponse.STATUS_DISAGREE)
        self.tagged_trait = self.study_response.dcc_review.tagged_trait

    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:tagged-traits:pk:dcc-decision:new', args=args)

    def test_forbidden_get_request(self):
        """Get returns forbidden status code for non-DCC users."""
        resp = self.client.get(self.get_url(self.tagged_trait.pk))
        self.assertEqual(403, resp.status_code)

    def test_forbidden_post_request(self):
        """Post returns forbidden status code for non-DCC users."""
        resp = self.client.post(self.get_url(self.tagged_trait.pk), {})
        self.assertEqual(403, resp.status_code)

    def test_forbidden_get_request_with_existing_decision(self):
        """Get returns forbidden status code for non-DCC users when decision exists."""
        factories.DCCDecisionFactory.create(
            dcc_review=self.tagged_trait.dcc_review, decision=models.DCCDecision.DECISION_REMOVE)
        resp = self.client.get(self.get_url(self.tagged_trait.pk))
        self.assertEqual(403, resp.status_code)

    def test_forbidden_post_request_with_existing_decision(self):
        """Post returns forbidden status code for non-DCC users when decision exists."""
        factories.DCCDecisionFactory.create(
            dcc_review=self.tagged_trait.dcc_review, decision=models.DCCDecision.DECISION_REMOVE)
        resp = self.client.post(self.get_url(self.tagged_trait.pk), {})
        self.assertEqual(403, resp.status_code)
class DCCDecisionUpdateDCCTestsMixin(object):
    """Tests of the DCCDecision update view, shared by DCC analyst and developer test cases."""

    def setUp(self):
        super().setUp()
        # Start from a disagree study response with an existing confirm decision to update.
        self.study_response = factories.StudyResponseFactory.create(status=models.StudyResponse.STATUS_DISAGREE)
        self.tagged_trait = self.study_response.dcc_review.tagged_trait
        self.dcc_decision = factories.DCCDecisionFactory.create(
            dcc_review=self.tagged_trait.dcc_review, decision=models.DCCDecision.DECISION_CONFIRM)
        study = self.tagged_trait.trait.source_dataset.source_study_version.study
        self.need_decision_url = reverse(
            'tags:tag:study:need-decision', args=[self.tagged_trait.tag.pk, study.pk])

    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:tagged-traits:pk:dcc-decision:update', args=args)
def test_view_success_code(self):
"""Returns successful response code."""
response = self.client.get(self.get_url(self.tagged_trait.pk))
self.assertEqual(response.status_code, 200)
def test_context_data(self):
"""View has appropriate data in the context."""
response = self.client.get(self.get_url(self.tagged_trait.pk))
context = response.context
self.assertIn('form', context)
self.assertIsInstance(context['form'], forms.DCCDecisionForm)
self.assertIn('tagged_trait', context)
self.assertEqual(context['tagged_trait'], self.tagged_trait)
def test_post_confirm_decision_updates_comment(self):
"""Posting valid data to the form correctly updates a DCCDecision by changing comment."""
original_comment = self.dcc_decision.comment
form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'looks good'}
response = self.client.post(self.get_url(self.tagged_trait.pk), form_data)
self.assertRedirects(response, self.need_decision_url)
self.dcc_decision.refresh_from_db()
# Correctly updates a DCCDecision for this TaggedTrait.
updated_dcc_decision = models.DCCDecision.objects.latest('modified')
self.assertEqual(self.dcc_decision, updated_dcc_decision)
self.assertNotEqual(self.dcc_decision.comment, original_comment)
# Check for success message.
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Successfully updated', str(messages[0]))
def test_post_remove_decision_updates_decision_and_comment(self):
"""Posting valid data to the form correctly updates a DCCDecision decision and comment."""
original_comment = self.dcc_decision.comment
form_data = {forms.DCCDecisionForm.SUBMIT_REMOVE: 'Remove', 'comment': 'foo'}
response = self.client.post(self.get_url(self.tagged_trait.pk), form_data)
self.assertRedirects(response, self.need_decision_url)
self.dcc_decision.refresh_from_db()
# Correctly updates a DCCDecision for this TaggedTrait.
updated_dcc_decision = models.DCCDecision.objects.latest('modified')
self.assertEqual(self.dcc_decision, updated_dcc_decision)
self.assertNotEqual(self.dcc_decision.comment, original_comment)
# Check for success message.
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Successfully updated', str(messages[0]))
def test_change_confirm_to_remove(self):
"""Updating a dcc decision from confirm to remove is successful."""
self.dcc_decision.delete()
self.dcc_decision = factories.DCCDecisionFactory.create(
dcc_review=self.tagged_trait.dcc_review, decision=models.DCCDecision.DECISION_CONFIRM)
form_data = {forms.DCCDecisionForm.SUBMIT_REMOVE: 'Remove', 'comment': 'remove it'}
response = self.client.post(self.get_url(self.tagged_trait.pk), form_data)
self.assertRedirects(response, self.need_decision_url)
self.dcc_decision.refresh_from_db()
updated_dcc_decision = models.DCCDecision.objects.latest('modified')
self.assertEqual(self.dcc_decision, updated_dcc_decision)
self.assertEqual(self.dcc_decision.decision, models.DCCDecision.DECISION_REMOVE)
# Check for success message.
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Successfully updated', str(messages[0]))
def test_change_remove_to_confirm(self):
"""Updating a dcc decision from remove to confirm is successful."""
self.dcc_decision.delete()
self.dcc_decision = factories.DCCDecisionFactory.create(
dcc_review=self.tagged_trait.dcc_review, decision=models.DCCDecision.DECISION_REMOVE)
self.tagged_trait.archive()
self.tagged_trait.refresh_from_db()
form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'keep it'}
response = self.client.post(self.get_url(self.tagged_trait.pk), form_data)
self.assertRedirects(response, self.need_decision_url)
self.dcc_decision.refresh_from_db()
updated_dcc_decision = models.DCCDecision.objects.latest('modified')
self.assertEqual(self.dcc_decision, updated_dcc_decision)
self.assertEqual(self.dcc_decision.decision, models.DCCDecision.DECISION_CONFIRM)
# Check for success message.
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Successfully updated', str(messages[0]))
def test_form_error_missing_comment_for_remove(self):
"""Posting bad data to the form shows a form error and does not update decision."""
form_data = {forms.DCCDecisionForm.SUBMIT_REMOVE: 'Remove', 'comment': ''}
response = self.client.post(self.get_url(self.tagged_trait.pk), form_data)
self.assertEqual(response.status_code, 200)
self.dcc_decision.refresh_from_db()
self.assertFormError(response, 'form', 'comment', 'Comment cannot be blank.')
# Does not modify a DCCDecision for this TaggedTrait.
self.assertNotEqual(self.dcc_decision.comment, form_data['comment'])
self.assertNotEqual(self.dcc_decision.decision, models.DCCDecision.DECISION_REMOVE)
# No messages.
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 0)
def test_form_error_missing_comment_for_confirm(self):
"""Posting bad data to the form shows a form error."""
form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''}
response = self.client.post(self.get_url(self.tagged_trait.pk), form_data)
self.assertEqual(response.status_code, 200)
self.dcc_decision.refresh_from_db()
self.assertFormError(response, 'form', 'comment', 'Comment cannot be blank.')
# Does not modify a DCCDecision for this TaggedTrait.
self.assertNotEqual(self.dcc_decision.comment, form_data['comment'])
# No messages.
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 0)
    def test_get_view_with_invalid_tagged_trait_pk(self):
        """Returns a 404 page with a get request if the tagged trait doesn't exist."""
        url = self.get_url(self.tagged_trait.pk + 1)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)
    def test_post_view_with_invalid_tagged_trait_pk(self):
        """Returns a 404 page if the tagged trait pk in the URL doesn't exist."""
        url = self.get_url(self.tagged_trait.pk + 1)
        form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'looks good'}
        response = self.client.post(url, form_data)
        self.assertEqual(response.status_code, 404)
def test_get_message_and_redirect_missing_review_tagged_trait(self):
"""Get request gives a warning message and redirects if the tagged trait has no dcc review."""
self.tagged_trait.dcc_review.hard_delete() # Also deletes dcc_decision and study_response!
# Reset the objects saved to this testcase.
self.tagged_trait = models.TaggedTrait.objects.get(pk=self.tagged_trait.pk)
self.study_response = None
self.dcc_decision = None
self.assertFalse(hasattr(self.tagged_trait, 'dcc_review'))
url = self.get_url(self.tagged_trait.pk)
response = self.client.get(url, follow=True)
create_decision_url = reverse('tags:tagged-traits:pk:dcc-decision:new', args=[self.tagged_trait.pk])
# Redirects first to the create page, then to the need_decision page.
self.assertEqual(response.redirect_chain, [(create_decision_url, 302, ), (self.need_decision_url, 302, )])
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 2)
self.assertIn('Switched to creating', str(messages[0]))
self.assertIn('Cannot create', str(messages[1]))
self.assertFalse(hasattr(self.tagged_trait, 'dcc_review'))
def test_post_message_and_redirect_missing_review_tagged_trait(self):
"""Post request gives a warning message and redirects if the tagged trait has no dcc review."""
self.tagged_trait.dcc_review.hard_delete() # Also deletes dcc_decision and study_response!
# Reset the objects saved to this testcase.
self.tagged_trait = models.TaggedTrait.objects.get(pk=self.tagged_trait.pk)
self.study_response = None
self.dcc_decision = None
self.assertFalse(hasattr(self.tagged_trait, 'dcc_review'))
url = self.get_url(self.tagged_trait.pk)
form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'looks good'}
response = self.client.post(url, form_data, follow=True)
create_decision_url = reverse('tags:tagged-traits:pk:dcc-decision:new', args=[self.tagged_trait.pk])
# Redirects first to the create page, then to the need_decision page.
self.assertEqual(response.redirect_chain, [(create_decision_url, 302, ), (self.need_decision_url, 302, )])
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 2)
self.assertIn('Switched to creating', str(messages[0]))
self.assertIn('Cannot create', str(messages[1]))
self.assertFalse(hasattr(self.tagged_trait, 'dcc_review'))
def test_post_message_and_redirect_missing_review_tagged_trait_with_form_error(self):
"""Post request gives a warning message and redirects if no dcc review, even with bad data."""
self.tagged_trait.dcc_review.hard_delete() # Also deletes dcc_decision and study_response!
# Reset the objects saved to this testcase.
self.tagged_trait = models.TaggedTrait.objects.get(pk=self.tagged_trait.pk)
self.study_response = None
self.dcc_decision = None
self.assertFalse(hasattr(self.tagged_trait, 'dcc_review'))
url = self.get_url(self.tagged_trait.pk)
form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''}
response = self.client.post(url, form_data, follow=True)
create_decision_url = reverse('tags:tagged-traits:pk:dcc-decision:new', args=[self.tagged_trait.pk])
# Redirects first to the create page, then to the need_decision page.
self.assertEqual(response.redirect_chain, [(create_decision_url, 302, ), (self.need_decision_url, 302, )])
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 2)
self.assertIn('Switched to creating', str(messages[0]))
self.assertIn('Cannot create', str(messages[1]))
self.assertFalse(hasattr(self.tagged_trait, 'dcc_review'))
def test_get_message_and_redirect_review_confirmed_tagged_trait(self):
"""Get request gives a warning message and redirects if the tagged trait has dcc review status confirmed."""
original_comment = self.dcc_decision.comment
self.tagged_trait.dcc_review.status = models.DCCReview.STATUS_CONFIRMED
self.tagged_trait.dcc_review.save()
url = self.get_url(self.tagged_trait.pk)
response = self.client.get(url)
self.dcc_decision.refresh_from_db()
self.assertRedirects(response, self.need_decision_url)
self.assertEqual(original_comment, self.dcc_decision.comment)
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Cannot update', str(messages[0]))
def test_post_message_and_redirect_review_confirmed_tagged_trait(self):
"""Post request gives a warning message and redirects if the tagged trait has dcc review status confirmed."""
original_comment = self.dcc_decision.comment
self.tagged_trait.dcc_review.status = models.DCCReview.STATUS_CONFIRMED
self.tagged_trait.dcc_review.save()
url = self.get_url(self.tagged_trait.pk)
form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'looks good'}
response = self.client.post(url, form_data)
self.dcc_decision.refresh_from_db()
self.assertRedirects(response, self.need_decision_url)
self.assertEqual(original_comment, self.dcc_decision.comment)
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Cannot update', str(messages[0]))
def test_post_message_and_redirect_review_confirmed_tagged_trait_with_form_error(self):
"""Post request gives a warning message and redirects if dcc review status confirmed, even with bad data."""
original_comment = self.dcc_decision.comment
self.tagged_trait.dcc_review.status = models.DCCReview.STATUS_CONFIRMED
self.tagged_trait.dcc_review.save()
url = self.get_url(self.tagged_trait.pk)
form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''}
response = self.client.post(url, form_data)
self.dcc_decision.refresh_from_db()
self.assertRedirects(response, self.need_decision_url)
self.assertEqual(original_comment, self.dcc_decision.comment)
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Cannot update', str(messages[0]))
def test_get_success_missing_response_tagged_trait(self):
"""Get response is successful if the tagged trait has no study response."""
self.tagged_trait.dcc_review.study_response.hard_delete()
url = self.get_url(self.tagged_trait.pk)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 0)
def test_post_updates_decision_missing_response_tagged_trait(self):
"""Post request successfully updates decision when the tagged trait has no study response."""
original_comment = self.dcc_decision.comment
self.tagged_trait.dcc_review.study_response.hard_delete()
url = self.get_url(self.tagged_trait.pk)
form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'looks good'}
response = self.client.post(url, form_data)
self.dcc_decision.refresh_from_db()
self.assertRedirects(response, self.need_decision_url)
self.assertNotEqual(original_comment, self.dcc_decision.comment)
self.assertEqual(self.dcc_decision.comment, form_data['comment'])
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Successfully updated', str(messages[0]))
def test_post_does_not_redirect_with_missing_response_tagged_trait_with_form_error(self):
"""Post request does not give message and redirect, but doesn't update decision when study response is missing and data is bad.""" # noqa
original_comment = self.dcc_decision.comment
self.tagged_trait.dcc_review.study_response.hard_delete()
url = self.get_url(self.tagged_trait.pk)
form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''}
response = self.client.post(url, form_data)
self.dcc_decision.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(original_comment, self.dcc_decision.comment)
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 0)
def test_get_message_and_redirect_response_agree_tagged_trait(self):
"""Get request gives a warning message and redirects if the tagged trait has study response status agree."""
original_comment = self.dcc_decision.comment
self.tagged_trait.dcc_review.study_response.status = models.StudyResponse.STATUS_AGREE
self.tagged_trait.dcc_review.study_response.save()
url = self.get_url(self.tagged_trait.pk)
response = self.client.get(url)
self.dcc_decision.refresh_from_db()
self.assertRedirects(response, self.need_decision_url)
self.assertEqual(original_comment, self.dcc_decision.comment)
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Cannot update', str(messages[0]))
def test_post_message_and_redirect_response_agree_tagged_trait(self):
"""Post request gives a warning message and redirects if the tagged trait has study response status agree."""
original_comment = self.dcc_decision.comment
self.tagged_trait.dcc_review.study_response.status = models.StudyResponse.STATUS_AGREE
self.tagged_trait.dcc_review.study_response.save()
url = self.get_url(self.tagged_trait.pk)
form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'looks good'}
response = self.client.post(url, form_data)
self.dcc_decision.refresh_from_db()
self.assertRedirects(response, self.need_decision_url)
self.assertEqual(original_comment, self.dcc_decision.comment)
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Cannot update', str(messages[0]))
def test_post_message_and_redirect_response_agree_tagged_trait_with_form_error(self):
"""Post request gives a warning message and redirects if study response status agree, even with bad data."""
original_comment = self.dcc_decision.comment
self.tagged_trait.dcc_review.study_response.status = models.StudyResponse.STATUS_AGREE
self.tagged_trait.dcc_review.study_response.save()
url = self.get_url(self.tagged_trait.pk)
form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''}
response = self.client.post(url, form_data)
self.dcc_decision.refresh_from_db()
self.assertRedirects(response, self.need_decision_url)
self.assertEqual(original_comment, self.dcc_decision.comment)
messages = list(response.wsgi_request._messages)
self.assertEqual(len(messages), 1)
self.assertIn('Cannot update', str(messages[0]))
def test_get_message_and_redirect_to_create_for_missing_decision(self):
    """Shows warning message and redirects to create page if TaggedTrait has no decision."""
    # Delete the DCC Decision and reset the other objects saved to this testcase.
    self.dcc_decision.delete()
    # Re-fetch from the db so cached related objects don't still reference the deleted decision.
    self.study_response = models.StudyResponse.objects.get(pk=self.study_response.pk)
    self.tagged_trait = self.study_response.dcc_review.tagged_trait
    self.dcc_decision = None
    self.assertFalse(hasattr(self.tagged_trait.dcc_review, 'dcc_decision'))
    response = self.client.get(self.get_url(self.tagged_trait.pk))
    # With no decision to update, the view hands off to the create view.
    self.assertRedirects(
        response, reverse('tags:tagged-traits:pk:dcc-decision:new', args=[self.tagged_trait.pk]))
    # Check for warning message.
    messages = list(response.wsgi_request._messages)
    self.assertEqual(len(messages), 1)
    self.assertIn('Switched to creating', str(messages[0]))
    self.assertFalse(hasattr(self.tagged_trait.dcc_review, 'dcc_decision'))
def test_post_message_and_redirect_to_create_for_missing_decision(self):
    """Shows warning message, does not update, and redirects to create view if TaggedTrait has no decision."""
    # Delete the DCC Decision and reset the other objects saved to this testcase.
    self.dcc_decision.delete()
    # Re-fetch from the db so cached related objects don't still reference the deleted decision.
    self.study_response = models.StudyResponse.objects.get(pk=self.study_response.pk)
    self.tagged_trait = self.study_response.dcc_review.tagged_trait
    self.dcc_decision = None
    self.assertFalse(hasattr(self.tagged_trait.dcc_review, 'dcc_decision'))
    form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'looks good'}
    response = self.client.post(self.get_url(self.tagged_trait.pk), form_data)
    # With no decision to update, the view hands off to the create view.
    self.assertRedirects(
        response, reverse('tags:tagged-traits:pk:dcc-decision:new', args=[self.tagged_trait.pk]))
    # Check for warning message.
    messages = list(response.wsgi_request._messages)
    self.assertEqual(len(messages), 1)
    self.assertIn('Switched to creating', str(messages[0]))
    # The input DCCDecision was not saved.
    self.assertFalse(hasattr(self.tagged_trait.dcc_review, 'dcc_decision'))
def test_post_message_and_redirect_to_create_for_missing_decision_with_form_error(self):
    """Shows warning message, does not update, and redirects to create if no decision exists and data is bad."""
    # Delete the DCC Decision and reset the other objects saved to this testcase.
    self.dcc_decision.delete()
    # Re-fetch from the db so cached related objects don't still reference the deleted decision.
    self.study_response = models.StudyResponse.objects.get(pk=self.study_response.pk)
    self.tagged_trait = self.study_response.dcc_review.tagged_trait
    self.dcc_decision = None
    self.assertFalse(hasattr(self.tagged_trait.dcc_review, 'dcc_decision'))
    # Empty comment is invalid form data, but the redirect should happen anyway.
    form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': ''}
    response = self.client.post(self.get_url(self.tagged_trait.pk), form_data)
    self.tagged_trait.dcc_review.refresh_from_db()
    self.assertRedirects(
        response, reverse('tags:tagged-traits:pk:dcc-decision:new', args=[self.tagged_trait.pk]))
    # Check for warning message.
    messages = list(response.wsgi_request._messages)
    self.assertEqual(len(messages), 1)
    self.assertIn('Switched to creating', str(messages[0]))
    # The input DCCDecision was not saved.
    self.assertFalse(hasattr(self.tagged_trait.dcc_review, 'dcc_decision'))
def test_shows_other_tags(self):
    """Other tags linked to the same trait are shown on the page."""
    another_tagged_trait = factories.TaggedTraitFactory.create(trait=self.tagged_trait.trait)
    response = self.client.get(self.get_url(self.tagged_trait.pk))
    context = response.context
    self.assertTrue(context['show_other_tags'])
    content = str(response.content)
    # Both the other tag's title and this tagged trait's own tag title render on the page.
    self.assertIn(another_tagged_trait.tag.title, content)
    self.assertIn(self.tagged_trait.tag.title, content)
def test_shows_archived_other_tags(self):
    """Archived other tags linked to the same trait are shown on the page."""
    # Same as test_shows_other_tags, except the other tagged trait is archived.
    another_tagged_trait = factories.TaggedTraitFactory.create(trait=self.tagged_trait.trait, archived=True)
    response = self.client.get(self.get_url(self.tagged_trait.pk))
    context = response.context
    self.assertTrue(context['show_other_tags'])
    content = str(response.content)
    # The archived tag's title still renders, alongside this tagged trait's own tag.
    self.assertIn(another_tagged_trait.tag.title, content)
    self.assertIn(self.tagged_trait.tag.title, content)
def test_shows_tag_only_once_when_it_is_archived(self):
    """The tag is only shown once, even when the tagged variable is archived."""
    self.tagged_trait.archive()
    response = self.client.get(self.get_url(self.tagged_trait.pk))
    context = response.context
    self.assertTrue(context['show_other_tags'])
    # The trait's own tag must not be duplicated into either "other tags"
    # list, regardless of its archived state.
    # (Removed an unused `content = str(response.content)` local; the
    # assertions here operate on the context, not on the rendered page.)
    self.assertNotIn(self.tagged_trait.tag, context['other_tags'])
    self.assertNotIn(self.tagged_trait.tag, context['archived_other_tags'])
def test_archives_tagged_trait_changed_from_confirm_to_remove(self):
    """Updating a DCCDecision from confirm to remove archives the tagged trait."""
    # Replace the decision from setUp with a "confirm" decision so we can flip it.
    self.dcc_decision.delete()
    self.dcc_decision = factories.DCCDecisionFactory.create(
        dcc_review=self.tagged_trait.dcc_review, decision=models.DCCDecision.DECISION_CONFIRM)
    self.assertFalse(self.tagged_trait.archived)
    form_data = {forms.DCCDecisionForm.SUBMIT_REMOVE: 'Remove', 'comment': 'get rid of it'}
    response = self.client.post(self.get_url(self.tagged_trait.pk), form_data)
    dcc_decision = models.DCCDecision.objects.latest('modified')
    self.tagged_trait.refresh_from_db()
    self.assertEqual(form_data['comment'], dcc_decision.comment)
    # Switching the decision to "remove" archives the tagged trait.
    self.assertTrue(self.tagged_trait.archived)
def test_unarchives_tagged_trait_changed_from_remove_to_confirm(self):
    """Updating a DCCDecision from remove to confirm unarchives the tagged trait."""
    # Replace the decision from setUp with a "remove" decision and archive the trait,
    # mirroring the state a removed tagged trait would be in.
    self.dcc_decision.delete()
    self.dcc_decision = factories.DCCDecisionFactory.create(
        dcc_review=self.tagged_trait.dcc_review, decision=models.DCCDecision.DECISION_REMOVE)
    self.tagged_trait.archive()
    self.tagged_trait.refresh_from_db()
    self.assertTrue(self.tagged_trait.archived)
    form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'looks good'}
    response = self.client.post(self.get_url(self.tagged_trait.pk), form_data)
    dcc_decision = models.DCCDecision.objects.latest('modified')
    self.tagged_trait.refresh_from_db()
    self.assertEqual(form_data['comment'], dcc_decision.comment)
    # Switching the decision to "confirm" unarchives the tagged trait.
    self.assertFalse(self.tagged_trait.archived)
def test_get_deprecated_tagged_trait(self):
    """A get request redirects with a warning for a deprecated trait."""
    # Mark the trait's study version as deprecated.
    study_version = self.dcc_decision.dcc_review.tagged_trait.trait.source_dataset.source_study_version
    study_version.i_is_deprecated = True
    study_version.save()
    response = self.client.get(self.get_url(self.tagged_trait.pk))
    self.dcc_decision.refresh_from_db()
    self.assertRedirects(response, self.need_decision_url)
    messages = list(response.wsgi_request._messages)
    self.assertEqual(len(messages), 1)
    # The warning points at the newer study version.
    self.assertIn('newer version', str(messages[0]))
def test_post_deprecated_tagged_trait(self):
    """Posting valid data to the form does not update a DCCDecision for a deprecated trait."""
    # Mark the trait's study version as deprecated.
    study_version = self.dcc_decision.dcc_review.tagged_trait.trait.source_dataset.source_study_version
    study_version.i_is_deprecated = True
    study_version.save()
    original_comment = self.dcc_decision.comment
    form_data = {forms.DCCDecisionForm.SUBMIT_CONFIRM: 'Confirm', 'comment': 'looks good'}
    response = self.client.post(self.get_url(self.tagged_trait.pk), form_data)
    self.dcc_decision.refresh_from_db()
    self.assertRedirects(response, self.need_decision_url)
    # The existing decision's comment must not have been changed.
    self.assertEqual(original_comment, self.dcc_decision.comment)
    messages = list(response.wsgi_request._messages)
    self.assertEqual(len(messages), 1)
    self.assertIn('newer version', str(messages[0]))
class DCCDecisionUpdateDCCAnalystTest(DCCDecisionUpdateDCCTestsMixin, DCCAnalystLoginTestCase):
    """Run all tests in DCCDecisionUpdateDCCTestsMixin, as a DCC analyst."""

    pass
class DCCDecisionUpdateDCCDeveloperTest(DCCDecisionUpdateDCCTestsMixin, DCCDeveloperLoginTestCase):
    """Run all tests in DCCDecisionUpdateDCCTestsMixin, as a DCC developer."""

    pass
class DCCDecisionUpdateOtherUserTest(UserLoginTestCase):
    """Non-DCC users must get 403 Forbidden from the DCCDecision update view."""

    def setUp(self):
        super().setUp()
        # A "disagree" study response is the state in which a DCC decision
        # would normally be made.
        self.study_response = factories.StudyResponseFactory.create(status=models.StudyResponse.STATUS_DISAGREE)
        self.tagged_trait = self.study_response.dcc_review.tagged_trait

    def get_url(self, *args):
        """Get the url for the view this class is supposed to test."""
        return reverse('tags:tagged-traits:pk:dcc-decision:update', args=args)

    def test_forbidden_get_request(self):
        """Get returns forbidden status code for non-DCC users."""
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        self.assertEqual(response.status_code, 403)

    def test_forbidden_post_request(self):
        """Post returns forbidden status code for non-DCC users."""
        response = self.client.post(self.get_url(self.tagged_trait.pk), {})
        self.assertEqual(response.status_code, 403)

    def test_forbidden_get_request_with_existing_decision(self):
        """Get returns forbidden status code for non-DCC users when decision exists."""
        factories.DCCDecisionFactory.create(
            dcc_review=self.tagged_trait.dcc_review, decision=models.DCCDecision.DECISION_REMOVE)
        response = self.client.get(self.get_url(self.tagged_trait.pk))
        self.assertEqual(response.status_code, 403)

    def test_forbidden_post_request_with_existing_decision(self):
        """Post returns forbidden status code for non-DCC users when decision exists."""
        factories.DCCDecisionFactory.create(
            dcc_review=self.tagged_trait.dcc_review, decision=models.DCCDecision.DECISION_REMOVE)
        response = self.client.post(self.get_url(self.tagged_trait.pk), {})
        self.assertEqual(response.status_code, 403)
class TagsLoginRequiredTest(LoginRequiredTestCase):
    """Check that every url in the tags app requires login."""

    def test_tags_login_required(self):
        """All tags urls redirect to login page if no user is logged in."""
        self.assert_redirect_all_urls('tags')
| 55.464706
| 146
| 0.703164
| 61,628
| 509,166
| 5.551665
| 0.012008
| 0.05964
| 0.025691
| 0.016941
| 0.951534
| 0.940027
| 0.931677
| 0.924101
| 0.910276
| 0.902384
| 0
| 0.005116
| 0.194899
| 509,166
| 9,179
| 147
| 55.470748
| 0.82951
| 0.111117
| 0
| 0.879842
| 0
| 0
| 0.143912
| 0.087257
| 0
| 0
| 0
| 0
| 0.355565
| 1
| 0.093972
| false
| 0.003546
| 0.001773
| 0.003273
| 0.116612
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8e63790ace0d7fc3dc389234a66cfc22c7259e76
| 40,593
|
py
|
Python
|
daqa-gen/qpas/compare.py
|
facebookresearch/daqa
|
bf96ad3bffdd80834e94edffe796534e6290e533
|
[
"CC-BY-4.0"
] | 19
|
2019-12-30T19:00:48.000Z
|
2022-03-31T09:21:05.000Z
|
daqa-gen/qpas/compare.py
|
facebookresearch/daqa
|
bf96ad3bffdd80834e94edffe796534e6290e533
|
[
"CC-BY-4.0"
] | 1
|
2021-04-15T20:25:50.000Z
|
2021-04-22T16:03:12.000Z
|
daqa-gen/qpas/compare.py
|
facebookresearch/daqa
|
bf96ad3bffdd80834e94edffe796534e6290e533
|
[
"CC-BY-4.0"
] | 11
|
2020-01-04T19:37:24.000Z
|
2021-11-06T16:19:53.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from qpas.utils import (compute_rel_diff, get_lst_durations, get_lst_events,
get_lst_loudness, sample_duration, sample_loudness,
sample_number, sample_second_number,
sample_rel_duration, sample_rel_loudness,
sanitize_question)
def compare_ordinal(dataset, narrative, _):
    """Ask whether the <O1>-th and <O2>-th sounds in the narrative were the same.

    Returns a (question, answer) tuple; answer is 'yes' or 'no'.
    Raises AssertionError if the two sampled ordinals collide.
    """
    templates = [
        'Was the <O1> [sound event,sound] [the same as,similar to] the <O2> [sound event,sound]?',  # noqa: E501
        'Was the <O1> [sound event,sound] and <O2> [sound event,sound] [the same,similar]?',  # noqa: E501
        'Were the <O1> and <O2> [sound events,sounds] [the same,similar]?',
    ]
    question = str(np.random.choice(templates))  # sample a question template
    events = get_lst_events(narrative)
    num_a, ord_a = sample_number(len(events))
    num_b, ord_b = sample_second_number(len(events), num_a)
    assert num_a != num_b, 'Question (compare_ordinal) illposed.'
    # Fill in both ordinal placeholders, then fix grammar.
    for placeholder, text in (('<O1>', ord_a), ('<O2>', ord_b)):
        question = question.replace(placeholder, text)
    question = sanitize_question(question)
    answer = 'yes' if events[num_a - 1] == events[num_b - 1] else 'no'
    return question, answer
def compare_ordinal_event(dataset, narrative, _):
    """Ask whether the <O>-th sound in the narrative was a given <S> <A> event.

    Returns a (question, answer) tuple; answer is 'yes' or 'no'.
    """
    templates = [
        'Was the <O> [sound event,sound] [a,an] <S> <A>?',  # noqa: E501
        'Did the <O> [sound event,sound] [sound,seem] like [a,an] <S> <A>?',  # noqa: E501
        '[Listening to,Hearing] the <O> [sound event,sound], was it [a,an] <S> <A>?',  # noqa: E501
        '[Listening to,Hearing] the <O> [sound event,sound], did it [sound,seem] like [a,an] <S> <A>?',  # noqa: E501
    ]
    question = str(np.random.choice(templates))  # sample a question template
    events = get_lst_events(narrative)
    number, ordinal = sample_number(len(events))
    event = str(np.random.choice(dataset['events']))  # sample an event type
    source = str(np.random.choice(dataset['sources'][event]))  # sample source
    action = str(np.random.choice(dataset['actions'][event]))  # sample action
    # Fill in the ordinal, source, and action placeholders, then fix grammar.
    for placeholder, text in (('<O>', ordinal), ('<S>', source), ('<A>', action)):
        question = question.replace(placeholder, text)
    question = sanitize_question(question)
    answer = 'yes' if events[number - 1] == event else 'no'
    return question, answer
def compare_loudness(dataset, narrative, rel_diff=0.1):
    """Ask whether one unique event sounded louder/quieter than another.

    Args:
        dataset: dict with 'sources' and 'actions' lists keyed by event type.
        narrative: narrative structure consumed by the qpas.utils helpers.
        rel_diff: minimum relative loudness gap for a well-posed question.
            Now defaults to 0.1, matching every other compare_* generator in
            this module (previously it was a required positional argument;
            existing callers are unaffected).

    Returns:
        (question, answer) tuple; answer is 'yes' or 'no'.

    Raises:
        AssertionError: if the narrative cannot support a well-posed question
            (no two distinct unique events, or their loudnesses are too close).
    """
    questions = ['Was the <S1> <A1> <RL> than the <S2> <A2>?',
                 'Was the sound of the <S1> <A1> <RL> than the sound of the <S2> <A2>?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the sound of the <S1> <A1> and the sound of the <S2> <A2>, was the former <RL>?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the sounds of the <S1> <A1> and the <S2> <A2>, was the former <RL>?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the sound of the <S2> <A2> and the sound of the <S1> <A1>, was the latter <RL>?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the sounds of the <S2> <A2> and the <S1> <A1>, was the latter <RL>?',  # noqa: E501
                 ]
    question = str(np.random.choice(questions))  # sample question
    lst_events = get_lst_events(narrative)
    # Only events occurring exactly once can be referenced unambiguously.
    unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
    assert len(unique_lst_events) > 0, 'Question (compare_loudness) illposed.'
    event_1 = str(np.random.choice(unique_lst_events))  # sample event
    source_1 = str(np.random.choice(dataset['sources'][event_1]))
    action_1 = str(np.random.choice(dataset['actions'][event_1]))
    rel_loudness = sample_rel_loudness()
    x_unique_lst_events = [e for e in unique_lst_events if e != event_1]
    assert len(x_unique_lst_events) > 0, \
        'Question (compare_loudness) illposed.'
    event_2 = str(np.random.choice(x_unique_lst_events))  # sample event
    source_2 = str(np.random.choice(dataset['sources'][event_2]))
    action_2 = str(np.random.choice(dataset['actions'][event_2]))
    assert lst_events.count(event_1) == 1, \
        'Question (compare_loudness) illposed.'
    assert lst_events.count(event_2) == 1, \
        'Question (compare_loudness) illposed.'
    assert event_1 != event_2, 'Question (compare_loudness) illposed.'
    question = question.replace('<S1>', source_1)  # insert source
    question = question.replace('<A1>', action_1)  # insert action
    question = question.replace('<RL>', rel_loudness)  # insert loudness
    question = question.replace('<S2>', source_2)  # insert source
    question = question.replace('<A2>', action_2)  # insert action
    question = sanitize_question(question)
    lst_loudness = get_lst_loudness(narrative)
    e_1_loudness = lst_loudness[lst_events.index(event_1)]
    e_2_loudness = lst_loudness[lst_events.index(event_2)]
    # Assert a good margin in relative loudness
    rel_loudness_diff = compute_rel_diff(np.array(e_1_loudness),
                                         np.array(e_2_loudness))
    assert np.sum(rel_loudness_diff < rel_diff) <= 0, \
        'Question (compare_loudness) illposed.'
    # The chosen template determines the comparison direction.
    if 'quiet' in question:
        answer = 'yes' if e_1_loudness < e_2_loudness else 'no'
    elif 'loud' in question:
        answer = 'yes' if e_1_loudness > e_2_loudness else 'no'
    else:
        assert False, 'Loudness illdefined in Question (compare_loudness).'
    return question, answer
def compare_loudness_ordinal(dataset, narrative, rel_diff=0.1):
    """Ask whether the <O1>-th sound was louder/quieter than the <O2>-th.

    Returns a (question, answer) tuple; answer is 'yes' or 'no'. Raises
    AssertionError if the two sounds' loudness gap is below rel_diff
    (question would be ill-posed).
    """
    questions = ['Was the <O1> [sound event,sound] <RL> than the <O2> [sound event,sound]?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O1> [sound event,sound] and the <O2> [sound event,sound], was the former <RL>?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O1> and <O2> [sound events,sounds], was the former <RL>?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O2> [sound event,sound] and the <O1> [sound event,sound], was the latter <RL>?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O2> and <O1> [sound events,sounds], was the latter <RL>?',  # noqa: E501
                 ]
    question = str(np.random.choice(questions))  # sample question
    lst_events = get_lst_events(narrative)
    number_1, ordinal_1 = sample_number(len(lst_events))
    rel_loudness = sample_rel_loudness()
    number_2, ordinal_2 = sample_second_number(len(lst_events), number_1)
    assert number_1 != number_2, 'Question (compare_loudness_ordinal) illposed.'
    question = question.replace('<O1>', ordinal_1)  # insert ordinal
    question = question.replace('<RL>', rel_loudness)  # insert loudness
    question = question.replace('<O2>', ordinal_2)  # insert ordinal
    question = sanitize_question(question)  # correct grammar
    lst_loudness = get_lst_loudness(narrative)
    e_1_loudness = lst_loudness[number_1 - 1]
    e_2_loudness = lst_loudness[number_2 - 1]
    # Assert a good margin in relative loudness
    rel_loudness_diff = compute_rel_diff(np.array(e_1_loudness),
                                         np.array(e_2_loudness))
    assert np.sum(rel_loudness_diff < rel_diff) <= 0, \
        'Question (compare_loudness_ordinal) illposed.'
    # The chosen template determines the comparison direction.
    if 'quiet' in question:
        answer = 'yes' if e_1_loudness < e_2_loudness else 'no'
    elif 'loud' in question:
        answer = 'yes' if e_1_loudness > e_2_loudness else 'no'
    else:
        assert False, 'Loudness illdefined in Question (compare_loudness_ordinal).'
    return question, answer
def compare_loudness_event_ordinal(dataset, narrative, rel_diff=0.1):
    """Ask whether a named unique event was louder/quieter than the <O>-th sound.

    Returns a (question, answer) tuple; answer is 'yes' or 'no'. Raises
    AssertionError if the comparison would be ill-posed (no unique event,
    ordinal collides with the event, or loudness gap below rel_diff).
    """
    questions = ['Was the <S> <A> <RL> than the <O> [sound event,sound]?',
                 'Was the sound of the <S> <A> <RL> than the <O> [sound event,sound]?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the <S> <A> and the <O> [sound event,sound], was the former <RL>?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the <S> <A>, was the latter <RL>?',  # noqa: E501
                 ]
    question = str(np.random.choice(questions))  # sample question
    lst_events = get_lst_events(narrative)
    # Only events occurring exactly once can be referenced unambiguously.
    unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
    assert len(unique_lst_events) > 0, \
        'Question (compare_loudness_event_ordinal) illposed.'
    event = str(np.random.choice(unique_lst_events))  # sample event
    source = str(np.random.choice(dataset['sources'][event]))
    action = str(np.random.choice(dataset['actions'][event]))
    rel_loudness = sample_rel_loudness()
    # Sample an ordinal that is guaranteed to differ from the event's position.
    number, ordinal = sample_second_number(len(lst_events), lst_events.index(event) + 1)
    assert lst_events.count(event) == 1, \
        'Question (compare_loudness_event_ordinal) illposed.'
    assert lst_events.index(event) != (number - 1), \
        'Question (compare_loudness_event_ordinal) illposed.'
    question = question.replace('<S>', source)  # insert source
    question = question.replace('<A>', action)  # insert action
    question = question.replace('<RL>', rel_loudness)  # insert loudness
    question = question.replace('<O>', ordinal)  # insert ordinal
    question = sanitize_question(question)  # correct grammar
    lst_loudness = get_lst_loudness(narrative)
    e_1_loudness = lst_loudness[lst_events.index(event)]
    e_2_loudness = lst_loudness[number - 1]
    # Assert a good margin in relative loudness
    rel_loudness_diff = compute_rel_diff(np.array(e_1_loudness),
                                         np.array(e_2_loudness))
    assert np.sum(rel_loudness_diff < rel_diff) <= 0, \
        'Question (compare_loudness_event_ordinal) illposed.'
    # The chosen template determines the comparison direction.
    if 'quiet' in question:
        answer = 'yes' if e_1_loudness < e_2_loudness else 'no'
    elif 'loud' in question:
        answer = 'yes' if e_1_loudness > e_2_loudness else 'no'
    else:
        assert False, \
            'Loudness illdefined in Question (compare_loudness_event_ordinal).'
    return question, answer
def compare_loudness_ordinal_event(dataset, narrative, rel_diff=0.1):
    """Ask whether the <O>-th sound was louder/quieter than a named unique event.

    Mirror image of compare_loudness_event_ordinal: here the ordinal sound is
    the first operand of the comparison. Returns (question, answer); answer is
    'yes' or 'no'. Raises AssertionError when the question would be ill-posed.
    """
    questions = ['Was the <O> [sound event,sound] <RL> than the <S> <A>?',
                 'Was the <O> [sound event,sound] <RL> than the sound of the <S> <A>?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the <S> <A>, was the former <RL>?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the <S> <A> and the <O> [sound event,sound], was the latter <RL>?',  # noqa: E501
                 ]
    question = str(np.random.choice(questions))  # sample question
    lst_events = get_lst_events(narrative)
    # Only events occurring exactly once can be referenced unambiguously.
    unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
    assert len(unique_lst_events) > 0, \
        'Question (compare_loudness_ordinal_event) illposed.'
    event = str(np.random.choice(unique_lst_events))  # sample event
    source = str(np.random.choice(dataset['sources'][event]))
    action = str(np.random.choice(dataset['actions'][event]))
    rel_loudness = sample_rel_loudness()
    # Sample an ordinal that is guaranteed to differ from the event's position.
    number, ordinal = sample_second_number(len(lst_events), lst_events.index(event) + 1)
    assert lst_events.count(event) == 1, \
        'Question (compare_loudness_ordinal_event) illposed.'
    assert lst_events.index(event) != (number - 1), \
        'Question (compare_loudness_ordinal_event) illposed.'
    question = question.replace('<S>', source)  # insert source
    question = question.replace('<A>', action)  # insert action
    question = question.replace('<RL>', rel_loudness)  # insert loudness
    question = question.replace('<O>', ordinal)  # insert ordinal
    question = sanitize_question(question)  # correct grammar
    lst_loudness = get_lst_loudness(narrative)
    # Note the operand order: the ordinal sound is e_1 here.
    e_1_loudness = lst_loudness[number - 1]
    e_2_loudness = lst_loudness[lst_events.index(event)]
    # Assert a good margin in relative loudness
    rel_loudness_diff = compute_rel_diff(np.array(e_1_loudness),
                                         np.array(e_2_loudness))
    assert np.sum(rel_loudness_diff < rel_diff) <= 0, \
        'Question (compare_loudness_ordinal_event) illposed.'
    # The chosen template determines the comparison direction.
    if 'quiet' in question:
        answer = 'yes' if e_1_loudness < e_2_loudness else 'no'
    elif 'loud' in question:
        answer = 'yes' if e_1_loudness > e_2_loudness else 'no'
    else:
        assert False, \
            'Loudness illdefined in Question (compare_loudness_ordinal_event).'
    return question, answer
def compare_same_loudness(dataset, narrative, rel_diff=0.1):
    """Ask whether two unique events were approximately equally loud.

    Returns a (question, answer) tuple; answer is 'yes' when the relative
    loudness difference is <= rel_diff. Raises AssertionError when the
    difference falls in the ambiguous band (rel_diff, 2*rel_diff) or when
    two distinct unique events cannot be found.
    """
    questions = ['Was the <S1> <A1> [roughly,approximately] as <L> as the <S2> <A2>?',  # noqa: E501
                 'Was the sound of the <S1> <A1> [roughly,approximately] as <L> as the sound of the <S2> <A2>?',  # noqa: E501
                 'Was the sound of the <S1> <A1> [roughly,approximately] the same loudness as the sound of the <S2> <A2>?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the sound of the <S1> <A1> and the sound of the <S2> <A2>, did they [roughly,approximately] have the same loudness?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the sounds of the <S1> <A1> and the <S2> <A2>, did they [roughly,approximately] have the same loudness?',  # noqa: E501
                 ]
    question = str(np.random.choice(questions))  # sample question
    lst_events = get_lst_events(narrative)
    # Only events occurring exactly once can be referenced unambiguously.
    unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
    assert len(unique_lst_events) > 0, \
        'Question (compare_same_loudness) illposed.'
    event_1 = str(np.random.choice(unique_lst_events))  # sample event
    source_1 = str(np.random.choice(dataset['sources'][event_1]))
    action_1 = str(np.random.choice(dataset['actions'][event_1]))
    loudness = sample_loudness()
    x_unique_lst_events = [e for e in unique_lst_events if e != event_1]
    assert len(x_unique_lst_events) > 0, \
        'Question (compare_same_loudness) illposed.'
    event_2 = str(np.random.choice(x_unique_lst_events))  # sample event
    source_2 = str(np.random.choice(dataset['sources'][event_2]))
    action_2 = str(np.random.choice(dataset['actions'][event_2]))
    assert lst_events.count(event_1) == 1, \
        'Question (compare_same_loudness) illposed.'
    assert lst_events.count(event_2) == 1, \
        'Question (compare_same_loudness) illposed.'
    assert event_1 != event_2, 'Question (compare_same_loudness) illposed.'
    question = question.replace('<S1>', source_1)  # insert source
    question = question.replace('<A1>', action_1)  # insert action
    question = question.replace('<L>', loudness)  # insert loudness
    question = question.replace('<S2>', source_2)  # insert source
    question = question.replace('<A2>', action_2)  # insert action
    question = sanitize_question(question)
    lst_loudness = get_lst_loudness(narrative)
    e_1_loudness = lst_loudness[lst_events.index(event_1)]
    e_2_loudness = lst_loudness[lst_events.index(event_2)]
    rel_loudness_diff = compute_rel_diff(np.array(e_1_loudness),
                                         np.array(e_2_loudness))
    # Assert a good margin in relative loudness
    assert np.sum(np.logical_and(rel_loudness_diff > rel_diff,
                                 rel_loudness_diff < (2 * rel_diff))) <= 0, \
        'Question (compare_same_loudness) illposed.'
    answer = 'yes' if rel_loudness_diff <= rel_diff else 'no'
    return question, answer
def compare_same_loudness_ordinal(dataset, narrative, rel_diff=0.1):
    """Ask whether the <O1>-th and <O2>-th sounds were approximately equally loud.

    Returns a (question, answer) tuple; answer is 'yes' when the relative
    loudness difference is <= rel_diff. Raises AssertionError when the
    difference falls in the ambiguous band (rel_diff, 2*rel_diff).
    """
    questions = ['Was the <O1> [sound event,sound] [roughly,approximately] as <L> as the <O2> [sound event,sound]?',  # noqa: E501
                 'Was the <O1> and <O2> [sound events,sounds] [roughly,approximately] as <L>?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O1> [sound event,sound] and the <O2> [sound event,sound], were they [roughly,approximately] as loud?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O1> [sound event,sound] and the <O2> [sound event,sound], did they [roughly,approximately] have the same loudness?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O1> and <O2> [sound events,sounds], were they [roughly,approximately] as loud?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O1> and <O2> [sound events,sounds], did they have [roughly,approximately] the same loudness?',  # noqa: E501
                 ]
    question = str(np.random.choice(questions))  # sample question
    lst_events = get_lst_events(narrative)
    number_1, ordinal_1 = sample_number(len(lst_events))
    loudness = sample_loudness()
    number_2, ordinal_2 = sample_second_number(len(lst_events), number_1)
    assert number_1 != number_2, 'Question (compare_same_loudness_ordinal) illposed.'
    question = question.replace('<O1>', ordinal_1)  # insert ordinal
    question = question.replace('<L>', loudness)  # insert loudness
    question = question.replace('<O2>', ordinal_2)  # insert ordinal
    question = sanitize_question(question)  # correct grammar
    lst_loudness = get_lst_loudness(narrative)
    e_1_loudness = lst_loudness[number_1 - 1]
    e_2_loudness = lst_loudness[number_2 - 1]
    rel_loudness_diff = compute_rel_diff(np.array(e_1_loudness),
                                         np.array(e_2_loudness))
    # Assert a good margin in relative loudness
    assert np.sum(np.logical_and(rel_loudness_diff > rel_diff,
                                 rel_loudness_diff < (2 * rel_diff))) <= 0, \
        'Question (compare_same_loudness_ordinal) illposed.'
    answer = 'yes' if rel_loudness_diff <= rel_diff else 'no'
    return question, answer
def compare_same_loudness_event_ordinal(dataset, narrative, rel_diff=0.1):
    """Ask whether a named unique event and the <O>-th sound were equally loud.

    Returns a (question, answer) tuple; answer is 'yes' when the relative
    loudness difference is <= rel_diff. Raises AssertionError when the
    question would be ill-posed (no unique event, ordinal collides with the
    event, or the difference falls in the band (rel_diff, 2*rel_diff)).
    """
    questions = ['Was the <S> <A> [roughly,approximately] as <L> as the <O> [sound event,sound]?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the <S> <A> and the <O> [sound event,sound], were they [roughly,approximately] as loud?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the sound of the <S> <A> and the <O> [sound event,sound], were they [roughly,approximately] as loud?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the <S> <A> and the <O> [sound event,sound], did they [roughly,approximately] have the same loudness?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the sound of the <S> <A> and the <O> [sound event,sound], did they [roughly,approximately] have the same loudness?',  # noqa: E501
                 'Was the <O> [sound event,sound] [roughly,approximately] as <L> as the <S> <A>?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the <S> <A>, were they [roughly,approximately] as loud?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the sound of the <S> <A>, were they [roughly,approximately] as loud?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the <S> <A>, did they [roughly,approximately] have the same loudness?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the sound of the <S> <A>, did they [roughly,approximately] have the same loudness?',  # noqa: E501
                 ]
    question = str(np.random.choice(questions))  # sample question
    lst_events = get_lst_events(narrative)
    # Only events occurring exactly once can be referenced unambiguously.
    unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
    assert len(unique_lst_events) > 0, \
        'Question (compare_same_loudness_event_ordinal) illposed.'
    event = str(np.random.choice(unique_lst_events))  # sample event
    source = str(np.random.choice(dataset['sources'][event]))
    action = str(np.random.choice(dataset['actions'][event]))
    loudness = sample_loudness()
    # Sample an ordinal that is guaranteed to differ from the event's position.
    number, ordinal = sample_second_number(len(lst_events), lst_events.index(event) + 1)
    assert lst_events.count(event) == 1, \
        'Question (compare_same_loudness_event_ordinal) illposed.'
    assert lst_events.index(event) != (number - 1), \
        'Question (compare_same_loudness_event_ordinal) illposed.'
    question = question.replace('<S>', source)  # insert source
    question = question.replace('<A>', action)  # insert action
    question = question.replace('<L>', loudness)  # insert loudness
    question = question.replace('<O>', ordinal)  # insert ordinal
    question = sanitize_question(question)  # correct grammar
    lst_loudness = get_lst_loudness(narrative)
    e_1_loudness = lst_loudness[lst_events.index(event)]
    e_2_loudness = lst_loudness[number - 1]
    rel_loudness_diff = compute_rel_diff(np.array(e_1_loudness),
                                         np.array(e_2_loudness))
    # Assert a good margin in relative loudness
    assert np.sum(np.logical_and(rel_loudness_diff > rel_diff,
                                 rel_loudness_diff < (2 * rel_diff))) <= 0, \
        'Question (compare_same_loudness_event_ordinal) illposed.'
    answer = 'yes' if rel_loudness_diff <= rel_diff else 'no'
    return question, answer
def compare_duration(dataset, narrative, rel_diff=0.1):
    """Ask whether one unique event's sound was longer/shorter than another's.

    Duration analogue of compare_loudness. Returns a (question, answer)
    tuple; answer is 'yes' or 'no'. Raises AssertionError if two distinct
    unique events cannot be found or their duration gap is below rel_diff.
    """
    questions = ['Was the <S1> <A1> <RD> than the <S2> <A2>?',
                 'Was the sound of the <S1> <A1> <RD> than the sound of the <S2> <A2>?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the sound of the <S1> <A1> and the sound of the <S2> <A2>, was the former <RD>?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the sounds of the <S1> <A1> and the <S2> <A2>, was the former <RD>?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the sound of the <S2> <A2> and the sound of the <S1> <A1>, was the latter <RD>?',  # noqa: E501
                 '[Comparing,Listening to,Hearing] the sounds of the <S2> <A2> and the <S1> <A1>, was the latter <RD>?',  # noqa: E501
                 ]
    question = str(np.random.choice(questions))  # sample question
    lst_events = get_lst_events(narrative)
    # Only events occurring exactly once can be referenced unambiguously.
    unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
    assert len(unique_lst_events) > 0, \
        'Question (compare_duration) illposed.'
    event_1 = str(np.random.choice(unique_lst_events))  # sample event
    source_1 = str(np.random.choice(dataset['sources'][event_1]))
    action_1 = str(np.random.choice(dataset['actions'][event_1]))
    rel_duration = sample_rel_duration()
    x_unique_lst_events = [e for e in unique_lst_events if e != event_1]
    assert len(x_unique_lst_events) > 0, \
        'Question (compare_duration) illposed.'
    event_2 = str(np.random.choice(x_unique_lst_events))  # sample event
    source_2 = str(np.random.choice(dataset['sources'][event_2]))
    action_2 = str(np.random.choice(dataset['actions'][event_2]))
    assert lst_events.count(event_1) == 1, \
        'Question (compare_duration) illposed.'
    assert lst_events.count(event_2) == 1, \
        'Question (compare_duration) illposed.'
    assert event_1 != event_2, 'Question (compare_duration) illposed.'
    question = question.replace('<S1>', source_1)  # insert source
    question = question.replace('<A1>', action_1)  # insert action
    question = question.replace('<RD>', rel_duration)  # insert duration
    question = question.replace('<S2>', source_2)  # insert source
    question = question.replace('<A2>', action_2)  # insert action
    question = sanitize_question(question)
    lst_duration = get_lst_durations(narrative)
    e_1_duration = lst_duration[lst_events.index(event_1)]
    e_2_duration = lst_duration[lst_events.index(event_2)]
    # Assert a good margin in relative duration
    rel_duration_diff = compute_rel_diff(np.array(e_1_duration),
                                         np.array(e_2_duration))
    assert np.sum(rel_duration_diff < rel_diff) <= 0, \
        'Question (compare_duration) illposed.'
    # The chosen template determines the comparison direction.
    if 'short' in question:
        answer = 'yes' if e_1_duration < e_2_duration else 'no'
    elif 'long' in question:
        answer = 'yes' if e_1_duration > e_2_duration else 'no'
    else:
        assert False, 'Duration illdefined in Question (compare_duration).'
    return question, answer
def compare_duration_ordinal(dataset, narrative, rel_diff=0.1):
    """Build a yes/no question comparing the durations of two events
    referenced by ordinal position in the narrative.

    Returns a ``(question, answer)`` tuple; raises AssertionError when
    the sampled pair does not leave a clear duration margin (ill-posed).
    """
    questions = ['Was the <O1> [sound event,sound] <RD> than the <O2> [sound event,sound]?', # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O1> [sound event,sound] and the <O2> [sound event,sound], was the former <RD>?', # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O1> and <O2> [sound events,sounds], was the former <RD>?', # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O2> [sound event,sound] and the <O1> [sound event,sound], was the latter <RD>?', # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O2> and <O1> [sound events,sounds], was the latter <RD>?', # noqa: E501
                 ]
    question = str(np.random.choice(questions))  # sample question template
    lst_events = get_lst_events(narrative)
    number_1, ordinal_1 = sample_number(len(lst_events))
    rel_duration = sample_rel_duration()
    number_2, ordinal_2 = sample_second_number(len(lst_events), number_1)
    assert number_1 != number_2, 'Question (compare_duration_ordinal) illposed.'
    # Fill in the sampled ordinals and the relative-duration phrase
    for placeholder, text in (('<O1>', ordinal_1),
                              ('<RD>', rel_duration),
                              ('<O2>', ordinal_2)):
        question = question.replace(placeholder, text)
    question = sanitize_question(question)  # correct grammar
    durations = get_lst_durations(narrative)
    duration_1 = durations[number_1 - 1]
    duration_2 = durations[number_2 - 1]
    # Require a comfortable relative-duration margin between the events
    margin = compute_rel_diff(np.array(duration_1), np.array(duration_2))
    assert np.sum(margin < rel_diff) <= 0, \
        'Question (compare_duration_ordinal) illposed.'
    if 'short' in question:
        answer = 'yes' if duration_1 < duration_2 else 'no'
    elif 'long' in question:
        answer = 'yes' if duration_1 > duration_2 else 'no'
    else:
        assert False, 'Duration illdefined in Question (compare_duration_ordinal).'
    return question, answer
def compare_duration_event_ordinal(dataset, narrative, rel_diff=0.1):
    """Build a yes/no question comparing the duration of a uniquely
    occurring event against the event at a sampled ordinal position.

    Returns a ``(question, answer)`` tuple; raises AssertionError when
    the narrative cannot support a well-posed question.
    """
    questions = ['Was the <S> <A> <RD> than the <O> [sound event,sound]?',
                 'Was the sound of the <S> <A> <RD> than the <O> [sound event,sound]?', # noqa: E501
                 '[Comparing,Listening to,Hearing] the <S> <A> and the <O> [sound event,sound], was the former <RD>?', # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the <S> <A>, was the latter <RD>?', # noqa: E501
                 ]
    question = str(np.random.choice(questions))  # sample question template
    lst_events = get_lst_events(narrative)
    unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
    assert len(unique_lst_events) > 0, \
        'Question (compare_duration_event_ordinal) illposed.'
    event = str(np.random.choice(unique_lst_events))  # sample unique event
    source = str(np.random.choice(dataset['sources'][event]))
    action = str(np.random.choice(dataset['actions'][event]))
    rel_duration = sample_rel_duration()
    number, ordinal = sample_second_number(len(lst_events),
                                           lst_events.index(event) + 1)
    assert lst_events.count(event) == 1, \
        'Question (compare_duration_event_ordinal) illposed.'
    assert lst_events.index(event) != (number - 1), \
        'Question (compare_duration_event_ordinal) illposed.'
    # Fill in source/action, the duration phrase and the ordinal
    for placeholder, text in (('<S>', source), ('<A>', action),
                              ('<RD>', rel_duration), ('<O>', ordinal)):
        question = question.replace(placeholder, text)
    question = sanitize_question(question)  # correct grammar
    durations = get_lst_durations(narrative)
    duration_1 = durations[lst_events.index(event)]
    duration_2 = durations[number - 1]
    # Require a comfortable relative-duration margin between the events
    margin = compute_rel_diff(np.array(duration_1), np.array(duration_2))
    assert np.sum(margin < rel_diff) <= 0, \
        'Question (compare_duration_event_ordinal) illposed.'
    if 'short' in question:
        answer = 'yes' if duration_1 < duration_2 else 'no'
    elif 'long' in question:
        answer = 'yes' if duration_1 > duration_2 else 'no'
    else:
        assert False, \
            'Duration illdefined in Question (compare_duration_event_ordinal).'
    return question, answer
def compare_duration_ordinal_event(dataset, narrative, rel_diff=0.1):
    """Build a yes/no question comparing the duration of the event at a
    sampled ordinal position against a uniquely occurring event.

    Mirror image of ``compare_duration_event_ordinal``: the ordinal
    event is the first operand of the comparison here.
    """
    questions = ['Was the <O> [sound event,sound] <RD> than the <S> <A>?',
                 'Was the <O> [sound event,sound] <RD> than the sound of the <S> <A>?', # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the <S> <A>, was the former <RD>?', # noqa: E501
                 '[Comparing,Listening to,Hearing] the <S> <A> and the <O> [sound event,sound], was the latter <RD>?', # noqa: E501
                 ]
    question = str(np.random.choice(questions))  # sample question template
    lst_events = get_lst_events(narrative)
    unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
    assert len(unique_lst_events) > 0, \
        'Question (compare_duration_ordinal_event) illposed.'
    event = str(np.random.choice(unique_lst_events))  # sample unique event
    source = str(np.random.choice(dataset['sources'][event]))
    action = str(np.random.choice(dataset['actions'][event]))
    rel_duration = sample_rel_duration()
    number, ordinal = sample_second_number(len(lst_events),
                                           lst_events.index(event) + 1)
    assert lst_events.count(event) == 1, \
        'Question (compare_duration_ordinal_event) illposed.'
    assert lst_events.index(event) != (number - 1), \
        'Question (compare_duration_ordinal_event) illposed.'
    # Fill in source/action, the duration phrase and the ordinal
    for placeholder, text in (('<S>', source), ('<A>', action),
                              ('<RD>', rel_duration), ('<O>', ordinal)):
        question = question.replace(placeholder, text)
    question = sanitize_question(question)  # correct grammar
    durations = get_lst_durations(narrative)
    duration_1 = durations[number - 1]
    duration_2 = durations[lst_events.index(event)]
    # Require a comfortable relative-duration margin between the events
    margin = compute_rel_diff(np.array(duration_1), np.array(duration_2))
    assert np.sum(margin < rel_diff) <= 0, \
        'Question (compare_duration_ordinal_event) illposed.'
    if 'short' in question:
        answer = 'yes' if duration_1 < duration_2 else 'no'
    elif 'long' in question:
        answer = 'yes' if duration_1 > duration_2 else 'no'
    else:
        assert False, \
            'Duration illdefined in Question (compare_duration_ordinal_event).'
    return question, answer
def compare_same_duration(dataset, narrative, rel_diff=0.1):
    """Build a yes/no question asking whether two uniquely occurring
    events have approximately the same duration.

    The answer is 'yes' when the relative duration difference is at most
    ``rel_diff``; borderline cases (between ``rel_diff`` and twice that)
    are rejected as ill-posed.
    """
    questions = ['Was the <S1> <A1> [roughly,approximately] as <D> as the <S2> <A2>?', # noqa: E501
                 'Was the sound of the <S1> <A1> [roughly,approximately] as <D> as the sound of the <S2> <A2>?', # noqa: E501
                 'Was the sound of the <S1> <A1> [roughly,approximately] the same duration as the sound of the <S2> <A2>?', # noqa: E501
                 '[Comparing,Listening to,Hearing] the sound of the <S1> <A1> and the sound of the <S2> <A2>, did they [roughly,approximately] have the same duration?', # noqa: E501
                 '[Comparing,Listening to,Hearing] the sounds of the <S1> <A1> and the <S2> <A2>, did they [roughly,approximately] have the same duration?', # noqa: E501
                 ]
    question = str(np.random.choice(questions))  # sample question template
    lst_events = get_lst_events(narrative)
    unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
    assert len(unique_lst_events) > 0, \
        'Question (compare_same_duration) illposed.'
    event_1 = str(np.random.choice(unique_lst_events))  # sample first event
    source_1 = str(np.random.choice(dataset['sources'][event_1]))
    action_1 = str(np.random.choice(dataset['actions'][event_1]))
    duration = sample_duration()
    remaining_events = [e for e in unique_lst_events if e != event_1]
    assert len(remaining_events) > 0, \
        'Question (compare_same_duration) illposed.'
    event_2 = str(np.random.choice(remaining_events))  # sample second event
    source_2 = str(np.random.choice(dataset['sources'][event_2]))
    action_2 = str(np.random.choice(dataset['actions'][event_2]))
    assert lst_events.count(event_1) == 1, \
        'Question (compare_same_duration) illposed.'
    assert lst_events.count(event_2) == 1, \
        'Question (compare_same_duration) illposed.'
    assert event_1 != event_2, 'Question (compare_same_duration) illposed.'
    # Fill in both source/action pairs and the duration adjective
    for placeholder, text in (('<S1>', source_1), ('<A1>', action_1),
                              ('<D>', duration), ('<S2>', source_2),
                              ('<A2>', action_2)):
        question = question.replace(placeholder, text)
    question = sanitize_question(question)
    durations = get_lst_durations(narrative)
    duration_1 = durations[lst_events.index(event_1)]
    duration_2 = durations[lst_events.index(event_2)]
    margin = compute_rel_diff(np.array(duration_1), np.array(duration_2))
    # Reject borderline margins so yes/no answers are unambiguous
    assert np.sum(np.logical_and(margin > rel_diff,
                                 margin < (2 * rel_diff))) <= 0, \
        'Question (compare_same_duration) illposed.'
    answer = 'yes' if margin <= rel_diff else 'no'
    return question, answer
def compare_same_duration_ordinal(dataset, narrative, rel_diff=0.1):
    """Build a yes/no question asking whether the events at two sampled
    ordinal positions have approximately the same duration.

    The answer is 'yes' when the relative duration difference is at most
    ``rel_diff``; borderline margins are rejected as ill-posed.
    """
    questions = ['Was the <O1> [sound event,sound] [roughly,approximately] as <D> as the <O2> [sound event,sound]?', # noqa: E501
                 'Was the <O1> and <O2> [sound events,sounds] [roughly,approximately] as <D>?', # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O1> [sound event,sound] and the <O2> [sound event,sound], were they [roughly,approximately] as <D>?', # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O1> [sound event,sound] and the <O2> [sound event,sound], did they [roughly,approximately] have the same duration?', # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O1> and <O2> [sound events,sounds], were they [roughly,approximately] as <D>?', # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O1> and <O2> [sound events,sounds], did they [roughly,approximately] have the same duration?', # noqa: E501
                 ]
    question = str(np.random.choice(questions))  # sample question template
    lst_events = get_lst_events(narrative)
    number_1, ordinal_1 = sample_number(len(lst_events))
    duration = sample_duration()
    number_2, ordinal_2 = sample_second_number(len(lst_events), number_1)
    assert number_1 != number_2, 'Question (compare_same_duration_ordinal) illposed.'
    # Fill in the sampled ordinals and the duration adjective
    for placeholder, text in (('<O1>', ordinal_1),
                              ('<D>', duration),
                              ('<O2>', ordinal_2)):
        question = question.replace(placeholder, text)
    question = sanitize_question(question)  # correct grammar
    durations = get_lst_durations(narrative)
    duration_1 = durations[number_1 - 1]
    duration_2 = durations[number_2 - 1]
    margin = compute_rel_diff(np.array(duration_1), np.array(duration_2))
    # Reject borderline margins so yes/no answers are unambiguous
    assert np.sum(np.logical_and(margin > rel_diff,
                                 margin < (2 * rel_diff))) <= 0, \
        'Question (compare_same_duration_ordinal) illposed.'
    answer = 'yes' if margin <= rel_diff else 'no'
    return question, answer
def compare_same_duration_event_ordinal(dataset, narrative, rel_diff=0.1):
    """Build a yes/no question asking whether a uniquely occurring event
    and the event at a sampled ordinal position have approximately the
    same duration.

    The answer is 'yes' when the relative duration difference is at most
    ``rel_diff``; borderline margins are rejected as ill-posed.
    """
    questions = ['Was the <S> <A> [roughly,approximately] as <D> as the <O> [sound event,sound]?', # noqa: E501
                 '[Comparing,Listening to,Hearing] the <S> <A> and the <O> [sound event,sound], were they [roughly,approximately] as <D>?', # noqa: E501
                 '[Comparing,Listening to,Hearing] the sound of the <S> <A> and the <O> [sound event,sound], were they [roughly,approximately] as <D>?', # noqa: E501
                 '[Comparing,Listening to,Hearing] the <S> <A> and the <O> [sound event,sound], did they [roughly,approximately] have the same duration?', # noqa: E501
                 '[Comparing,Listening to,Hearing] the sound of the <S> <A> and the <O> [sound event,sound], did they [roughly,approximately] have the same duration?', # noqa: E501
                 'Was the <O> [sound event,sound] [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the <S> <A>, were they [roughly,approximately] as <D>?', # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the sound of the <S> <A>, were they [roughly,approximately] as <D>?', # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the <S> <A>, did they [roughly,approximately] have the same duration?', # noqa: E501
                 '[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the sound of the <S> <A>, did they [roughly,approximately] have the same duration?', # noqa: E501
                 ]
    question = str(np.random.choice(questions))  # sample question template
    lst_events = get_lst_events(narrative)
    unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
    assert len(unique_lst_events) > 0, \
        'Question (compare_same_duration_event_ordinal) illposed.'
    event = str(np.random.choice(unique_lst_events))  # sample unique event
    source = str(np.random.choice(dataset['sources'][event]))
    action = str(np.random.choice(dataset['actions'][event]))
    duration = sample_duration()
    number, ordinal = sample_second_number(len(lst_events),
                                           lst_events.index(event) + 1)
    assert lst_events.count(event) == 1, \
        'Question (compare_same_duration_event_ordinal) illposed.'
    assert lst_events.index(event) != (number - 1), \
        'Question (compare_same_duration_event_ordinal) illposed.'
    # Fill in source/action, the duration adjective and the ordinal
    for placeholder, text in (('<S>', source), ('<A>', action),
                              ('<D>', duration), ('<O>', ordinal)):
        question = question.replace(placeholder, text)
    question = sanitize_question(question)  # correct grammar
    durations = get_lst_durations(narrative)
    duration_1 = durations[lst_events.index(event)]
    duration_2 = durations[number - 1]
    margin = compute_rel_diff(np.array(duration_1), np.array(duration_2))
    # Reject borderline margins so yes/no answers are unambiguous
    assert np.sum(np.logical_and(margin > rel_diff,
                                 margin < (2 * rel_diff))) <= 0, \
        'Question (compare_same_duration_event_ordinal) illposed.'
    answer = 'yes' if margin <= rel_diff else 'no'
    return question, answer
| 56.536212
| 186
| 0.658857
| 5,440
| 40,593
| 4.718934
| 0.025368
| 0.055744
| 0.039734
| 0.040396
| 0.974913
| 0.965954
| 0.962058
| 0.946087
| 0.939309
| 0.929531
| 0
| 0.02348
| 0.219422
| 40,593
| 717
| 187
| 56.615063
| 0.786688
| 0.079398
| 0
| 0.728988
| 0
| 0.145798
| 0.336598
| 0.066303
| 0
| 0
| 0
| 0
| 0.111492
| 1
| 0.027444
| false
| 0
| 0.005146
| 0
| 0.060034
| 0.001715
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8eecbf18bbf5f4f6632198aef222ec2e706f3f6e
| 92
|
py
|
Python
|
parameters_8000.py
|
ManasKhosla/intelli-rm
|
5c2a2e9cf1988824642a053ed60b0b397a1f99ca
|
[
"BSD-3-Clause"
] | null | null | null |
parameters_8000.py
|
ManasKhosla/intelli-rm
|
5c2a2e9cf1988824642a053ed60b0b397a1f99ca
|
[
"BSD-3-Clause"
] | null | null | null |
parameters_8000.py
|
ManasKhosla/intelli-rm
|
5c2a2e9cf1988824642a053ed60b0b397a1f99ca
|
[
"BSD-3-Clause"
] | null | null | null |
password="pbkdf2(1000,20,sha512)$982546055dd6b8d3$455b18740b2a2f93286258f012d45e0b516b3b57"
| 46
| 91
| 0.891304
| 7
| 92
| 11.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.571429
| 0.01087
| 92
| 1
| 92
| 92
| 0.32967
| 0
| 0
| 0
| 0
| 0
| 0.869565
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
79b8637d6f13b0f9e595d4355fd82c18fbe8c810
| 13,091
|
py
|
Python
|
flowx/imbound/_interface/visco/_interface/_advect_weno3.py
|
Balaras-Group/flowX
|
29c1d6209abbfab553997b557794e4d7b06a09a8
|
[
"BSD-3-Clause"
] | null | null | null |
flowx/imbound/_interface/visco/_interface/_advect_weno3.py
|
Balaras-Group/flowX
|
29c1d6209abbfab553997b557794e4d7b06a09a8
|
[
"BSD-3-Clause"
] | 7
|
2020-03-05T20:39:32.000Z
|
2020-03-13T01:11:26.000Z
|
flowx/imbound/_interface/visco/_interface/_advect_weno3.py
|
Balaras-Group/flowX
|
29c1d6209abbfab553997b557794e4d7b06a09a8
|
[
"BSD-3-Clause"
] | 1
|
2020-03-09T17:38:00.000Z
|
2020-03-09T17:38:00.000Z
|
import numpy
from numba import jit
def advect_dynamic_grid(lmda, s, u, v, dt, dx, dy, nx, ny):
    """
    Subroutine to advect dynamic x-y grid.

    Cells where ``lmda`` is non-negative are flagged; the WENO3 kernel
    only updates flagged cells, leaving the rest of ``s`` untouched.
    """
    active = lmda >= 0.0
    advect_weno3(s, active, u, v, dt, dx, dy, nx, ny)
    return
def advect_solid(s, u, v, dt, dx, dy, nx, ny):
    """
    Subroutine to advect solid interface.

    Every cell is flagged active (mask of ones), so the entire field
    ``s`` is advected by the WENO3 kernel.
    """
    active = numpy.ones_like(s, dtype=int)
    advect_weno3(s, active, u, v, dt, dx, dy, nx, ny)
    return
def advect_weno3(s, pfl, u, v, dt, dx, dy, nx, ny):
    """
    Subroutine to add additional guard cells for WENO3 stencil.

    Pads ``s``, ``u``, ``v`` and the phase flag with two extra guard
    layers per side (the WENO3 stencil reaches three cells out), fills
    the new ``s`` layers by linear extrapolation, runs the jitted WENO3
    kernel on the padded arrays, and copies the interior back into ``s``
    in place.
    """
    ng = 2  # extra guard layers required by the WENO3 stencil
    wx, wy = nx + 2 * ng, ny + 2 * ng
    s_ext = numpy.zeros((wx, wy), dtype=float)
    u_ext = numpy.zeros((wx - 1, wy), dtype=float)
    v_ext = numpy.zeros((wx, wy - 1), dtype=float)
    pfl_ext = numpy.zeros((wx, wy), dtype=int)
    s_ext[ng:-ng, ng:-ng] = s
    u_ext[ng:-ng, ng:-ng] = u
    v_ext[ng:-ng, ng:-ng] = v
    pfl_ext[ng:-ng, ng:-ng] = pfl
    # Linearly extrapolate s into the added guard layers, working from
    # the innermost new layer outwards on each of the four sides.
    for g in (1, 0):
        s_ext[g, :] = 2 * s_ext[g + 1, :] - s_ext[g + 2, :]          # x low
        s_ext[-(g + 1), :] = 2 * s_ext[-(g + 2), :] - s_ext[-(g + 3), :]  # x high
    for g in (1, 0):
        s_ext[:, g] = 2 * s_ext[:, g + 1] - s_ext[:, g + 2]          # y low
        s_ext[:, -(g + 1)] = 2 * s_ext[:, -(g + 2)] - s_ext[:, -(g + 3)]  # y high
    _jit_advect_weno3(
        s_ext,
        pfl_ext,
        u_ext,
        v_ext,
        dt,
        dx,
        dy,
        wx,
        wy,
    )
    s[:, :] = s_ext[ng:-ng, ng:-ng]
    return
@jit(nopython=True)
def _jit_advect_weno3(s, pfl, u, v, dt, dx, dy, nx, ny):
    """Advance the scalar field ``s`` by one explicit Euler step of
    advection using WENO3 upwind-biased face reconstructions.

    For every interior cell (i, j) in [3, nx-4] x [3, ny-4] the routine
    reconstructs the old field ``so`` at the four cell faces with a
    third-order WENO scheme (stencil chosen by the sign of the face
    velocity), then updates ``s[i, j]`` in place with the flux
    divergence scaled by the cell flag ``pfl[i, j]`` — cells whose flag
    is zero keep their old value.

    Parameters
    ----------
    s : 2-D float array, updated in place.
    pfl : 2-D int mask; multiplies the update term per cell.
    u, v : face velocities, staggered relative to ``s`` — ``u[i, j]``
        is the right face of cell (i, j), ``v[i, j]`` its top face.
    dt, dx, dy : time step and grid spacings.
    nx, ny : padded array extents (guard cells included).
    """
    so = numpy.copy(s)  # frozen snapshot: all stencils read the old field
    eps = 1e-15  # keeps the smoothness-indicator denominators non-zero
    for i in range(3, nx - 3):
        for j in range(3, ny - 3):
            # --Velocities on faces used for divergence --> div(u*phi)
            ul = u[i - 1, j]
            ur = u[i, j]
            vl = v[i, j - 1]
            vr = v[i, j]
            # - WENO3 stencil in X direction --------------
            # Right x-face: pick the stencil biased toward the upstream
            # side of the face velocity ur.
            if ur > 0:  # u = (+) Downwind
                s1r = so[i - 2, j]
                s2r = so[i - 1, j]
                s3r = so[i, j]
                s4r = so[i + 1, j]
                s5r = so[i + 2, j]
                # Smoothness indicators for the three candidate stencils
                rIS1r = (
                    13.0 / 12.0 * (s1r - 2.0 * s2r + s3r) ** 2.0
                    + 1.0 / 4.0 * (s1r - 4.0 * s2r + 3.0 * s3r) ** 2.0
                )
                rIS2r = (
                    13.0 / 12.0 * (s2r - 2.0 * s3r + s4r) ** 2.0
                    + 1.0 / 4.0 * (s2r - s4r) ** 2.0
                )
                rIS3r = (
                    13.0 / 12.0 * (s3r - 2.0 * s4r + s5r) ** 2.0
                    + 1.0 / 4.0 * (3.0 * s3r - 4.0 * s4r + s5r) ** 2.0
                )
                # Non-linear weights from the ideal weights 1/10, 6/10, 3/10
                aT1r = 1.0 / 10.0 / (eps + rIS1r) ** 2.0
                aT2r = 6.0 / 10.0 / (eps + rIS2r) ** 2.0
                aT3r = 3.0 / 10.0 / (eps + rIS3r) ** 2.0
                a1r = aT1r / (aT1r + aT2r + aT3r)
                a2r = aT2r / (aT1r + aT2r + aT3r)
                a3r = aT3r / (aT1r + aT2r + aT3r)
                # Candidate face-value interpolations
                fT1r = 2.0 / 6.0 * s1r - 7.0 / 6.0 * s2r + 11.0 / 6.0 * s3r
                fT2r = -1.0 / 6.0 * s2r + 5.0 / 6.0 * s3r + 2.0 / 6.0 * s4r
                fT3r = 2.0 / 6.0 * s3r + 5.0 / 6.0 * s4r - 1.0 / 6.0 * s5r
            else:  # u = (-) Upwind
                s1r = so[i - 1, j]
                s2r = so[i, j]
                s3r = so[i + 1, j]
                s4r = so[i + 2, j]
                s5r = so[i + 3, j]
                rIS1r = (
                    13.0 / 12.0 * (s1r - 2.0 * s2r + s3r) ** 2.0
                    + 1.0 / 4.0 * (s1r - 4.0 * s2r + 3.0 * s3r) ** 2.0
                )
                rIS2r = (
                    13.0 / 12.0 * (s2r - 2.0 * s3r + s4r) ** 2.0
                    + 1.0 / 4.0 * (s2r - s4r) ** 2.0
                )
                rIS3r = (
                    13.0 / 12.0 * (s3r - 2.0 * s4r + s5r) ** 2.0
                    + 1.0 / 4.0 * (3.0 * s3r - 4.0 * s4r + s5r) ** 2.0
                )
                # Ideal weights mirrored for the opposite velocity sign
                aT1r = 3.0 / 10.0 / (eps + rIS1r) ** 2.0
                aT2r = 6.0 / 10.0 / (eps + rIS2r) ** 2.0
                aT3r = 1.0 / 10.0 / (eps + rIS3r) ** 2.0
                a1r = aT1r / (aT1r + aT2r + aT3r)
                a2r = aT2r / (aT1r + aT2r + aT3r)
                a3r = aT3r / (aT1r + aT2r + aT3r)
                fT1r = -1.0 / 6.0 * s1r + 5.0 / 6.0 * s2r + 2.0 / 6.0 * s3r
                fT2r = 2.0 / 6.0 * s2r + 5.0 / 6.0 * s3r - 1.0 / 6.0 * s4r
                fT3r = 11.0 / 6.0 * s3r - 7.0 / 6.0 * s4r + 2.0 / 6.0 * s5r
            # Left x-face: same construction shifted one cell left.
            if ul > 0:  # u = (+) Downwind
                s1l = so[i - 3, j]
                s2l = so[i - 2, j]
                s3l = so[i - 1, j]
                s4l = so[i, j]
                s5l = so[i + 1, j]
                rIS1l = (
                    13.0 / 12.0 * (s1l - 2.0 * s2l + s3l) ** 2.0
                    + 1.0 / 4.0 * (s1l - 4.0 * s2l + 3.0 * s3l) ** 2.0
                )
                rIS2l = (
                    13.0 / 12.0 * (s2l - 2.0 * s3l + s4l) ** 2.0
                    + 1.0 / 4.0 * (s2l - s4l) ** 2.0
                )
                rIS3l = (
                    13.0 / 12.0 * (s3l - 2.0 * s4l + s5l) ** 2.0
                    + 1.0 / 4.0 * (3.0 * s3l - 4.0 * s4l + s5l) ** 2.0
                )
                aT1l = 1.0 / 10.0 / (eps + rIS1l) ** 2.0
                aT2l = 6.0 / 10.0 / (eps + rIS2l) ** 2.0
                aT3l = 3.0 / 10.0 / (eps + rIS3l) ** 2.0
                a1l = aT1l / (aT1l + aT2l + aT3l)
                a2l = aT2l / (aT1l + aT2l + aT3l)
                a3l = aT3l / (aT1l + aT2l + aT3l)
                fT1l = 2.0 / 6.0 * s1l - 7.0 / 6.0 * s2l + 11.0 / 6.0 * s3l
                fT2l = -1.0 / 6.0 * s2l + 5.0 / 6.0 * s3l + 2.0 / 6.0 * s4l
                fT3l = 2.0 / 6.0 * s3l + 5.0 / 6.0 * s4l - 1.0 / 6.0 * s5l
            else:  # u = (-) Upwind
                s1l = so[i - 2, j]
                s2l = so[i - 1, j]
                s3l = so[i, j]
                s4l = so[i + 1, j]
                s5l = so[i + 2, j]
                rIS1l = (
                    13.0 / 12.0 * (s1l - 2.0 * s2l + s3l) ** 2.0
                    + 1.0 / 4.0 * (s1l - 4.0 * s2l + 3.0 * s3l) ** 2.0
                )
                rIS2l = (
                    13.0 / 12.0 * (s2l - 2.0 * s3l + s4l) ** 2.0
                    + 1.0 / 4.0 * (s2l - s4l) ** 2.0
                )
                rIS3l = (
                    13.0 / 12.0 * (s3l - 2.0 * s4l + s5l) ** 2.0
                    + 1.0 / 4.0 * (3.0 * s3l - 4.0 * s4l + s5l) ** 2.0
                )
                aT1l = 3.0 / 10.0 / (eps + rIS1l) ** 2.0
                aT2l = 6.0 / 10.0 / (eps + rIS2l) ** 2.0
                aT3l = 1.0 / 10.0 / (eps + rIS3l) ** 2.0
                a1l = aT1l / (aT1l + aT2l + aT3l)
                a2l = aT2l / (aT1l + aT2l + aT3l)
                a3l = aT3l / (aT1l + aT2l + aT3l)
                fT1l = -1.0 / 6.0 * s1l + 5.0 / 6.0 * s2l + 2.0 / 6.0 * s3l
                fT2l = 2.0 / 6.0 * s2l + 5.0 / 6.0 * s3l - 1.0 / 6.0 * s4l
                fT3l = 11.0 / 6.0 * s3l - 7.0 / 6.0 * s4l + 2.0 / 6.0 * s5l
            # WENO3 interpolated PHI values at cell face...
            frx = a1r * fT1r + a2r * fT2r + a3r * fT3r
            flx = a1l * fT1l + a2l * fT2l + a3l * fT3l
            # - WENO3 stencil in Y direction --------------
            # Top y-face, same scheme with the stencil along j.
            if vr > 0:  # u = (+) Downwind
                s1r = so[i, j - 2]
                s2r = so[i, j - 1]
                s3r = so[i, j]
                s4r = so[i, j + 1]
                s5r = so[i, j + 2]
                rIS1r = (
                    13.0 / 12.0 * (s1r - 2.0 * s2r + s3r) ** 2.0
                    + 1.0 / 4.0 * (s1r - 4.0 * s2r + 3.0 * s3r) ** 2.0
                )
                rIS2r = (
                    13.0 / 12.0 * (s2r - 2.0 * s3r + s4r) ** 2.0
                    + 1.0 / 4.0 * (s2r - s4r) ** 2.0
                )
                rIS3r = (
                    13.0 / 12.0 * (s3r - 2.0 * s4r + s5r) ** 2.0
                    + 1.0 / 4.0 * (3.0 * s3r - 4.0 * s4r + s5r) ** 2.0
                )
                aT1r = 1.0 / 10.0 / (eps + rIS1r) ** 2.0
                aT2r = 6.0 / 10.0 / (eps + rIS2r) ** 2.0
                aT3r = 3.0 / 10.0 / (eps + rIS3r) ** 2.0
                a1r = aT1r / (aT1r + aT2r + aT3r)
                a2r = aT2r / (aT1r + aT2r + aT3r)
                a3r = aT3r / (aT1r + aT2r + aT3r)
                fT1r = 2.0 / 6.0 * s1r - 7.0 / 6.0 * s2r + 11.0 / 6.0 * s3r
                fT2r = -1.0 / 6.0 * s2r + 5.0 / 6.0 * s3r + 2.0 / 6.0 * s4r
                fT3r = 2.0 / 6.0 * s3r + 5.0 / 6.0 * s4r - 1.0 / 6.0 * s5r
            else:  # u = (-) Upwind
                s1r = so[i, j - 1]
                s2r = so[i, j]
                s3r = so[i, j + 1]
                s4r = so[i, j + 2]
                s5r = so[i, j + 3]
                rIS1r = (
                    13.0 / 12.0 * (s1r - 2.0 * s2r + s3r) ** 2.0
                    + 1.0 / 4.0 * (s1r - 4.0 * s2r + 3.0 * s3r) ** 2.0
                )
                rIS2r = (
                    13.0 / 12.0 * (s2r - 2.0 * s3r + s4r) ** 2.0
                    + 1.0 / 4.0 * (s2r - s4r) ** 2.0
                )
                rIS3r = (
                    13.0 / 12.0 * (s3r - 2.0 * s4r + s5r) ** 2.0
                    + 1.0 / 4.0 * (3.0 * s3r - 4.0 * s4r + s5r) ** 2.0
                )
                aT1r = 3.0 / 10.0 / (eps + rIS1r) ** 2.0
                aT2r = 6.0 / 10.0 / (eps + rIS2r) ** 2.0
                aT3r = 1.0 / 10.0 / (eps + rIS3r) ** 2.0
                a1r = aT1r / (aT1r + aT2r + aT3r)
                a2r = aT2r / (aT1r + aT2r + aT3r)
                a3r = aT3r / (aT1r + aT2r + aT3r)
                fT1r = -1.0 / 6.0 * s1r + 5.0 / 6.0 * s2r + 2.0 / 6.0 * s3r
                fT2r = 2.0 / 6.0 * s2r + 5.0 / 6.0 * s3r - 1.0 / 6.0 * s4r
                fT3r = 11.0 / 6.0 * s3r - 7.0 / 6.0 * s4r + 2.0 / 6.0 * s5r
            # Bottom y-face.
            if vl > 0:  # u = (+) Downwind
                s1l = so[i, j - 3]
                s2l = so[i, j - 2]
                s3l = so[i, j - 1]
                s4l = so[i, j]
                s5l = so[i, j + 1]
                rIS1l = (
                    13.0 / 12.0 * (s1l - 2.0 * s2l + s3l) ** 2.0
                    + 1.0 / 4.0 * (s1l - 4.0 * s2l + 3.0 * s3l) ** 2.0
                )
                rIS2l = (
                    13.0 / 12.0 * (s2l - 2.0 * s3l + s4l) ** 2.0
                    + 1.0 / 4.0 * (s2l - s4l) ** 2.0
                )
                rIS3l = (
                    13.0 / 12.0 * (s3l - 2.0 * s4l + s5l) ** 2.0
                    + 1.0 / 4.0 * (3.0 * s3l - 4.0 * s4l + s5l) ** 2.0
                )
                aT1l = 1.0 / 10.0 / (eps + rIS1l) ** 2.0
                aT2l = 6.0 / 10.0 / (eps + rIS2l) ** 2.0
                aT3l = 3.0 / 10.0 / (eps + rIS3l) ** 2.0
                a1l = aT1l / (aT1l + aT2l + aT3l)
                a2l = aT2l / (aT1l + aT2l + aT3l)
                a3l = aT3l / (aT1l + aT2l + aT3l)
                fT1l = 2.0 / 6.0 * s1l - 7.0 / 6.0 * s2l + 11.0 / 6.0 * s3l
                fT2l = -1.0 / 6.0 * s2l + 5.0 / 6.0 * s3l + 2.0 / 6.0 * s4l
                fT3l = 2.0 / 6.0 * s3l + 5.0 / 6.0 * s4l - 1.0 / 6.0 * s5l
            else:  # u = (-) Upwind
                s1l = so[i, j - 2]
                s2l = so[i, j - 1]
                s3l = so[i, j]
                s4l = so[i, j + 1]
                s5l = so[i, j + 2]
                rIS1l = (
                    13.0 / 12.0 * (s1l - 2.0 * s2l + s3l) ** 2.0
                    + 1.0 / 4.0 * (s1l - 4.0 * s2l + 3.0 * s3l) ** 2.0
                )
                rIS2l = (
                    13.0 / 12.0 * (s2l - 2.0 * s3l + s4l) ** 2.0
                    + 1.0 / 4.0 * (s2l - s4l) ** 2.0
                )
                rIS3l = (
                    13.0 / 12.0 * (s3l - 2.0 * s4l + s5l) ** 2.0
                    + 1.0 / 4.0 * (3.0 * s3l - 4.0 * s4l + s5l) ** 2.0
                )
                aT1l = 3.0 / 10.0 / (eps + rIS1l) ** 2.0
                aT2l = 6.0 / 10.0 / (eps + rIS2l) ** 2.0
                aT3l = 1.0 / 10.0 / (eps + rIS3l) ** 2.0
                a1l = aT1l / (aT1l + aT2l + aT3l)
                a2l = aT2l / (aT1l + aT2l + aT3l)
                a3l = aT3l / (aT1l + aT2l + aT3l)
                fT1l = -1.0 / 6.0 * s1l + 5.0 / 6.0 * s2l + 2.0 / 6.0 * s3l
                fT2l = 2.0 / 6.0 * s2l + 5.0 / 6.0 * s3l - 1.0 / 6.0 * s4l
                fT3l = 11.0 / 6.0 * s3l - 7.0 / 6.0 * s4l + 2.0 / 6.0 * s5l
            # WENO3 interpolated PHI values at cell face...
            fry = a1r * fT1r + a2r * fT2r + a3r * fT3r
            fly = a1l * fT1l + a2l * fT2l + a3l * fT3l
            # Explicit Euler update with the flux divergence; pfl masks
            # out cells that must not be advected.
            s[i, j] = (
                so[i, j]
                - dt * pfl[i, j] * (frx * ur - flx * ul) / dx
                - dt * pfl[i, j] * (fry * vr - fly * vl) / dy
            )
    return
| 34.540897
| 85
| 0.323581
| 1,950
| 13,091
| 2.141026
| 0.064103
| 0.057485
| 0.051737
| 0.034491
| 0.841916
| 0.83521
| 0.788982
| 0.757844
| 0.752096
| 0.749222
| 0
| 0.235203
| 0.505691
| 13,091
| 378
| 86
| 34.632275
| 0.409983
| 0.040486
| 0
| 0.584838
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01444
| false
| 0
| 0.00722
| 0
| 0.036101
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
5c124271ebcc9b8e9a58660e8b11c9fd73485ea3
| 26,119
|
py
|
Python
|
python/ngsi_v2/ngsi_v2/api/attributes_api.py
|
orchestracities/sdk
|
9dd1e618d6c013ab916f3880df84c7882f6beec6
|
[
"Apache-2.0"
] | 2
|
2019-12-22T01:01:34.000Z
|
2021-07-03T20:30:03.000Z
|
python/ngsi_v2/ngsi_v2/api/attributes_api.py
|
orchestracities/sdk
|
9dd1e618d6c013ab916f3880df84c7882f6beec6
|
[
"Apache-2.0"
] | 2
|
2019-06-06T05:45:45.000Z
|
2019-06-06T09:03:10.000Z
|
python/ngsi_v2/ngsi_v2/api/attributes_api.py
|
orchestracities/sdk
|
9dd1e618d6c013ab916f3880df84c7882f6beec6
|
[
"Apache-2.0"
] | 2
|
2021-07-03T20:30:06.000Z
|
2021-11-30T21:55:02.000Z
|
# coding: utf-8
"""
ngsi_v2
NGSI V2 API RC-2018.07 # noqa: E501
The version of the OpenAPI document: 0.2.2
Contact: info@orchestracities.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from ngsi_v2.api_client import ApiClient
from ngsi_v2.exceptions import (
ApiTypeError,
ApiValueError
)
class AttributesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
    """Store the given API client, creating a default one when omitted."""
    self.api_client = ApiClient() if api_client is None else api_client
def get_attribute_data(self, entity_id, attr_name, **kwargs):  # noqa: E501
    """get_attribute_data  # noqa: E501

    Return the attribute data for one attribute of an entity, following
    the NGSI v2 JSON attribute representation. Synchronous by default;
    pass ``async_req=True`` to get a request thread instead:

    >>> thread = api.get_attribute_data(entity_id, attr_name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str entity_id: Id of the entity (required)
    :param str attr_name: Name of the attribute to be retrieved. (required)
    :param str fiware_service: Tenant selector sent as the
        \"Fiware-Service\" HTTP header; the default service/tenant is
        used when absent.
    :param str fiware_service_path: Optional \"Fiware-ServicePath\"
        header; entities created without it belong to the root scope \"/\".
    :param str type: Entity type, to disambiguate entities sharing an id.
    :param str metadata: List of metadata names to include in the response.
    :param _preload_content: if False, return the raw
        urllib3.HTTPResponse without reading/decoding. Default True.
    :param _request_timeout: total timeout in seconds, or a
        (connection, read) tuple.
    :return: Attribute, or the request thread when called asynchronously.
    """
    # Delegate to the *_with_http_info variant, asking for body only.
    kwargs['_return_http_data_only'] = True
    return self.get_attribute_data_with_http_info(entity_id, attr_name, **kwargs)  # noqa: E501
def get_attribute_data_with_http_info(self, entity_id, attr_name, **kwargs):  # noqa: E501
    """get_attribute_data  # noqa: E501

    Retrieve one attribute of an entity as a JSON object
    (GET /entities/{entityId}/attrs/{attrName}).  A successful operation
    answers 200 OK with the attribute representation; errors use a non-2xx
    status and, optionally, an error payload.

    Synchronous by default; pass ``async_req=True`` to receive the request
    thread instead.

    :param async_req bool: execute request asynchronously
    :param str entity_id: Id of the entity (required)
    :param str attr_name: Name of the attribute to be retrieved. (required)
    :param str fiware_service: value for the "Fiware-Service" header, used
        by Orion to pick the service/tenant; the default tenant applies
        when the header is absent.
    :param str fiware_service_path: value for the optional
        "Fiware-ServicePath" header; entities created without a service
        path belong to the root scope "/".
    :param str type: entity type, to disambiguate entities sharing an id.
    :param str metadata: list of metadata names to include in the response.
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the raw urllib3.HTTPResponse object
        is returned without reading/decoding it. Default is True.
    :param _request_timeout: total timeout (one number) or a
        (connection, read) tuple.
    :return: tuple(Attribute, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    # Every keyword this endpoint accepts, including the generic client
    # options shared by all generated methods.
    accepted = {
        'entity_id', 'attr_name', 'fiware_service', 'fiware_service_path',
        'type', 'metadata',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    }

    params = {'entity_id': entity_id, 'attr_name': attr_name}
    for name, value in six.iteritems(kwargs):
        if name not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_attribute_data" % name
            )
        params[name] = value

    # Both path parameters are mandatory and must not be None.
    if params.get('entity_id') is None:
        raise ApiValueError("Missing the required parameter `entity_id` when calling `get_attribute_data`")  # noqa: E501
    if params.get('attr_name') is None:
        raise ApiValueError("Missing the required parameter `attr_name` when calling `get_attribute_data`")  # noqa: E501

    path_params = {
        'entityId': params['entity_id'],
        'attrName': params['attr_name'],
    }

    # Optional query parameters are forwarded only when they were supplied
    # (a supplied None is still forwarded, matching the generated client).
    query_params = [(key, params[key])
                    for key in ('type', 'metadata') if key in params]

    # Tenant/scope headers plus the Accept header.
    header_params = {}
    if 'fiware_service' in params:
        header_params['Fiware-Service'] = params['fiware_service']  # noqa: E501
    if 'fiware_service_path' in params:
        header_params['Fiware-ServicePath'] = params['fiware_service_path']  # noqa: E501
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['ApiKeyAuth', 'BearerAuth']  # noqa: E501

    return self.api_client.call_api(
        '/entities/{entityId}/attrs/{attrName}', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='Attribute',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def remove_a_single_attribute(self, entity_id, attr_name, **kwargs):  # noqa: E501
    """remove_a_single_attribute  # noqa: E501

    Remove one attribute from an entity
    (DELETE /entities/{entityId}/attrs/{attrName}).  A successful operation
    answers 204 No Content; errors use a non-2xx status and, optionally, an
    error payload.

    Synchronous by default; pass ``async_req=True`` to receive the request
    thread instead.

    :param async_req bool: execute request asynchronously
    :param str entity_id: Id of the entity. (required)
    :param str attr_name: Attribute name. (required)
    :param str fiware_service: value for the "Fiware-Service" header, used
        by Orion to pick the service/tenant; the default tenant applies
        when the header is absent.
    :param str fiware_service_path: value for the optional
        "Fiware-ServicePath" header; entities created without a service
        path belong to the root scope "/".
    :param str type: entity type, to disambiguate entities sharing an id.
    :param _preload_content: if False, the raw urllib3.HTTPResponse object
        is returned without reading/decoding it. Default is True.
    :param _request_timeout: total timeout (one number) or a
        (connection, read) tuple.
    :return: None
        If the method is called asynchronously, returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the body.
    kwargs.update(_return_http_data_only=True)
    return self.remove_a_single_attribute_with_http_info(entity_id, attr_name, **kwargs)  # noqa: E501
def remove_a_single_attribute_with_http_info(self, entity_id, attr_name, **kwargs):  # noqa: E501
    """remove_a_single_attribute  # noqa: E501

    Remove one attribute from an entity
    (DELETE /entities/{entityId}/attrs/{attrName}).  A successful operation
    answers 204 No Content; errors use a non-2xx status and, optionally, an
    error payload.

    Synchronous by default; pass ``async_req=True`` to receive the request
    thread instead.

    :param async_req bool: execute request asynchronously
    :param str entity_id: Id of the entity. (required)
    :param str attr_name: Attribute name. (required)
    :param str fiware_service: value for the "Fiware-Service" header, used
        by Orion to pick the service/tenant; the default tenant applies
        when the header is absent.
    :param str fiware_service_path: value for the optional
        "Fiware-ServicePath" header; entities created without a service
        path belong to the root scope "/".
    :param str type: entity type, to disambiguate entities sharing an id.
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the raw urllib3.HTTPResponse object
        is returned without reading/decoding it. Default is True.
    :param _request_timeout: total timeout (one number) or a
        (connection, read) tuple.
    :return: None
        If the method is called asynchronously, returns the request thread.
    """
    # Every keyword this endpoint accepts, including the generic client
    # options shared by all generated methods.
    accepted = {
        'entity_id', 'attr_name', 'fiware_service', 'fiware_service_path',
        'type',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    }

    params = {'entity_id': entity_id, 'attr_name': attr_name}
    for name, value in six.iteritems(kwargs):
        if name not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method remove_a_single_attribute" % name
            )
        params[name] = value

    # Both path parameters are mandatory and must not be None.
    if params.get('entity_id') is None:
        raise ApiValueError("Missing the required parameter `entity_id` when calling `remove_a_single_attribute`")  # noqa: E501
    if params.get('attr_name') is None:
        raise ApiValueError("Missing the required parameter `attr_name` when calling `remove_a_single_attribute`")  # noqa: E501

    path_params = {
        'entityId': params['entity_id'],
        'attrName': params['attr_name'],
    }

    # The only optional query parameter is the entity type (a supplied
    # None is still forwarded, matching the generated client).
    query_params = [('type', params['type'])] if 'type' in params else []

    # Tenant/scope headers plus the Accept header.
    header_params = {}
    if 'fiware_service' in params:
        header_params['Fiware-Service'] = params['fiware_service']  # noqa: E501
    if 'fiware_service_path' in params:
        header_params['Fiware-ServicePath'] = params['fiware_service_path']  # noqa: E501
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['ApiKeyAuth', 'BearerAuth']  # noqa: E501

    return self.api_client.call_api(
        '/entities/{entityId}/attrs/{attrName}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def update_attribute_data(self, entity_id, attr_name, attribute, **kwargs):  # noqa: E501
    """update_attribute_data  # noqa: E501

    Replace one attribute of an entity
    (PUT /entities/{entityId}/attrs/{attrName}).  The request payload is an
    object representing the new attribute data; previous attribute data is
    replaced by it.  A successful operation answers 204 No Content; errors
    use a non-2xx status and, optionally, an error payload.

    Synchronous by default; pass ``async_req=True`` to receive the request
    thread instead.

    :param async_req bool: execute request asynchronously
    :param str entity_id: Id of the entity to update (required)
    :param str attr_name: Attribute name (required)
    :param Attribute attribute: (required)
    :param str fiware_service: value for the "Fiware-Service" header, used
        by Orion to pick the service/tenant; the default tenant applies
        when the header is absent.
    :param str fiware_service_path: value for the optional
        "Fiware-ServicePath" header; entities created without a service
        path belong to the root scope "/".
    :param str type: entity type, to disambiguate entities sharing an id.
    :param _preload_content: if False, the raw urllib3.HTTPResponse object
        is returned without reading/decoding it. Default is True.
    :param _request_timeout: total timeout (one number) or a
        (connection, read) tuple.
    :return: None
        If the method is called asynchronously, returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the body.
    kwargs.update(_return_http_data_only=True)
    return self.update_attribute_data_with_http_info(entity_id, attr_name, attribute, **kwargs)  # noqa: E501
def update_attribute_data_with_http_info(self, entity_id, attr_name, attribute, **kwargs):  # noqa: E501
    """update_attribute_data  # noqa: E501

    Replace one attribute of an entity
    (PUT /entities/{entityId}/attrs/{attrName}).  The request payload is an
    object representing the new attribute data; previous attribute data is
    replaced by it.  A successful operation answers 204 No Content; errors
    use a non-2xx status and, optionally, an error payload.

    Synchronous by default; pass ``async_req=True`` to receive the request
    thread instead.

    :param async_req bool: execute request asynchronously
    :param str entity_id: Id of the entity to update (required)
    :param str attr_name: Attribute name (required)
    :param Attribute attribute: (required)
    :param str fiware_service: value for the "Fiware-Service" header, used
        by Orion to pick the service/tenant; the default tenant applies
        when the header is absent.
    :param str fiware_service_path: value for the optional
        "Fiware-ServicePath" header; entities created without a service
        path belong to the root scope "/".
    :param str type: entity type, to disambiguate entities sharing an id.
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the raw urllib3.HTTPResponse object
        is returned without reading/decoding it. Default is True.
    :param _request_timeout: total timeout (one number) or a
        (connection, read) tuple.
    :return: None
        If the method is called asynchronously, returns the request thread.
    """
    # Every keyword this endpoint accepts, including the generic client
    # options shared by all generated methods.
    accepted = {
        'entity_id', 'attr_name', 'attribute', 'fiware_service',
        'fiware_service_path', 'type',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    }

    params = {
        'entity_id': entity_id,
        'attr_name': attr_name,
        'attribute': attribute,
    }
    for name, value in six.iteritems(kwargs):
        if name not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_attribute_data" % name
            )
        params[name] = value

    # Path parameters and the body are all mandatory and must not be None.
    if params.get('entity_id') is None:
        raise ApiValueError("Missing the required parameter `entity_id` when calling `update_attribute_data`")  # noqa: E501
    if params.get('attr_name') is None:
        raise ApiValueError("Missing the required parameter `attr_name` when calling `update_attribute_data`")  # noqa: E501
    if params.get('attribute') is None:
        raise ApiValueError("Missing the required parameter `attribute` when calling `update_attribute_data`")  # noqa: E501

    path_params = {
        'entityId': params['entity_id'],
        'attrName': params['attr_name'],
    }

    # The only optional query parameter is the entity type (a supplied
    # None is still forwarded, matching the generated client).
    query_params = [('type', params['type'])] if 'type' in params else []

    # Tenant/scope headers plus content-negotiation headers.
    header_params = {}
    if 'fiware_service' in params:
        header_params['Fiware-Service'] = params['fiware_service']  # noqa: E501
    if 'fiware_service_path' in params:
        header_params['Fiware-ServicePath'] = params['fiware_service_path']  # noqa: E501
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['ApiKeyAuth', 'BearerAuth']  # noqa: E501

    return self.api_client.call_api(
        '/entities/{entityId}/attrs/{attrName}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=params['attribute'],
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
| 59.496583
| 547
| 0.658869
| 3,275
| 26,119
| 5.052519
| 0.077863
| 0.037711
| 0.060917
| 0.023207
| 0.948148
| 0.944763
| 0.94422
| 0.937934
| 0.931347
| 0.929232
| 0
| 0.012861
| 0.264712
| 26,119
| 438
| 548
| 59.63242
| 0.848737
| 0.547724
| 0
| 0.753769
| 1
| 0
| 0.225939
| 0.04555
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035176
| false
| 0
| 0.025126
| 0
| 0.095477
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5c26f86ee49ca221016d5bc8e9731b9943325096
| 3,957
|
py
|
Python
|
QUANTAXIS/QASU/crawl_jrj_financial_reportdate.py
|
simplezhang57/QUANTAXIS
|
0fab23ee3cc4048a30b5eed3c311a5c9cdce8110
|
[
"MIT"
] | 2
|
2018-10-29T12:01:55.000Z
|
2021-03-05T10:28:59.000Z
|
QUANTAXIS/QASU/crawl_jrj_financial_reportdate.py
|
frosthaoz/QUANTAXIS
|
f5f482418e5f6e23ac3530089b8d17300d931b48
|
[
"MIT"
] | null | null | null |
QUANTAXIS/QASU/crawl_jrj_financial_reportdate.py
|
frosthaoz/QUANTAXIS
|
f5f482418e5f6e23ac3530089b8d17300d931b48
|
[
"MIT"
] | 2
|
2018-11-30T07:52:14.000Z
|
2021-05-28T23:00:20.000Z
|
from QUANTAXIS.QAFetch.QAcalendar import *
from QUANTAXIS.QAUtil import (DATABASE,QA_util_getBetweenQuarter, QA_util_get_next_day,
QA_util_get_real_date, QA_util_log_info,QA_util_add_months,
QA_util_to_json_from_pandas, trade_date_sse,QA_util_today_str,
QA_util_datetime_to_strdate)
import pandas as pd
import pymongo
def QA_SU_save_report_calendar_day(client=DATABASE, ui_log=None, ui_progress=None):
    '''
    save stock_day
    Save the financial-report calendar (保存财报日历) covering roughly the
    last year of history: quarterly report dates from 12 months ago up to
    3 months ago are fetched and inserted into ``report_calendar``.

    :param client: pymongo database handle (defaults to DATABASE)
    :param ui_log: optional UI log channel forwarded to QA_util_log_info
    :param ui_progress: optional UI progress channel
    :return: None
    '''
    END_DATE = QA_util_datetime_to_strdate(QA_util_add_months(QA_util_today_str(), -3))
    START_DATE = QA_util_datetime_to_strdate(QA_util_add_months(QA_util_today_str(), -12))
    # The second column of the transposed quarter table holds the report dates.
    date_list = list(pd.DataFrame.from_dict(
        QA_util_getBetweenQuarter(START_DATE, END_DATE)).T.iloc[:, 1])
    report_calendar = client.report_calendar
    report_calendar.create_index(
        [("code", pymongo.ASCENDING), ("report_date", pymongo.ASCENDING)],
        unique=True)
    err = []

    def __saving_work(report_date, report_calendar):
        # Insert one quarter's calendar; failures (e.g. duplicate keys) are
        # printed and the date recorded for the summary below.
        try:
            QA_util_log_info(
                '##JOB01 Now Saving Report_Calendar==== {}'.format(str(report_date)), ui_log)
            report_calendar.insert_many(QA_util_to_json_from_pandas(
                QA_fetch_get_financial_calendar(report_date)), ordered=False)
        except Exception as error0:
            print(error0)
            err.append(str(report_date))

    total = len(date_list)
    # enumerate() replaces the original date_list.index(item) lookup, which
    # was O(n) per iteration and wrong when the list contained duplicates.
    for position, item in enumerate(date_list, start=1):
        QA_util_log_info('The {} of Total {}'.format(position, total))
        percent = position / total * 100
        # BUG FIX: the original passed ui_log as a stray second positional
        # argument to str.format (silently ignored); the log channel is
        # passed to QA_util_log_info below instead.
        strProgressToLog = 'DOWNLOAD PROGRESS {}'.format(
            str(float(percent))[0:4] + '%')
        intProgressToLog = int(float(percent))
        QA_util_log_info(strProgressToLog, ui_log=ui_log,
                         ui_progress=ui_progress,
                         ui_progress_int_value=intProgressToLog)
        __saving_work(item, report_calendar)

    if not err:
        QA_util_log_info('SUCCESS save report calendar ^_^', ui_log)
    else:
        QA_util_log_info(' ERROR CODE \n ', ui_log)
        QA_util_log_info(err, ui_log)
def QA_SU_save_report_calendar_his(client=DATABASE, ui_log=None, ui_progress=None):
    '''
    save stock_day
    Save the full historical financial-report calendar (保存财报日历):
    quarterly report dates from 1996-01-01 up to 3 months ago are fetched
    and inserted into ``report_calendar``.

    :param client: pymongo database handle (defaults to DATABASE)
    :param ui_log: optional UI log channel forwarded to QA_util_log_info
    :param ui_progress: optional UI progress channel
    :return: None
    '''
    START_DATE = '1996-01-01'
    END_DATE = QA_util_datetime_to_strdate(QA_util_add_months(QA_util_today_str(), -3))
    # The second column of the transposed quarter table holds the report dates.
    date_list = list(pd.DataFrame.from_dict(
        QA_util_getBetweenQuarter(START_DATE, END_DATE)).T.iloc[:, 1])
    report_calendar = client.report_calendar
    report_calendar.create_index(
        [("code", pymongo.ASCENDING), ("report_date", pymongo.ASCENDING)],
        unique=True)
    err = []

    def __saving_work(report_date, report_calendar):
        # Insert one quarter's calendar; failures (e.g. duplicate keys) are
        # printed and the date recorded for the summary below.
        try:
            QA_util_log_info(
                '##JOB01 Now Saving Report_Calendar==== {}'.format(str(report_date)), ui_log)
            report_calendar.insert_many(QA_util_to_json_from_pandas(
                QA_fetch_get_financial_calendar(report_date)), ordered=False)
        except Exception as error0:
            print(error0)
            err.append(str(report_date))

    total = len(date_list)
    # enumerate() replaces the original date_list.index(item) lookup, which
    # was O(n) per iteration and wrong when the list contained duplicates.
    for position, item in enumerate(date_list, start=1):
        QA_util_log_info('The {} of Total {}'.format(position, total))
        percent = position / total * 100
        # BUG FIX: the original passed ui_log as a stray second positional
        # argument to str.format (silently ignored); the log channel is
        # passed to QA_util_log_info below instead.
        strProgressToLog = 'DOWNLOAD PROGRESS {}'.format(
            str(float(percent))[0:4] + '%')
        intProgressToLog = int(float(percent))
        QA_util_log_info(strProgressToLog, ui_log=ui_log,
                         ui_progress=ui_progress,
                         ui_progress_int_value=intProgressToLog)
        __saving_work(item, report_calendar)

    if not err:
        QA_util_log_info('SUCCESS save report calendar ^_^', ui_log)
    else:
        QA_util_log_info(' ERROR CODE \n ', ui_log)
        QA_util_log_info(err, ui_log)
| 43.01087
| 138
| 0.671468
| 540
| 3,957
| 4.501852
| 0.190741
| 0.081448
| 0.048128
| 0.069519
| 0.89675
| 0.887289
| 0.851501
| 0.851501
| 0.851501
| 0.851501
| 0
| 0.014867
| 0.218095
| 3,957
| 92
| 139
| 43.01087
| 0.770847
| 0.019965
| 0
| 0.825397
| 0
| 0
| 0.076923
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063492
| false
| 0
| 0.063492
| 0
| 0.126984
| 0.031746
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
308bde11bfa59c24926b7e12a66627c0d45a9163
| 91
|
py
|
Python
|
flask/app/code/__init__.py
|
aevear/Stonktastic
|
69a7b33c29492c8d76f5bec892eefb6606c2eaab
|
[
"MIT"
] | 1
|
2021-01-20T02:00:08.000Z
|
2021-01-20T02:00:08.000Z
|
flask/app/code/__init__.py
|
KKR959/Stonktastic
|
bd7a5f43fb899368886d86ffe4a5e37b0cd7ad4d
|
[
"MIT"
] | null | null | null |
flask/app/code/__init__.py
|
KKR959/Stonktastic
|
bd7a5f43fb899368886d86ffe4a5e37b0cd7ad4d
|
[
"MIT"
] | 1
|
2021-01-18T23:18:50.000Z
|
2021-01-18T23:18:50.000Z
|
"""
Imports all submodules
"""
from app.code import flask
from app.code import sqlQueries
| 13
| 31
| 0.758242
| 13
| 91
| 5.307692
| 0.692308
| 0.202899
| 0.318841
| 0.492754
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 91
| 6
| 32
| 15.166667
| 0.896104
| 0.241758
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
308c1bb31f2ca92b8a25993bd5e74e661bfe7d97
| 7,250
|
py
|
Python
|
tests/nodes/test_badges.py
|
Hengle/Houdini-Toolbox
|
a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3
|
[
"MIT"
] | 136
|
2015-01-03T04:03:23.000Z
|
2022-02-07T11:08:57.000Z
|
tests/nodes/test_badges.py
|
Hengle/Houdini-Toolbox
|
a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3
|
[
"MIT"
] | 11
|
2017-02-09T20:05:04.000Z
|
2021-01-24T22:25:59.000Z
|
tests/nodes/test_badges.py
|
Hengle/Houdini-Toolbox
|
a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3
|
[
"MIT"
] | 26
|
2015-08-18T12:11:02.000Z
|
2020-12-19T01:53:31.000Z
|
"""Tests for ht.nodes.badges module."""
# flake8: noqa
# =============================================================================
# IMPORTS
# =============================================================================
# Third Party
import pytest
# Houdini
import hou
badges = pytest.importorskip("ht.nodes.badges")
# =============================================================================
# TESTS
# =============================================================================
class Test_clear_generic_image_badge:
    """Test ht.nodes.badges.clear_generic_image_badge."""

    def test_no_data(self, mocker):
        """hou.OperationFailed from destroyUserData is swallowed."""
        undo_disabler = mocker.patch("ht.nodes.badges.hou.undos.disabler")
        image_key = mocker.patch(
            "ht.nodes.badges._ht_generic_image_badge.get_generic_image_key"
        )

        node = mocker.MagicMock(spec=hou.Node)
        node.destroyUserData.side_effect = hou.OperationFailed

        badges.clear_generic_image_badge(node)

        undo_disabler.assert_called()
        node.destroyUserData.assert_called_with(image_key.return_value)

    def test(self, mocker):
        """Existing user data is destroyed under an undo disabler."""
        undo_disabler = mocker.patch("ht.nodes.badges.hou.undos.disabler")
        image_key = mocker.patch(
            "ht.nodes.badges._ht_generic_image_badge.get_generic_image_key"
        )

        node = mocker.MagicMock(spec=hou.Node)

        badges.clear_generic_image_badge(node)

        undo_disabler.assert_called()
        node.destroyUserData.assert_called_with(image_key.return_value)
class Test_clear_generic_text_badge:
    """Test ht.nodes.badges.clear_generic_text_badge."""

    def test_no_data(self, mocker):
        """hou.OperationFailed from destroyUserData is swallowed."""
        undo_disabler = mocker.patch("ht.nodes.badges.hou.undos.disabler")
        text_key = mocker.patch(
            "ht.nodes.badges._ht_generic_text_badge.get_generic_text_key"
        )
        clear_color = mocker.patch(
            "ht.nodes.badges.clear_generic_text_badge_color"
        )

        node = mocker.MagicMock(spec=hou.Node)
        node.destroyUserData.side_effect = hou.OperationFailed

        badges.clear_generic_text_badge(node)

        undo_disabler.assert_called()
        node.destroyUserData.assert_called_with(text_key.return_value)
        clear_color.assert_called()

    def test(self, mocker):
        """Existing user data is destroyed and the badge color cleared."""
        undo_disabler = mocker.patch("ht.nodes.badges.hou.undos.disabler")
        text_key = mocker.patch(
            "ht.nodes.badges._ht_generic_text_badge.get_generic_text_key"
        )
        clear_color = mocker.patch(
            "ht.nodes.badges.clear_generic_text_badge_color"
        )

        node = mocker.MagicMock(spec=hou.Node)

        badges.clear_generic_text_badge(node)

        undo_disabler.assert_called()
        node.destroyUserData.assert_called_with(text_key.return_value)
        clear_color.assert_called()
class Test_clear_generic_text_badge_color:
    """Test ht.nodes.badges.clear_generic_text_badge_color."""

    def test_no_data(self, mocker):
        """hou.OperationFailed from destroyUserData is swallowed."""
        undo_disabler = mocker.patch("ht.nodes.badges.hou.undos.disabler")
        color_key = mocker.patch(
            "ht.nodes.badges._ht_generic_text_badge.get_generic_text_color_key"
        )

        node = mocker.MagicMock(spec=hou.Node)
        node.destroyUserData.side_effect = hou.OperationFailed

        badges.clear_generic_text_badge_color(node)

        undo_disabler.assert_called()
        node.destroyUserData.assert_called_with(color_key.return_value)

    def test(self, mocker):
        """Existing user data is destroyed under an undo disabler."""
        undo_disabler = mocker.patch("ht.nodes.badges.hou.undos.disabler")
        color_key = mocker.patch(
            "ht.nodes.badges._ht_generic_text_badge.get_generic_text_color_key"
        )

        node = mocker.MagicMock(spec=hou.Node)

        badges.clear_generic_text_badge_color(node)

        undo_disabler.assert_called()
        node.destroyUserData.assert_called_with(color_key.return_value)
def test_set_generic_image_badge(mocker):
    """Test ht.nodes.badges.set_generic_image_badge."""
    undo_disabler = mocker.patch("ht.nodes.badges.hou.undos.disabler")
    image_key = mocker.patch(
        "ht.nodes.badges._ht_generic_image_badge.get_generic_image_key"
    )

    badge_value = mocker.MagicMock(spec=str)
    node = mocker.MagicMock(spec=hou.Node)

    badges.set_generic_image_badge(node, badge_value)

    undo_disabler.assert_called()
    node.setUserData.assert_called_with(image_key.return_value, badge_value)
class Test_set_generic_text_badge:
    """Test ht.nodes.badges.set_generic_text_badge."""

    def test_no_color(self, mocker):
        """Without a color the badge text is set and no color is applied."""
        undo_disabler = mocker.patch("ht.nodes.badges.hou.undos.disabler")
        text_key = mocker.patch(
            "ht.nodes.badges._ht_generic_text_badge.get_generic_text_key"
        )
        set_color = mocker.patch("ht.nodes.badges.set_generic_text_badge_color")

        badge_value = mocker.MagicMock(spec=str)
        node = mocker.MagicMock(spec=hou.Node)

        badges.set_generic_text_badge(node, badge_value)

        undo_disabler.assert_called()
        node.setUserData.assert_called_with(
            text_key.return_value, badge_value
        )
        set_color.assert_not_called()

    def test_color(self, mocker):
        """With a color the badge text is set and the color forwarded."""
        undo_disabler = mocker.patch("ht.nodes.badges.hou.undos.disabler")
        text_key = mocker.patch(
            "ht.nodes.badges._ht_generic_text_badge.get_generic_text_key"
        )
        set_color = mocker.patch("ht.nodes.badges.set_generic_text_badge_color")

        badge_color = mocker.MagicMock(spec=hou.Color)
        badge_value = mocker.MagicMock(spec=str)
        node = mocker.MagicMock(spec=hou.Node)

        badges.set_generic_text_badge(node, badge_value, badge_color)

        undo_disabler.assert_called()
        node.setUserData.assert_called_with(
            text_key.return_value, badge_value
        )
        set_color.assert_called_with(node, badge_color)
def test_set_generic_text_badge_color(mocker):
    """Test ht.nodes.badges.set_generic_text_badge_color."""
    undo_disabler = mocker.patch("ht.nodes.badges.hou.undos.disabler")
    color_key = mocker.patch(
        "ht.nodes.badges._ht_generic_text_badge.get_generic_text_color_key"
    )

    badge_color = mocker.MagicMock(spec=hou.Color)
    badge_color.rgb.return_value = (0.1, 0.2, 0.3)
    node = mocker.MagicMock(spec=hou.Node)

    badges.set_generic_text_badge_color(node, badge_color)

    undo_disabler.assert_called()
    node.setUserData.assert_called_with(
        color_key.return_value, "rgb 0.1 0.2 0.3"
    )
| 32.80543
| 85
| 0.675034
| 935
| 7,250
| 4.857754
| 0.069519
| 0.059886
| 0.09159
| 0.095112
| 0.937472
| 0.927565
| 0.904007
| 0.880889
| 0.823646
| 0.816601
| 0
| 0.002211
| 0.189103
| 7,250
| 220
| 86
| 32.954545
| 0.770369
| 0.135172
| 0
| 0.717742
| 0
| 0
| 0.188167
| 0.183317
| 0
| 0
| 0
| 0
| 0.193548
| 1
| 0.080645
| false
| 0
| 0.024194
| 0
| 0.137097
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
30ec921e2c1ea64398e9cafc9ace1472f255883a
| 89
|
py
|
Python
|
iterify/iterify.py
|
mmore500/iterify
|
046a7a49117973d6c5b4bf7254f78fd6070aa25b
|
[
"MIT"
] | null | null | null |
iterify/iterify.py
|
mmore500/iterify
|
046a7a49117973d6c5b4bf7254f78fd6070aa25b
|
[
"MIT"
] | null | null | null |
iterify/iterify.py
|
mmore500/iterify
|
046a7a49117973d6c5b4bf7254f78fd6070aa25b
|
[
"MIT"
] | null | null | null |
import typing


def iterify(*args) -> typing.Iterator[typing.Any]:
    """Yield each positional argument in order as a lazy iterator."""
    yield from args
| 14.833333
| 50
| 0.707865
| 12
| 89
| 5.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157303
| 89
| 5
| 51
| 17.8
| 0.84
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
a51a5f01fecaac583f7abb3752321c4e2b4d22b2
| 1,885
|
py
|
Python
|
Target/Target.py
|
shyamjangid07/Reverse-Engineering
|
469efabcd6057f7895d8d891f1fabdf2ffe730b0
|
[
"Apache-2.0"
] | 337
|
2020-08-15T12:22:14.000Z
|
2022-03-29T06:05:15.000Z
|
Target/Target.py
|
ajairakaam/Reverse-Engineering
|
49d00bafd0622ffb79e081946a19c5fd3a42628f
|
[
"Apache-2.0"
] | 3
|
2020-11-12T14:30:48.000Z
|
2021-05-18T16:56:22.000Z
|
Target/Target.py
|
ajairakaam/Reverse-Engineering
|
49d00bafd0622ffb79e081946a19c5fd3a42628f
|
[
"Apache-2.0"
] | 83
|
2020-08-15T00:22:58.000Z
|
2022-03-31T08:40:23.000Z
|
#Original Written By Tech Qaiser
#Youtube Tech Qaiser
import marshal,zlib,base64
exec(marshal.loads(zlib.decompress(base64.b64decode("eJzFVUtTG0cQ7pEEBoEAG2z8ALtxKhVRDpLBjzg2hYMxGMoWdoJccuRyUSvNIK20u6PMjCzk8iFV/IPckkvuueY/5HfkmlvOuSQ9s4uI7aRyzKKd7el399cz1CF5hun9gl79cwqA049BAFAd0AyqDASDVgp4Co4YMAHQSgNPu01/6lgxA9UMVKJ5yIghaGdBecAY4xl4tC2G4QiARQxevGMsTgEfslR1BBLB8EBwyglGoT8NIgtiFPgIHKWgOpYEGXdBfnFBRv8xSOyLk/EIzPIxmCWlWDIeS3JkdviWiTF4+Mqwag5EDvgEtFOgo5SlJy2tLrFv/oD+HBMZaE3YINVJEJPQmgJxOna0AA1mWftnnCAL/IzL3qXOp4HPuNRnnPQscPqdI0eMUWaWdQ74LKxSxGRzHlb5Bedw1rHOu9LEDLQuvBOgddEKXMdIdsl5TTZO0poDftFlMg9iHlqXgTslccVuiX+UtvC106A+SfE5W79tEpljHDHGfT72mjR4EvhlEAtWmdikX6H2ZMRVh0eYsnhcgf/yQiYv6K28CwjCAKLhQSX/7oTGbi9/lYbX/5OeXTNEZD0QnmrEs/3rff0TfV4uvMKS5N1A4FpJ1Jte5L8Rq7grDW7JbsQXskhPuelrDGM1omQU9NF77fmBV7OcCDt905QRrhQO8W7RmTyjWNrKtPGCAMNj35jv+J0P2YvoRRxVN0LTFNhRsqG8EHu+aSa+V/R1m+7vP3737Su89HL53ue3wjJVIww+10JFXiiwiD6nZTOkzHBpaQ2x8cP39vntvp4j66yzuxlWpOKBrw3y10HBHBpcW0PDSEHFarH/5dA2Z8unCk/aoaetQqJxI9xQXr3tRw3UE8Q/5u7KXqFQ8K1H/fGJw9theXsTN56WSuu7D/fwwddY3tzYxi/Xd/Y2v8oai4rLAnWWVlvVktcQkdE0hlCSb/wg8Iq3CtcxX/EjLnsad8t4u7C8iOudTiAqovbYN8VbNz4r3LiN+cfb5dKTTzHw2wIfiXpbLuJGU8lQFG8uF64XVlZW7pCrPe/AU35ipGcoUNOYjr5bLB54dVGTsl2oy1BPkWArYSw9kQ3bNU2XIvSuGfuJlO/St1MmbPtNhqiOp7Ujnj3dK2sre17eWrpjRm0YymTfr8tIzw86Z5Hp4TOy6hFCccfxLuprFpoQ308A375FLoIP+M7j3yCsKEkIHbtdwDXU905QWQl3kEYtIoiV0B1KyLdDvSUVrkd9LPla2wmzQ7156BsCu1DQ5UHOyyG6MaGaBceyRDcS+EDUvS7Nf7kplMAd7fxvSKVE3ZxUSCenL7sKbaPwCU1k3jbz/4I//5GF0OJl/FA4XKU29p+g7msjQoeuDoToOAQHZ9dMuwTt7UAHxaG2qZRUzpWgnjl15fX2/ajTNY5NvVHGnpkeHUVt7CnbpSPsZLJDVY9YWXJOzWk7fv777keSEbNq5hRtHijqCvm1dC2hrakWZp9S5YHYV7ImjXaVbHkBCWyvPc6bwuNCaVf0Qc2k6cOp+DFnHRBo+wdShS49Rwy7BtDFxOMGdWsh1TnkCvJNXIcin47QQrSdGhd1yWOhEYdxJw4ITBcvoKrjW1pSXpajTZz/Y9GvSU/xncgIpbodk7eH7WRxJ5E3XENW42t6zdI6omWcjadyLMfSbIrWITZGu3Fm3zSbHnByKUvl2HmWTqUT+ST9WZscG2VZNkffs2zB0TmWTcW2lp5MbC39F33HJ+s="))))
| 314.166667
| 1,802
| 0.959151
| 66
| 1,885
| 27.393939
| 0.924242
| 0.011062
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15323
| 0.006366
| 1,885
| 5
| 1,803
| 377
| 0.812066
| 0.026525
| 0
| 0
| 0
| 0.5
| 0.951446
| 0.951446
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 10
|
ebe33264490b75b9300b62ed3132ed51460847ab
| 309
|
py
|
Python
|
tests/sample_app/models.py
|
elbaschid/django-removalist
|
c1f189e6df00860bf307b9bb9c3f5f791063523d
|
[
"MIT"
] | null | null | null |
tests/sample_app/models.py
|
elbaschid/django-removalist
|
c1f189e6df00860bf307b9bb9c3f5f791063523d
|
[
"MIT"
] | 1
|
2020-06-02T08:38:31.000Z
|
2020-06-02T08:38:31.000Z
|
tests/sample_app/models.py
|
elbaschid/django-removalist
|
c1f189e6df00860bf307b9bb9c3f5f791063523d
|
[
"MIT"
] | null | null | null |
from django.db import models
class NewUser(models.Model):
    """Minimal fixture model for the sample app used by the test suite."""
    text = models.TextField()
    number = models.IntegerField()
    # NOTE(review): ForeignKey without on_delete is only valid on Django < 2.0;
    # confirm the supported Django version before upgrading this dependency.
    group = models.ForeignKey('auth.Group')
class OldUser(models.Model):
    """Fixture model mirroring NewUser, presumably the "to be removed" side."""
    text = models.TextField()
    number = models.IntegerField()
    # NOTE(review): ForeignKey without on_delete is only valid on Django < 2.0;
    # confirm the supported Django version before upgrading this dependency.
    group = models.ForeignKey('auth.Group')
| 22.071429
| 43
| 0.699029
| 35
| 309
| 6.171429
| 0.457143
| 0.101852
| 0.138889
| 0.194444
| 0.777778
| 0.777778
| 0.777778
| 0.777778
| 0.777778
| 0.777778
| 0
| 0
| 0.174757
| 309
| 13
| 44
| 23.769231
| 0.847059
| 0
| 0
| 0.666667
| 0
| 0
| 0.064725
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
6921db2bf38a1a6dd55ca3d5005d0b6d33853b58
| 27,977
|
py
|
Python
|
sdk/python/pulumi_azure/cosmosdb/cassandra_datacenter.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 109
|
2018-06-18T00:19:44.000Z
|
2022-02-20T05:32:57.000Z
|
sdk/python/pulumi_azure/cosmosdb/cassandra_datacenter.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 663
|
2018-06-18T21:08:46.000Z
|
2022-03-31T20:10:11.000Z
|
sdk/python/pulumi_azure/cosmosdb/cassandra_datacenter.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 41
|
2018-07-19T22:37:38.000Z
|
2022-03-14T10:56:26.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['CassandraDatacenterArgs', 'CassandraDatacenter']
@pulumi.input_type
class CassandraDatacenterArgs:
    # NOTE(review): tool-generated class (see the file-header warning); make
    # changes in the generator, not by hand-editing this file.
    def __init__(__self__, *,
                 cassandra_cluster_id: pulumi.Input[str],
                 delegated_management_subnet_id: pulumi.Input[str],
                 availability_zones_enabled: Optional[pulumi.Input[bool]] = None,
                 disk_count: Optional[pulumi.Input[int]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 node_count: Optional[pulumi.Input[int]] = None,
                 sku_name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a CassandraDatacenter resource.
        :param pulumi.Input[str] cassandra_cluster_id: The ID of the Cassandra Cluster. Changing this forces a new Cassandra Datacenter to be created.
        :param pulumi.Input[str] delegated_management_subnet_id: The ID of the delegated management subnet for this Cassandra Datacenter. Changing this forces a new Cassandra Datacenter to be created.
        :param pulumi.Input[bool] availability_zones_enabled: Determines whether availability zones are enabled. Defaults to `true`.
        :param pulumi.Input[int] disk_count: Determines the number of p30 disks that are attached to each node. Defaults to `4`.
        :param pulumi.Input[str] location: The Azure Region where the Cassandra Datacenter should exist. Changing this forces a new Cassandra Datacenter to be created.
        :param pulumi.Input[str] name: The name which should be used for this Cassandra Datacenter. Changing this forces a new Cassandra Datacenter to be created.
        :param pulumi.Input[int] node_count: The number of nodes the Cassandra Datacenter should have. The number should be equal or greater than `3`. Defaults to `3`.
        :param pulumi.Input[str] sku_name: Determines the selected sku. Defaults to Standard_DS14_v2.
        """
        pulumi.set(__self__, "cassandra_cluster_id", cassandra_cluster_id)
        pulumi.set(__self__, "delegated_management_subnet_id", delegated_management_subnet_id)
        # Optional inputs are only recorded when explicitly supplied, so unset
        # values stay absent from the input bag.
        if availability_zones_enabled is not None:
            pulumi.set(__self__, "availability_zones_enabled", availability_zones_enabled)
        if disk_count is not None:
            pulumi.set(__self__, "disk_count", disk_count)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if node_count is not None:
            pulumi.set(__self__, "node_count", node_count)
        if sku_name is not None:
            pulumi.set(__self__, "sku_name", sku_name)

    # The properties below expose the snake_case attributes under their
    # camelCase wire names via pulumi.getter(name=...).
    @property
    @pulumi.getter(name="cassandraClusterId")
    def cassandra_cluster_id(self) -> pulumi.Input[str]:
        """
        The ID of the Cassandra Cluster. Changing this forces a new Cassandra Datacenter to be created.
        """
        return pulumi.get(self, "cassandra_cluster_id")

    @cassandra_cluster_id.setter
    def cassandra_cluster_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "cassandra_cluster_id", value)

    @property
    @pulumi.getter(name="delegatedManagementSubnetId")
    def delegated_management_subnet_id(self) -> pulumi.Input[str]:
        """
        The ID of the delegated management subnet for this Cassandra Datacenter. Changing this forces a new Cassandra Datacenter to be created.
        """
        return pulumi.get(self, "delegated_management_subnet_id")

    @delegated_management_subnet_id.setter
    def delegated_management_subnet_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "delegated_management_subnet_id", value)

    @property
    @pulumi.getter(name="availabilityZonesEnabled")
    def availability_zones_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Determines whether availability zones are enabled. Defaults to `true`.
        """
        return pulumi.get(self, "availability_zones_enabled")

    @availability_zones_enabled.setter
    def availability_zones_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "availability_zones_enabled", value)

    @property
    @pulumi.getter(name="diskCount")
    def disk_count(self) -> Optional[pulumi.Input[int]]:
        """
        Determines the number of p30 disks that are attached to each node. Defaults to `4`.
        """
        return pulumi.get(self, "disk_count")

    @disk_count.setter
    def disk_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "disk_count", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        The Azure Region where the Cassandra Datacenter should exist. Changing this forces a new Cassandra Datacenter to be created.
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name which should be used for this Cassandra Datacenter. Changing this forces a new Cassandra Datacenter to be created.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="nodeCount")
    def node_count(self) -> Optional[pulumi.Input[int]]:
        """
        The number of nodes the Cassandra Datacenter should have. The number should be equal or greater than `3`. Defaults to `3`.
        """
        return pulumi.get(self, "node_count")

    @node_count.setter
    def node_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "node_count", value)

    @property
    @pulumi.getter(name="skuName")
    def sku_name(self) -> Optional[pulumi.Input[str]]:
        """
        Determines the selected sku. Defaults to Standard_DS14_v2.
        """
        return pulumi.get(self, "sku_name")

    @sku_name.setter
    def sku_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sku_name", value)
@pulumi.input_type
class _CassandraDatacenterState:
    # NOTE(review): tool-generated class (see the file-header warning); unlike
    # CassandraDatacenterArgs, every field here is optional because state
    # lookups may be partial.
    def __init__(__self__, *,
                 availability_zones_enabled: Optional[pulumi.Input[bool]] = None,
                 cassandra_cluster_id: Optional[pulumi.Input[str]] = None,
                 delegated_management_subnet_id: Optional[pulumi.Input[str]] = None,
                 disk_count: Optional[pulumi.Input[int]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 node_count: Optional[pulumi.Input[int]] = None,
                 sku_name: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering CassandraDatacenter resources.
        :param pulumi.Input[bool] availability_zones_enabled: Determines whether availability zones are enabled. Defaults to `true`.
        :param pulumi.Input[str] cassandra_cluster_id: The ID of the Cassandra Cluster. Changing this forces a new Cassandra Datacenter to be created.
        :param pulumi.Input[str] delegated_management_subnet_id: The ID of the delegated management subnet for this Cassandra Datacenter. Changing this forces a new Cassandra Datacenter to be created.
        :param pulumi.Input[int] disk_count: Determines the number of p30 disks that are attached to each node. Defaults to `4`.
        :param pulumi.Input[str] location: The Azure Region where the Cassandra Datacenter should exist. Changing this forces a new Cassandra Datacenter to be created.
        :param pulumi.Input[str] name: The name which should be used for this Cassandra Datacenter. Changing this forces a new Cassandra Datacenter to be created.
        :param pulumi.Input[int] node_count: The number of nodes the Cassandra Datacenter should have. The number should be equal or greater than `3`. Defaults to `3`.
        :param pulumi.Input[str] sku_name: Determines the selected sku. Defaults to Standard_DS14_v2.
        """
        # Only record the state fields that were actually supplied.
        if availability_zones_enabled is not None:
            pulumi.set(__self__, "availability_zones_enabled", availability_zones_enabled)
        if cassandra_cluster_id is not None:
            pulumi.set(__self__, "cassandra_cluster_id", cassandra_cluster_id)
        if delegated_management_subnet_id is not None:
            pulumi.set(__self__, "delegated_management_subnet_id", delegated_management_subnet_id)
        if disk_count is not None:
            pulumi.set(__self__, "disk_count", disk_count)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if node_count is not None:
            pulumi.set(__self__, "node_count", node_count)
        if sku_name is not None:
            pulumi.set(__self__, "sku_name", sku_name)

    # The properties below expose the snake_case attributes under their
    # camelCase wire names via pulumi.getter(name=...).
    @property
    @pulumi.getter(name="availabilityZonesEnabled")
    def availability_zones_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Determines whether availability zones are enabled. Defaults to `true`.
        """
        return pulumi.get(self, "availability_zones_enabled")

    @availability_zones_enabled.setter
    def availability_zones_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "availability_zones_enabled", value)

    @property
    @pulumi.getter(name="cassandraClusterId")
    def cassandra_cluster_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Cassandra Cluster. Changing this forces a new Cassandra Datacenter to be created.
        """
        return pulumi.get(self, "cassandra_cluster_id")

    @cassandra_cluster_id.setter
    def cassandra_cluster_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cassandra_cluster_id", value)

    @property
    @pulumi.getter(name="delegatedManagementSubnetId")
    def delegated_management_subnet_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the delegated management subnet for this Cassandra Datacenter. Changing this forces a new Cassandra Datacenter to be created.
        """
        return pulumi.get(self, "delegated_management_subnet_id")

    @delegated_management_subnet_id.setter
    def delegated_management_subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "delegated_management_subnet_id", value)

    @property
    @pulumi.getter(name="diskCount")
    def disk_count(self) -> Optional[pulumi.Input[int]]:
        """
        Determines the number of p30 disks that are attached to each node. Defaults to `4`.
        """
        return pulumi.get(self, "disk_count")

    @disk_count.setter
    def disk_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "disk_count", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        The Azure Region where the Cassandra Datacenter should exist. Changing this forces a new Cassandra Datacenter to be created.
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name which should be used for this Cassandra Datacenter. Changing this forces a new Cassandra Datacenter to be created.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="nodeCount")
    def node_count(self) -> Optional[pulumi.Input[int]]:
        """
        The number of nodes the Cassandra Datacenter should have. The number should be equal or greater than `3`. Defaults to `3`.
        """
        return pulumi.get(self, "node_count")

    @node_count.setter
    def node_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "node_count", value)

    @property
    @pulumi.getter(name="skuName")
    def sku_name(self) -> Optional[pulumi.Input[str]]:
        """
        Determines the selected sku. Defaults to Standard_DS14_v2.
        """
        return pulumi.get(self, "sku_name")

    @sku_name.setter
    def sku_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sku_name", value)
class CassandraDatacenter(pulumi.CustomResource):
    # NOTE(review): tool-generated resource class (see the file-header
    # warning); change the generator rather than hand-editing.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 availability_zones_enabled: Optional[pulumi.Input[bool]] = None,
                 cassandra_cluster_id: Optional[pulumi.Input[str]] = None,
                 delegated_management_subnet_id: Optional[pulumi.Input[str]] = None,
                 disk_count: Optional[pulumi.Input[int]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 node_count: Optional[pulumi.Input[int]] = None,
                 sku_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Manages a Cassandra Datacenter.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_azure as azure

        example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
        example_virtual_network = azure.network.VirtualNetwork("exampleVirtualNetwork",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            address_spaces=["10.0.0.0/16"])
        example_subnet = azure.network.Subnet("exampleSubnet",
            resource_group_name=example_resource_group.name,
            virtual_network_name=example_virtual_network.name,
            address_prefixes=["10.0.1.0/24"])
        example_assignment = azure.authorization.Assignment("exampleAssignment",
            scope=example_virtual_network.id,
            role_definition_name="Network Contributor",
            principal_id="e5007d2c-4b13-4a74-9b6a-605d99f03501")
        example_cassandra_cluster = azure.cosmosdb.CassandraCluster("exampleCassandraCluster",
            resource_group_name=example_resource_group.name,
            location=example_resource_group.location,
            delegated_management_subnet_id=example_subnet.id,
            default_admin_password="Password1234")
        example_cassandra_datacenter = azure.cosmosdb.CassandraDatacenter("exampleCassandraDatacenter",
            location=example_cassandra_cluster.location,
            cassandra_cluster_id=example_cassandra_cluster.id,
            delegated_management_subnet_id=example_subnet.id,
            node_count=3,
            disk_count=4,
            sku_name="Standard_DS14_v2",
            availability_zones_enabled=False)
        ```

        ## Import

        Cassandra Datacenters can be imported using the `resource id`, e.g.

        ```sh
        $ pulumi import azure:cosmosdb/cassandraDatacenter:CassandraDatacenter example /subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DocumentDB/cassandraClusters/cluster1/dataCenters/dc1
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] availability_zones_enabled: Determines whether availability zones are enabled. Defaults to `true`.
        :param pulumi.Input[str] cassandra_cluster_id: The ID of the Cassandra Cluster. Changing this forces a new Cassandra Datacenter to be created.
        :param pulumi.Input[str] delegated_management_subnet_id: The ID of the delegated management subnet for this Cassandra Datacenter. Changing this forces a new Cassandra Datacenter to be created.
        :param pulumi.Input[int] disk_count: Determines the number of p30 disks that are attached to each node. Defaults to `4`.
        :param pulumi.Input[str] location: The Azure Region where the Cassandra Datacenter should exist. Changing this forces a new Cassandra Datacenter to be created.
        :param pulumi.Input[str] name: The name which should be used for this Cassandra Datacenter. Changing this forces a new Cassandra Datacenter to be created.
        :param pulumi.Input[int] node_count: The number of nodes the Cassandra Datacenter should have. The number should be equal or greater than `3`. Defaults to `3`.
        :param pulumi.Input[str] sku_name: Determines the selected sku. Defaults to Standard_DS14_v2.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: CassandraDatacenterArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages a Cassandra Datacenter.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_azure as azure

        example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
        example_virtual_network = azure.network.VirtualNetwork("exampleVirtualNetwork",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            address_spaces=["10.0.0.0/16"])
        example_subnet = azure.network.Subnet("exampleSubnet",
            resource_group_name=example_resource_group.name,
            virtual_network_name=example_virtual_network.name,
            address_prefixes=["10.0.1.0/24"])
        example_assignment = azure.authorization.Assignment("exampleAssignment",
            scope=example_virtual_network.id,
            role_definition_name="Network Contributor",
            principal_id="e5007d2c-4b13-4a74-9b6a-605d99f03501")
        example_cassandra_cluster = azure.cosmosdb.CassandraCluster("exampleCassandraCluster",
            resource_group_name=example_resource_group.name,
            location=example_resource_group.location,
            delegated_management_subnet_id=example_subnet.id,
            default_admin_password="Password1234")
        example_cassandra_datacenter = azure.cosmosdb.CassandraDatacenter("exampleCassandraDatacenter",
            location=example_cassandra_cluster.location,
            cassandra_cluster_id=example_cassandra_cluster.id,
            delegated_management_subnet_id=example_subnet.id,
            node_count=3,
            disk_count=4,
            sku_name="Standard_DS14_v2",
            availability_zones_enabled=False)
        ```

        ## Import

        Cassandra Datacenters can be imported using the `resource id`, e.g.

        ```sh
        $ pulumi import azure:cosmosdb/cassandraDatacenter:CassandraDatacenter example /subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DocumentDB/cassandraClusters/cluster1/dataCenters/dc1
        ```

        :param str resource_name: The unique name of the resource.
        :param CassandraDatacenterArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: an args-object call is
        # unpacked into keyword arguments for _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(CassandraDatacenterArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       availability_zones_enabled: Optional[pulumi.Input[bool]] = None,
                       cassandra_cluster_id: Optional[pulumi.Input[str]] = None,
                       delegated_management_subnet_id: Optional[pulumi.Input[str]] = None,
                       disk_count: Optional[pulumi.Input[int]] = None,
                       location: Optional[pulumi.Input[str]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       node_count: Optional[pulumi.Input[int]] = None,
                       sku_name: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Shared implementation behind both __init__ overloads: validates
        # options, enforces required properties and registers the resource.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = CassandraDatacenterArgs.__new__(CassandraDatacenterArgs)

            __props__.__dict__["availability_zones_enabled"] = availability_zones_enabled
            # Required properties may only be omitted when looking up by URN.
            if cassandra_cluster_id is None and not opts.urn:
                raise TypeError("Missing required property 'cassandra_cluster_id'")
            __props__.__dict__["cassandra_cluster_id"] = cassandra_cluster_id
            if delegated_management_subnet_id is None and not opts.urn:
                raise TypeError("Missing required property 'delegated_management_subnet_id'")
            __props__.__dict__["delegated_management_subnet_id"] = delegated_management_subnet_id
            __props__.__dict__["disk_count"] = disk_count
            __props__.__dict__["location"] = location
            __props__.__dict__["name"] = name
            __props__.__dict__["node_count"] = node_count
            __props__.__dict__["sku_name"] = sku_name
        super(CassandraDatacenter, __self__).__init__(
            'azure:cosmosdb/cassandraDatacenter:CassandraDatacenter',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            availability_zones_enabled: Optional[pulumi.Input[bool]] = None,
            cassandra_cluster_id: Optional[pulumi.Input[str]] = None,
            delegated_management_subnet_id: Optional[pulumi.Input[str]] = None,
            disk_count: Optional[pulumi.Input[int]] = None,
            location: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            node_count: Optional[pulumi.Input[int]] = None,
            sku_name: Optional[pulumi.Input[str]] = None) -> 'CassandraDatacenter':
        """
        Get an existing CassandraDatacenter resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] availability_zones_enabled: Determines whether availability zones are enabled. Defaults to `true`.
        :param pulumi.Input[str] cassandra_cluster_id: The ID of the Cassandra Cluster. Changing this forces a new Cassandra Datacenter to be created.
        :param pulumi.Input[str] delegated_management_subnet_id: The ID of the delegated management subnet for this Cassandra Datacenter. Changing this forces a new Cassandra Datacenter to be created.
        :param pulumi.Input[int] disk_count: Determines the number of p30 disks that are attached to each node. Defaults to `4`.
        :param pulumi.Input[str] location: The Azure Region where the Cassandra Datacenter should exist. Changing this forces a new Cassandra Datacenter to be created.
        :param pulumi.Input[str] name: The name which should be used for this Cassandra Datacenter. Changing this forces a new Cassandra Datacenter to be created.
        :param pulumi.Input[int] node_count: The number of nodes the Cassandra Datacenter should have. The number should be equal or greater than `3`. Defaults to `3`.
        :param pulumi.Input[str] sku_name: Determines the selected sku. Defaults to Standard_DS14_v2.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _CassandraDatacenterState.__new__(_CassandraDatacenterState)

        __props__.__dict__["availability_zones_enabled"] = availability_zones_enabled
        __props__.__dict__["cassandra_cluster_id"] = cassandra_cluster_id
        __props__.__dict__["delegated_management_subnet_id"] = delegated_management_subnet_id
        __props__.__dict__["disk_count"] = disk_count
        __props__.__dict__["location"] = location
        __props__.__dict__["name"] = name
        __props__.__dict__["node_count"] = node_count
        __props__.__dict__["sku_name"] = sku_name
        return CassandraDatacenter(resource_name, opts=opts, __props__=__props__)

    # Read-only output properties; names map to camelCase wire names.
    @property
    @pulumi.getter(name="availabilityZonesEnabled")
    def availability_zones_enabled(self) -> pulumi.Output[Optional[bool]]:
        """
        Determines whether availability zones are enabled. Defaults to `true`.
        """
        return pulumi.get(self, "availability_zones_enabled")

    @property
    @pulumi.getter(name="cassandraClusterId")
    def cassandra_cluster_id(self) -> pulumi.Output[str]:
        """
        The ID of the Cassandra Cluster. Changing this forces a new Cassandra Datacenter to be created.
        """
        return pulumi.get(self, "cassandra_cluster_id")

    @property
    @pulumi.getter(name="delegatedManagementSubnetId")
    def delegated_management_subnet_id(self) -> pulumi.Output[str]:
        """
        The ID of the delegated management subnet for this Cassandra Datacenter. Changing this forces a new Cassandra Datacenter to be created.
        """
        return pulumi.get(self, "delegated_management_subnet_id")

    @property
    @pulumi.getter(name="diskCount")
    def disk_count(self) -> pulumi.Output[Optional[int]]:
        """
        Determines the number of p30 disks that are attached to each node. Defaults to `4`.
        """
        return pulumi.get(self, "disk_count")

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        The Azure Region where the Cassandra Datacenter should exist. Changing this forces a new Cassandra Datacenter to be created.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name which should be used for this Cassandra Datacenter. Changing this forces a new Cassandra Datacenter to be created.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="nodeCount")
    def node_count(self) -> pulumi.Output[Optional[int]]:
        """
        The number of nodes the Cassandra Datacenter should have. The number should be equal or greater than `3`. Defaults to `3`.
        """
        return pulumi.get(self, "node_count")

    @property
    @pulumi.getter(name="skuName")
    def sku_name(self) -> pulumi.Output[Optional[str]]:
        """
        Determines the selected sku. Defaults to Standard_DS14_v2.
        """
        return pulumi.get(self, "sku_name")
| 49.692718
| 238
| 0.681882
| 3,297
| 27,977
| 5.547164
| 0.0734
| 0.064957
| 0.051288
| 0.046913
| 0.901854
| 0.891519
| 0.884357
| 0.87703
| 0.86522
| 0.853354
| 0
| 0.010151
| 0.228867
| 27,977
| 562
| 239
| 49.781139
| 0.837582
| 0.417236
| 0
| 0.780069
| 1
| 0
| 0.11569
| 0.0535
| 0
| 0
| 0
| 0
| 0
| 1
| 0.161512
| false
| 0.003436
| 0.017182
| 0
| 0.274914
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
694c0032f4c7d5d746960a20dd7c098e01fcfea2
| 7,389
|
py
|
Python
|
dataset_def.py
|
SidRama/Longitudinal-VAE
|
3b8a341da14063728dd37a8e76b4372eb5256c97
|
[
"MIT"
] | 4
|
2021-04-02T04:06:45.000Z
|
2022-03-11T08:18:36.000Z
|
dataset_def.py
|
SidRama/Longitudinal-VAE
|
3b8a341da14063728dd37a8e76b4372eb5256c97
|
[
"MIT"
] | 1
|
2021-10-30T14:00:59.000Z
|
2021-10-30T14:00:59.000Z
|
dataset_def.py
|
SidRama/Longitudinal-VAE
|
3b8a341da14063728dd37a8e76b4372eb5256c97
|
[
"MIT"
] | 1
|
2022-02-19T07:23:08.000Z
|
2022-02-19T07:23:08.000Z
|
from torch.utils.data import Dataset
import pandas as pd
import os
import torch
import numpy as np
class PhysionetDataset(Dataset):
    """Dataset definition for the Physionet Challenge 2012 dataset.

    Loads a NumPy archive containing 'data_readings', 'outcome_attrib',
    'data_mask' and 'outcome_mask' arrays and serves one flattened reading
    (plus its labels and masks) per index.
    """

    def __init__(self, data_file, root_dir, transform=None):
        """
        :param data_file: archive file name (joined onto root_dir).
        :param root_dir: directory containing the archive.
        :param transform: optional callable applied to each data tensor.
        """
        data = np.load(os.path.join(root_dir, data_file))
        # Collapse any leading axes so each row is a single sample vector.
        self.data_source = data['data_readings'].reshape(-1, data['data_readings'].shape[-1])
        self.label_source = data['outcome_attrib'].reshape(-1, data['outcome_attrib'].shape[-1])
        self.mask_source = data['data_mask'].reshape(-1, data['data_mask'].shape[-1])
        self.label_mask_source = data['outcome_mask'].reshape(-1, data['outcome_mask'].shape[-1])
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.data_source)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        patient_data = self.data_source[idx, :]
        patient_data = torch.Tensor(np.array(patient_data))
        mask = self.mask_source[idx, :]
        mask = np.array(mask, dtype='uint8')
        # BUGFIX: integer indexing returns a *view* into label_source, so the
        # original in-place `label[8] -= 24` corrupted the stored labels —
        # every repeated access to the same index subtracted another 24.
        # Copy first so the adjustment is local to this sample.
        label = np.array(self.label_source[idx, :])
        # NOTE(review): offset of 24 on label column 8 — presumably a unit
        # shift (e.g. hours); confirm against the preprocessing spec.
        label[8] = label[8] - 24
        label_mask = self.label_mask_source[idx, :]
        label = torch.Tensor(np.concatenate((label, label_mask)))
        if self.transform:
            patient_data = self.transform(patient_data)
        sample = {'data': patient_data, 'label': label, 'idx': idx, 'mask': mask}
        return sample
class RotatedMNISTDataset(Dataset):
    """
    Dataset definition for the rotated MNIST dataset when using simple MLP-based VAE.
    Data formatted as dataset_length x 784.
    """

    def __init__(self, data_file, label_file, root_dir, mask_file=None, transform=None):
        raw_data = np.load(os.path.join(root_dir, data_file))
        raw_labels = np.load(os.path.join(root_dir, label_file))
        self.data_source = raw_data.reshape(-1, raw_data.shape[-1])
        self.label_source = raw_labels.reshape(raw_labels.shape[0], -1).T
        # Without a mask file, every pixel counts as observed.
        self.mask_source = (np.load(os.path.join(root_dir, mask_file))
                            if mask_file is not None
                            else np.ones_like(self.data_source))
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.data_source)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        image = np.array([self.data_source[idx, :]])
        observed = np.array([self.mask_source[idx, :]], dtype='uint8')
        target = torch.Tensor(np.array(self.label_source[idx, :]))
        if self.transform:
            image = self.transform(image)
        return {'digit': image, 'label': target, 'idx': idx, 'mask': observed}
class RotatedMNISTDatasetConv(Dataset):
    """Rotated MNIST dataset for the CNN-based VAE.

    Each flat 784-vector sample is reshaped to a 28 x 28 x 1 image.
    """
    def __init__(self, data_file, label_file, root_dir, mask_file=None, transform=None):
        """Load data/label (and optional mask) arrays from files in root_dir."""
        raw = np.load(os.path.join(root_dir, data_file))
        raw_labels = np.load(os.path.join(root_dir, label_file))
        self.data_source = raw.reshape(-1, raw.shape[-1])
        self.label_source = raw_labels.reshape(raw_labels.shape[0], -1).T
        if mask_file is None:
            # No mask supplied: treat every pixel as observed.
            self.mask_source = np.ones_like(self.data_source)
        else:
            self.mask_source = np.load(os.path.join(root_dir, mask_file))
        self.root_dir = root_dir
        self.transform = transform
    def __len__(self):
        return len(self.data_source)
    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        # Single-channel 28x28 image with a trailing channel axis.
        digit = np.array(self.data_source[idx, :]).reshape(28, 28)[..., np.newaxis]
        mask = np.array([self.mask_source[idx, :]], dtype='uint8')
        label = torch.Tensor(np.array(self.label_source[idx, :]))
        if self.transform:
            digit = self.transform(digit)
        return {'digit': digit, 'label': label, 'idx': idx, 'mask': mask}
class HealthMNISTDataset(Dataset):
    """Health MNIST dataset for the simple MLP-based VAE.

    Samples are stored as flat vectors of length 1296.
    """
    def __init__(self, csv_file_data, csv_file_label, mask_file, root_dir, transform=None):
        """Read data/mask (headerless) and label (with header) CSVs from root_dir."""
        self.data_source = pd.read_csv(os.path.join(root_dir, csv_file_data), header=None)
        self.mask_source = pd.read_csv(os.path.join(root_dir, mask_file), header=None)
        self.label_source = pd.read_csv(os.path.join(root_dir, csv_file_label), header=0)
        self.root_dir = root_dir
        self.transform = transform
    def __len__(self):
        return len(self.data_source)
    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        digit = np.array([self.data_source.iloc[idx, :]], dtype='uint8')
        mask = np.array([self.mask_source.iloc[idx, :]], dtype='uint8')
        row = self.label_source.iloc[idx, :]
        # Reorder label columns to: time_age, disease_time, subject, gender,
        # disease, location. NaNs are replaced with zeros.
        label = torch.Tensor(np.nan_to_num(np.array(row[np.array([6, 4, 0, 5, 3, 7])])))
        if self.transform:
            digit = self.transform(digit)
        return {'digit': digit, 'label': label, 'idx': idx, 'mask': mask}
class HealthMNISTDatasetConv(Dataset):
    """Health MNIST dataset for the CNN-based VAE.

    Each flat 1296-vector sample is reshaped to a 36 x 36 x 1 image.
    Supports both integer and slice indexing.
    """
    def __init__(self, csv_file_data, csv_file_label, mask_file, root_dir, transform=None):
        """Read data/mask (headerless) and label (with header) CSVs from root_dir."""
        self.data_source = pd.read_csv(os.path.join(root_dir, csv_file_data), header=None)
        self.mask_source = pd.read_csv(os.path.join(root_dir, mask_file), header=None)
        self.label_source = pd.read_csv(os.path.join(root_dir, csv_file_label), header=0)
        self.root_dir = root_dir
        self.transform = transform
    def __len__(self):
        return len(self.data_source)
    def __getitem__(self, key):
        # Ints delegate to get_item; slices expand to a list of samples.
        if isinstance(key, int):
            return self.get_item(key)
        if isinstance(key, slice):
            return [self.get_item(i) for i in range(*key.indices(len(self)))]
        raise TypeError
    def get_item(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        # Single-channel 36x36 image with a trailing channel axis.
        digit = np.array(self.data_source.iloc[idx, :], dtype='uint8').reshape(36, 36)[..., np.newaxis]
        mask = np.array([self.mask_source.iloc[idx, :]], dtype='uint8')
        row = self.label_source.iloc[idx, :]
        # time_age, disease_time, subject, gender, disease, location
        label = torch.Tensor(np.nan_to_num(np.array(row[np.array([6, 4, 0, 5, 3, 7])])))
        if self.transform:
            digit = self.transform(digit)
        return {'digit': digit, 'label': label, 'idx': idx, 'mask': mask}
| 33.586364
| 97
| 0.621464
| 998
| 7,389
| 4.38978
| 0.121242
| 0.044739
| 0.054326
| 0.041543
| 0.810774
| 0.767633
| 0.76147
| 0.750057
| 0.741383
| 0.732025
| 0
| 0.012292
| 0.25132
| 7,389
| 219
| 98
| 33.739726
| 0.779646
| 0.093111
| 0
| 0.737589
| 0
| 0
| 0.032546
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113475
| false
| 0
| 0.035461
| 0.035461
| 0.269504
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
695a9782e45d3615c7dcb68c67821e8f497503c2
| 32,072
|
py
|
Python
|
src/processor.py
|
aaronmueller/mBERT-docclass
|
c638c035d686e2242619c70318a0220b7d09033a
|
[
"Apache-2.0"
] | null | null | null |
src/processor.py
|
aaronmueller/mBERT-docclass
|
c638c035d686e2242619c70318a0220b7d09033a
|
[
"Apache-2.0"
] | null | null | null |
src/processor.py
|
aaronmueller/mBERT-docclass
|
c638c035d686e2242619c70318a0220b7d09033a
|
[
"Apache-2.0"
] | null | null | null |
import os
import csv
import glob
import json
import langcodes
from nltk.tokenize import sent_tokenize as nltk_sent_tokenize
from util import Mode
# Placeholder label for dependency relations outside the known label set.
UNK = '<UNK>'
# POS tags (punctuation/symbols) whose dependency labels are blanked out
# by ParsingProcessor._read_file.
POS_TO_IGNORE = {'``', "''", ':', ',', '.', 'PU', 'PUNCT', 'SYM'}
def sent_tokenize(text, lang='en'):
    """Split *text* into sentences with NLTK.

    Args:
        text: the raw text to split.
        lang: ISO language code; translated to a language name for NLTK.

    Returns:
        A list of sentence strings. Falls back to NLTK's default (English)
        model when no sentence tokenizer exists for the language.
    """
    language = langcodes.Language(lang).language_name().lower()
    try:
        return nltk_sent_tokenize(text, language=language)
    except LookupError:
        # NLTK raises LookupError when no punkt model is available for the
        # language; the original bare `except:` also hid unrelated errors.
        return nltk_sent_tokenize(text)
class BaseProcessor(object):
    """Base class for data converters for data sets."""

    def __init__(self):
        # Memoizes example lists keyed by (data_dir, mode).
        self.cache = dict()

    def get_train_examples(self, data_dir):
        """Gets a collection of `LabelExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `LabelExample`s for the dev set."""
        raise NotImplementedError()

    def get_test_examples(self, data_dir):
        """Gets a collection of `LabelExample`s for prediction."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    def get_examples(self, data_dir, mode):
        """Return (and cache) the examples of *mode* loaded from *data_dir*."""
        key = (data_dir, mode)
        if key not in self.cache:
            if mode == Mode.train:
                loader = self.get_train_examples
            elif mode == Mode.dev:
                loader = self.get_dev_examples
            elif mode == Mode.test:
                loader = self.get_test_examples
            else:
                raise ValueError('Wrong mode', mode)
            self.cache[key] = loader(data_dir)
        return self.cache[key]
##################################################################
# CLASSIFICATION
##################################################################
class ClassificationExample(object):
    """A single training/test example for simple sequence classification."""
    def __init__(self, guid, text_a, text_b=None, label=None, language=None):
        """Constructs a ClassificationExample.
        Args:
            guid: Unique id for the example.
            text_a: string. The untokenized text of the first sequence. For single
                sequence tasks, only this sequence must be specified.
            text_b: (Optional) string. The untokenized text of the second sequence.
                Only must be specified for sequence pair tasks.
            label: (Optional) string. The label of the example. This should be
                specified for train and dev examples, but not for test examples.
            language: (Optional) string. Language code of the example's text;
                stored on the instance as `self.lang`.
        """
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label
        self.lang = language
        # Always None at construction; presumably populated later during
        # batching -- verify against callers.
        self.batch_example = None
class TSVProcessor(BaseProcessor):
    """Processor base providing tab-separated-value file reading."""

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file and returns its rows as lists."""
        with open(input_file, "r") as f:
            return [row for row in csv.reader(f, delimiter="\t", quotechar=quotechar)]
class XnliProcessor(TSVProcessor):
    """Processor for the XNLI data set."""

    def __init__(self, lang):
        super().__init__()
        self.language = lang

    def get_train_examples(self, data_dir):
        """Build train examples from the MultiNLI translation for this language."""
        path = f'{data_dir}/multinli/multinli.train.{self.language}.tsv'
        examples = []
        for row_num, row in enumerate(self._read_tsv(path)):
            if row_num == 0:  # header row
                continue
            gold = row[2]
            # The MultiNLI dumps use "contradictory" where XNLI uses
            # "contradiction"; normalize to the XNLI label.
            if gold == "contradictory":
                gold = "contradiction"
            examples.append(
                ClassificationExample(guid="train-%d" % row_num,
                                      text_a=row[0],
                                      text_b=row[1],
                                      label=gold))
        return examples

    def _split_examples(self, path, prefix):
        """Shared reader for the dev/test TSVs, filtered to this language."""
        examples = []
        for row_num, row in enumerate(self._read_tsv(path)):
            if row_num == 0:  # header row
                continue
            if row[0] != self.language:
                continue
            examples.append(
                ClassificationExample(guid="%s-%d" % (prefix, row_num),
                                      text_a=row[6],
                                      text_b=row[7],
                                      label=row[1]))
        return examples

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._split_examples(f'{data_dir}/xnli.dev.tsv', "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._split_examples(f'{data_dir}/xnli.test.tsv', "test")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]
class MLDocProcessor(TSVProcessor):
    """Processor for the MLDoc data set."""

    def __init__(self, lang):
        super().__init__()
        self.language = lang

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(f"{data_dir}/{self.language}.train.1000"), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(f"{data_dir}/{self.language}.dev"), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(f"{data_dir}/{self.language}.test"), "test")

    def get_labels(self):
        """See base class."""
        return ["CCAT", "ECAT", "GCAT", "MCAT"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for idx, row in enumerate(lines):
            # First sentence becomes text_a; the second (if any) text_b.
            sents = sent_tokenize(row[1], lang=self.language)
            second = sents[1] if len(sents) > 1 else None
            examples.append(
                ClassificationExample(guid="%s-%s" % (set_type, idx),
                                      text_a=sents[0],
                                      text_b=second,
                                      label=row[0]))
        return examples
class LangIDProcessor(MLDocProcessor):
    """Processor for a language-identification data set.

    The label set is read from `observed.lang.txt` inside *data_dir*
    (one language code per line).
    """

    def __init__(self, data_dir):
        # BUG FIX: MLDocProcessor.__init__ requires a `lang` argument, so the
        # original bare super().__init__() raised TypeError on construction.
        # Language-ID data is multilingual; 'en' matches the default language
        # used by the inherited sent_tokenize call's fallback behaviour.
        super().__init__('en')
        # BUG FIX: the original open(...) was never closed; use a context
        # manager so the handle is released deterministically.
        with open(f"{data_dir}/observed.lang.txt", "r") as f:
            self.labels = [lang.strip() for lang in f.readlines()]

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(f"{data_dir}/langid.train"), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(f"{data_dir}/langid.dev"), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(f"{data_dir}/langid.test"), "test")

    def get_labels(self):
        """See base class."""
        return self.labels
class TobaccoProcessor(BaseProcessor):
    """Processor for the Tobacco Watcher data set."""

    def __init__(self, lang):
        super().__init__()
        self.language = lang

    def _examples_for(self, data_dir, fold):
        # All folds live in one JSONL file; _create_examples filters on 'fold'.
        path = f"{data_dir}/20180723.translated-to-{self.language}"
        return self._create_examples(self._read_jsonl(path), fold)

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._examples_for(data_dir, "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._examples_for(data_dir, "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._examples_for(data_dir, "test")

    def get_labels(self):
        """See base class."""
        return ["no", "yes"]  # about_tobacco

    def _read_jsonl(self, input_file):
        """Parse one JSON object per line from *input_file*."""
        with open(input_file, 'r') as fp:
            return [json.loads(line) for line in fp]

    def _create_examples(self, objs, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for idx, obj in enumerate(objs):
            if obj['fold'] != set_type:
                continue
            sents = sent_tokenize(obj['body'], lang=self.language)
            # The title is prepended to the first body sentence when one exists.
            if sents:
                text_a = obj['title'] + sents[0]
                text_b = sents[1] if len(sents) >= 2 else None
            else:
                text_a, text_b = obj['title'], None
            examples.append(
                ClassificationExample(guid="%s-%s" % (set_type, idx),
                                      text_a=text_a,
                                      text_b=text_b,
                                      label=obj['label']['about_tobacco']))
        return examples
class MLDocFeedbackProcessor(TobaccoProcessor):
    """Processor for the converted MLDoc JSON data set."""

    def _resolve_path(self, data_dir):
        # data_dir may point directly at a JSON file or at its directory.
        if os.path.isfile(data_dir):
            return data_dir
        return f"{data_dir}/combined_mldoc.json"

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_jsonl(self._resolve_path(data_dir)), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_jsonl(self._resolve_path(data_dir)), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_jsonl(self._resolve_path(data_dir)), "test")

    def get_languages(self):
        """Returns list of all languages in training set."""
        return ["de", "en", "es", "fr", "it", "ja", "ru", "zh"]

    def get_labels(self):
        """Returns list of all possible classification labels."""
        return ["CCAT", "ECAT", "GCAT", "MCAT"]

    def _create_examples(self, objs, set_type):
        """Creates examples for the training and dev sets; documents whose
        body yields no sentences are skipped."""
        examples = []
        for idx, obj in enumerate(objs):
            if obj['fold'] != set_type:
                continue
            # 'all' matches every language; otherwise require an exact match.
            if obj['lang'] != self.language and self.language != 'all':
                continue
            sents = sent_tokenize(obj['body'], lang=self.language)
            if not sents:
                continue  # nothing usable in the body
            examples.append(
                ClassificationExample(
                    guid="%s-%s" % (set_type, idx),
                    text_a=sents[0],
                    text_b=sents[1] if len(sents) >= 2 else None,
                    label=obj['label'],
                    language=obj['lang']))
        return examples
class TobaccoFeedbackProcessor(TobaccoProcessor):
    """Processor for the Tobacco Watcher feedback data set
    (clean_feedback.json)."""

    def _resolve_path(self, data_dir):
        # data_dir may point directly at a JSON file or at its directory.
        if os.path.isfile(data_dir):
            return data_dir
        return f"{data_dir}/clean_feedback.json"

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_jsonl(self._resolve_path(data_dir)), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_jsonl(self._resolve_path(data_dir)), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_jsonl(self._resolve_path(data_dir)), "test")

    def get_languages(self):
        """Returns list of all languages in training set."""
        return ["ar", "bn", "de", "en", "es", "fr", "hi", "id", "pt",
                "ru", "ta", "th", "tr", "uk", "vi", "zh"]

    def _create_examples(self, objs, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for idx, obj in enumerate(objs):
            if obj['fold'] != set_type:
                continue
            # 'all' matches every language; otherwise require an exact match.
            if obj['lang'] != self.language and self.language != 'all':
                continue
            sents = sent_tokenize(obj['body'], lang=self.language)
            # The title is prepended to the first body sentence when one exists.
            if sents:
                text_a = obj['title'] + sents[0]
                text_b = sents[1] if len(sents) >= 2 else None
            else:
                text_a, text_b = obj['title'], None
            examples.append(
                ClassificationExample(
                    guid="%s-%s" % (set_type, idx),
                    text_a=text_a,
                    text_b=text_b,
                    label=obj['label']['about_tobacco'],
                    language=obj['lang']))
        return examples
class MrpcProcessor(TSVProcessor):
    """Processor for the MRPC data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(f'{data_dir}/train.tsv'), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(f'{data_dir}/dev.tsv'), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(f'{data_dir}/test.tsv'), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for idx, row in enumerate(lines):
            if idx == 0:  # header row
                continue
            # The test split carries no gold labels; "0" is a dummy.
            gold = "0" if set_type == "test" else row[0]
            examples.append(
                ClassificationExample(guid="%s-%s" % (set_type, idx),
                                      text_a=row[3],
                                      text_b=row[4],
                                      label=gold))
        return examples
class MnliProcessor(TSVProcessor):
    """Processor for the MultiNLI data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(f'{data_dir}/train.tsv'), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(f'{data_dir}/dev_matched.tsv'), "dev_matched")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(f'{data_dir}/test_matched.tsv'), "test")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for idx, row in enumerate(lines):
            if idx == 0:  # header row
                continue
            # The test split carries no gold labels; use a dummy label.
            gold = "contradiction" if set_type == "test" else row[-1]
            examples.append(
                ClassificationExample(guid="%s-%s" % (set_type, row[0]),
                                      text_a=row[8],
                                      text_b=row[9],
                                      label=gold))
        return examples
class ColaProcessor(TSVProcessor):
    """Processor for the CoLA data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(f'{data_dir}/train.tsv'), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(f'{data_dir}/dev.tsv'), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(f'{data_dir}/test.tsv'), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets.

        Note: no header row is skipped here (unlike the other GLUE
        processors); the test TSV has a different column layout and no
        gold labels.
        """
        examples = []
        for idx, row in enumerate(lines):
            if set_type == "test":
                sentence, gold = row[1], "0"
            else:
                sentence, gold = row[3], row[1]
            examples.append(
                ClassificationExample(guid="%s-%s" % (set_type, idx),
                                      text_a=sentence,
                                      text_b=None,
                                      label=gold))
        return examples
##################################################################
# LABELING
##################################################################
class LabelExample(object):
    """A single training/test example for simple sequence labeling."""
    def __init__(self, guid, text, label=None):
        """Constructs a LabelExample.
        Args:
            guid: Unique id for the example.
            text: string. The untokenized text of the first sequence. For single
                sequence tasks, only this sequence must be specified.
            label: (Optional) string. The label of the example. This should be
                specified for train and dev examples, but not for test examples.
                In practice the sequence-labeling processors pass a list of
                per-token tags here.
        """
        self.guid = guid
        self.text = text
        self.label = label
class LabelProcessor(BaseProcessor):
    """Base class for data converters for sequence labeling data sets."""

    def _create_examples(self, lines, set_type):
        """Wrap (tokens, tags) pairs as LabelExamples with split-scoped guids."""
        return [
            LabelExample(guid="%s-%d" % (set_type, idx), text=words, label=tags)
            for idx, (words, tags) in enumerate(lines)
        ]
class NERProcessor(LabelProcessor):
    """Processor for the NER data set."""

    def __init__(self, lang):
        super().__init__()
        self.language = lang
        labels = self.get_labels()
        # Label inventory must be duplicate-free for the id<->label maps
        # to be bijective.
        assert len(labels) == len(set(labels))
        self.id2label = labels
        self.label2id = {label: i for i, label in enumerate(labels)}

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_file(f'{data_dir}/{self.language}/train.iob2.txt'), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_file(f'{data_dir}/{self.language}/dev.iob2.txt'), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_file(f'{data_dir}/{self.language}/test.iob2.txt'), "test")

    def get_labels(self):
        """See base class."""
        return [
            "B-LOC", "B-MISC", "B-ORG", "B-PER", "I-LOC", "I-MISC", "I-ORG",
            "I-PER", "O"
        ]

    @classmethod
    def _read_file(cls, input_file):
        """Read blank-line separated "word<TAB>label" data into a list of
        (words, labels) sentence pairs."""
        sentences = []
        words, labels = [], []
        with open(input_file, "r") as f:
            for raw in f:
                raw = raw.strip()
                if raw:
                    word, tag = raw.split('\t')
                    words.append(word)
                    labels.append(tag)
                else:
                    # A blank line terminates the current sentence.
                    assert len(words) == len(labels)
                    sentences.append((words, labels))
                    words, labels = [], []
        if words and len(words) == len(labels):
            sentences.append((words, labels))
        return sentences
class WikiNERProcessor(NERProcessor):
    """NER processor for WikiNER-style files where each token carries a
    'prefix:' that is stripped before use."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_file(f'{data_dir}/{self.language}/train'), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_file(f'{data_dir}/{self.language}/dev'), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_file(f'{data_dir}/{self.language}/test'), "test")

    def get_labels(self):
        """See base class (no MISC tags in this label set)."""
        return ["B-LOC", "B-ORG", "B-PER", "I-LOC", "I-ORG", "I-PER", "O"]

    @classmethod
    def _read_file(cls, input_file):
        """Read blank-line separated "token<TAB>label" data, dropping the
        part of each token before the first ':'."""
        sentences = []
        words, labels = [], []
        with open(input_file, "r") as f:
            for raw in f:
                raw = raw.strip()
                if raw:
                    token, tag = raw.split('\t')
                    # Token is stored as "<prefix>:<word>"; keep only the word.
                    words.append(token.split(':', 1)[1])
                    labels.append(tag)
                else:
                    # A blank line terminates the current sentence.
                    assert len(words) == len(labels)
                    sentences.append((words, labels))
                    words, labels = [], []
        if words and len(words) == len(labels):
            sentences.append((words, labels))
        return sentences
class POSProcessor(LabelProcessor):
    """Processor for the POS data set from UD."""
    def __init__(self, lang):
        super().__init__()
        self.language = lang
        # Tag inventory must be duplicate-free.
        assert len(self.get_labels()) == len(set(self.get_labels()))
    def get_train_examples(self, data_dir):
        """See base class."""
        fp = f'{data_dir}/{self.language}/{self.language}-ud-train.conllu'
        return self._create_examples(self._read_file(fp), "train")
    def get_dev_examples(self, data_dir):
        """See base class."""
        fp = f'{data_dir}/{self.language}/{self.language}-ud-dev.conllu'
        return self._create_examples(self._read_file(fp), "dev")
    def get_test_examples(self, data_dir):
        """See base class."""
        fp = f'{data_dir}/{self.language}/{self.language}-ud-test.conllu'
        return self._create_examples(self._read_file(fp), "test")
    def get_labels(self):
        """See base class. Uses "CONJ" (RawPOSProcessor uses "CCONJ")."""
        return [
            "_", "ADJ", "ADP", "ADV", "AUX", "CONJ", "DET", "INTJ", "NOUN",
            "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB",
            "X"
        ]
    @classmethod
    def _read_file(cls, input_file):
        """Parse a CoNLL-U file into a list of (words, tags) sentence pairs."""
        with open(input_file, "r") as f:
            lines = []
            words, labels = [], []
            for line in f.readlines():
                tok = line.strip().split('\t')
                # Comment lines ('#...') and separator lines (fewer than two
                # tab-separated fields) flush the sentence built so far.
                if len(tok) < 2 or line[0] == '#':
                    assert len(words) == len(labels)
                    if words:
                        lines.append((words, labels))
                        words, labels = [], []
                # Only plain token lines (integer first field) contribute;
                # multiword ranges like '1-2' fail isdigit() and are skipped.
                if tok[0].isdigit():
                    # Columns: tok[1] is the word form, tok[3] the POS tag.
                    word, label = tok[1], tok[3]
                    words.append(word)
                    labels.append(label)
            if len(words) == len(labels) and words:
                lines.append((words, labels))
            return lines
class RawPOSProcessor(POSProcessor):
    '''
    UD 2.3 raw file: treebank files are located by glob pattern instead of
    an exact, language-derived file name.
    '''

    def _glob_examples(self, data_dir, split):
        """Return examples for *split*, or [] when the treebank lacks it."""
        matches = glob.glob(f'{data_dir}/{self.language}/*-ud-{split}.conllu')
        if not matches:
            return []
        if len(matches) > 1:
            raise ValueError('Duplicate ' + split)
        return self._create_examples(self._read_file(matches[0]), split)

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._glob_examples(data_dir, 'train')

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._glob_examples(data_dir, 'dev')

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._glob_examples(data_dir, 'test')

    def get_labels(self):
        """See base class. Uses "CCONJ" (base POSProcessor uses "CONJ")."""
        return [
            "_", "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN",
            "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB",
            "X"
        ]
##################################################################
# PARSING
##################################################################
class ParsingExample(object):
    """A single training/test example for simple sequence parsing."""
    def __init__(self, guid, text, pos, head=None, label=None):
        """Constructs a ParsingExample.
        Args:
            guid: Unique id for the example.
            text: string. The untokenized text of the first sequence. For single
                sequence tasks, only this sequence must be specified.
            pos: POS tags aligned with the tokens of `text`.
            head: (Optional) int. Head of dependency relation
            label: (Optional) string. The label of the dependency relation. This should be
                specified for train and dev examples, but not for test examples.
        """
        self.guid = guid
        self.text = text
        self.pos = pos
        self.head = head
        self.label = label
class ParsingProcessor(BaseProcessor):
    """Processor for the Parsing data set from UD."""
    def __init__(self, lang):
        super().__init__()
        self.language = lang
        # Both the relation-label and POS inventories must be duplicate-free.
        assert len(self.get_labels()) == len(set(self.get_labels()))
        assert len(self.get_pos()) == len(set(self.get_pos()))
    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, (text, pos, head, label)) in enumerate(lines):
            guid = "%s-%d" % (set_type, i)
            examples.append(
                ParsingExample(guid=guid,
                               text=text,
                               pos=pos,
                               head=head,
                               label=label))
        return examples
    def get_train_examples(self, data_dir):
        """See base class."""
        fp = f"{data_dir}/{self.language}/{self.language}-ud-train.conllu"
        return self._create_examples(self._read_file(fp), "train")
    def get_dev_examples(self, data_dir):
        """See base class."""
        fp = f"{data_dir}/{self.language}/{self.language}-ud-dev.conllu"
        return self._create_examples(self._read_file(fp), "dev")
    def get_test_examples(self, data_dir):
        """See base class."""
        fp = f"{data_dir}/{self.language}/{self.language}-ud-test.conllu"
        return self._create_examples(self._read_file(fp), "test")
    def get_labels(self):
        """Dependency relation labels, plus UNK for anything unrecognized."""
        return [
            '_', 'acl', 'advcl', 'advmod', 'amod', 'appos', 'aux', 'case',
            'cc', 'ccomp', 'compound', 'conj', 'cop', 'csubj', 'dep', 'det',
            'discourse', 'dislocated', 'expl', 'fixed', 'flat', 'goeswith',
            'iobj', 'list', 'mark', 'nmod', 'nsubj', 'nummod', 'obj', 'obl',
            'orphan', 'parataxis', 'punct', 'reparandum', 'root', 'vocative',
            'xcomp', UNK
        ]
    def get_pos(self):
        """POS tag inventory used alongside the dependency labels."""
        return [
            "_", "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ",
            "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM",
            "VERB", "X"
        ]
    @classmethod
    def _read_file(cls, input_file):
        """Parse a CoNLL-U file into (words, pos_tags, heads, labels) tuples."""
        with open(input_file, "r") as f:
            lines = []
            words, pos_tags, heads, labels = [], [], [], []
            for line in f.readlines():
                tok = line.strip().split('\t')
                # Comment lines ('#...') and separator lines (fewer than two
                # tab-separated fields) flush the sentence built so far.
                if len(tok) < 2 or line[0] == '#':
                    assert len(words) == len(pos_tags) == len(heads) == len(
                        labels)
                    if words:
                        lines.append((words, pos_tags, heads, labels))
                        words, pos_tags, heads, labels = [], [], [], []
                # Only plain token lines (integer first field) contribute;
                # multiword ranges like '1-2' fail isdigit() and are skipped.
                if tok[0].isdigit():
                    word, pos, head, label = tok[1], tok[3], tok[6], tok[7]
                    words.append(word)
                    pos_tags.append(pos)
                    heads.append(int(head))
                    # Relations on punctuation/symbol tags are blanked out;
                    # other labels are stripped of their subtype (after ':').
                    if pos in POS_TO_IGNORE:
                        labels.append('')
                    else:
                        labels.append(label.split(':')[0])
            if words:
                assert len(words) == len(pos_tags) == len(heads) == len(labels)
                lines.append((words, pos_tags, heads, labels))
            return lines
class RawParsingProcessor(ParsingProcessor):
    '''
    UD 2.3 raw file: treebank files are located by glob pattern instead of
    an exact, language-derived file name.
    '''

    def get_labels(self):
        """See base class (adds 'clf' over the base label set)."""
        return [
            '_', 'acl', 'advcl', 'advmod', 'amod', 'appos', 'aux', 'case',
            'cc', 'ccomp', 'clf', 'compound', 'conj', 'cop', 'csubj', 'dep',
            'det', 'discourse', 'dislocated', 'expl', 'fixed', 'flat',
            'goeswith', 'iobj', 'list', 'mark', 'nmod', 'nsubj', 'nummod',
            'obj', 'obl', 'orphan', 'parataxis', 'punct', 'reparandum', 'root',
            'vocative', 'xcomp', UNK
        ]

    def _glob_examples(self, data_dir, split):
        """Return examples for *split*, or [] when the treebank lacks it."""
        matches = glob.glob(f'{data_dir}/{self.language}/*-ud-{split}.conllu')
        if not matches:
            return []
        if len(matches) > 1:
            raise ValueError('Duplicate ' + split)
        return self._create_examples(self._read_file(matches[0]), split)

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._glob_examples(data_dir, 'train')

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._glob_examples(data_dir, 'dev')

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._glob_examples(data_dir, 'test')
| 35.321586
| 90
| 0.523634
| 3,672
| 32,072
| 4.386983
| 0.082789
| 0.049538
| 0.056987
| 0.057794
| 0.80961
| 0.783413
| 0.766031
| 0.747719
| 0.73102
| 0.719908
| 0
| 0.005789
| 0.337491
| 32,072
| 907
| 91
| 35.360529
| 0.752353
| 0.109784
| 0
| 0.744713
| 0
| 0
| 0.106707
| 0.053697
| 0
| 0
| 0
| 0
| 0.013595
| 1
| 0.141994
| false
| 0
| 0.010574
| 0.015106
| 0.314199
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
697173c78fc928f7503fe04bb399ba900a7be741
| 140
|
py
|
Python
|
fpn/symbols/__init__.py
|
LZP4GitHub/RoITransformer_DOTA
|
1794694671247c3ca55781d6d61e0a9ed59a64b8
|
[
"MIT"
] | 200
|
2019-06-14T08:33:58.000Z
|
2022-03-31T11:29:03.000Z
|
fpn/symbols/__init__.py
|
LZP4GitHub/RoITransformer_DOTA
|
1794694671247c3ca55781d6d61e0a9ed59a64b8
|
[
"MIT"
] | 28
|
2019-06-21T10:57:02.000Z
|
2021-07-14T06:54:21.000Z
|
fpn/symbols/__init__.py
|
LZP4GitHub/RoITransformer_DOTA
|
1794694671247c3ca55781d6d61e0a9ed59a64b8
|
[
"MIT"
] | 42
|
2019-06-14T08:58:10.000Z
|
2022-01-27T02:27:35.000Z
|
import resnet_v1_101_fpn_rcnn
import resnet_v1_101_fpn_rcnn_rotbox_light_head
import resnet_v1_101_fpn_rcnn_rotbox_light_head_RoITransformer
| 46.666667
| 62
| 0.964286
| 25
| 140
| 4.64
| 0.4
| 0.310345
| 0.362069
| 0.439655
| 0.87931
| 0.87931
| 0.672414
| 0.672414
| 0.672414
| 0
| 0
| 0.088889
| 0.035714
| 140
| 3
| 62
| 46.666667
| 0.77037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 10
|
d6ed8246560e9a927b6391f2f98eeb841ae2218a
| 4,090
|
py
|
Python
|
dfirtrack_main/tests/taskstatus/test_taskstatus_views.py
|
blackhatethicalhacking/dfirtrack
|
9c2e13015291f2981d14d63c9683e7c447e91f3a
|
[
"MIT"
] | 4
|
2020-03-06T17:37:09.000Z
|
2020-03-17T07:50:55.000Z
|
dfirtrack_main/tests/taskstatus/test_taskstatus_views.py
|
blackhatethicalhacking/dfirtrack
|
9c2e13015291f2981d14d63c9683e7c447e91f3a
|
[
"MIT"
] | null | null | null |
dfirtrack_main/tests/taskstatus/test_taskstatus_views.py
|
blackhatethicalhacking/dfirtrack
|
9c2e13015291f2981d14d63c9683e7c447e91f3a
|
[
"MIT"
] | 1
|
2020-03-06T20:54:52.000Z
|
2020-03-06T20:54:52.000Z
|
from django.contrib.auth.models import User
from django.test import TestCase
from dfirtrack_main.models import Taskstatus
import urllib.parse
class TaskstatusViewTestCase(TestCase):
    """Tests for the taskstatus list and detail views (auth, status, template, context)."""

    # Credentials shared by every test; defined once instead of repeating the
    # literal username/password in each method.
    _USERNAME = 'testuser_taskstatus'
    _PASSWORD = 'TZjmjiUQviOnIEral6l9'

    @classmethod
    def setUpTestData(cls):
        # create object
        Taskstatus.objects.create(taskstatus_name='taskstatus_1')
        # create user (no reference kept — the unused `test_user` local was removed;
        # tests authenticate through _login)
        User.objects.create_user(username=cls._USERNAME, password=cls._PASSWORD)

    def _login(self):
        """Authenticate the shared test user on the test client."""
        self.client.login(username=self._USERNAME, password=self._PASSWORD)

    def test_taskstatuss_list_not_logged_in(self):
        """Anonymous requests to the list view redirect to the login page."""
        # create url
        destination = '/login/?next=' + urllib.parse.quote('/taskstatuss/', safe='')
        # get response
        response = self.client.get('/taskstatuss/', follow=True)
        # compare
        self.assertRedirects(response, destination, status_code=302, target_status_code=200)

    def test_taskstatuss_list_logged_in(self):
        """Authenticated requests to the list view succeed."""
        self._login()
        response = self.client.get('/taskstatuss/')
        self.assertEqual(response.status_code, 200)

    def test_taskstatuss_list_template(self):
        """The list view renders the expected template."""
        self._login()
        response = self.client.get('/taskstatuss/')
        self.assertTemplateUsed(response, 'dfirtrack_main/taskstatus/taskstatuss_list.html')

    def test_taskstatuss_list_get_user_context(self):
        """The list view exposes the logged-in user in the template context."""
        self._login()
        response = self.client.get('/taskstatuss/')
        self.assertEqual(str(response.context['user']), 'testuser_taskstatus')

    def test_taskstatuss_detail_not_logged_in(self):
        """Anonymous requests to the detail view redirect to the login page."""
        # get object
        taskstatus_1 = Taskstatus.objects.get(taskstatus_name='taskstatus_1')
        # create url
        destination = '/login/?next=' + urllib.parse.quote('/taskstatuss/' + str(taskstatus_1.taskstatus_id), safe='')
        # get response
        response = self.client.get('/taskstatuss/' + str(taskstatus_1.taskstatus_id), follow=True)
        # compare
        self.assertRedirects(response, destination, status_code=302, target_status_code=200)

    def test_taskstatuss_detail_logged_in(self):
        """Authenticated requests to the detail view succeed."""
        taskstatus_1 = Taskstatus.objects.get(taskstatus_name='taskstatus_1')
        self._login()
        response = self.client.get('/taskstatuss/' + str(taskstatus_1.taskstatus_id))
        self.assertEqual(response.status_code, 200)

    def test_taskstatuss_detail_template(self):
        """The detail view renders the expected template."""
        taskstatus_1 = Taskstatus.objects.get(taskstatus_name='taskstatus_1')
        self._login()
        response = self.client.get('/taskstatuss/' + str(taskstatus_1.taskstatus_id))
        self.assertTemplateUsed(response, 'dfirtrack_main/taskstatus/taskstatuss_detail.html')

    def test_taskstatuss_detail_get_user_context(self):
        """The detail view exposes the logged-in user in the template context."""
        taskstatus_1 = Taskstatus.objects.get(taskstatus_name='taskstatus_1')
        self._login()
        response = self.client.get('/taskstatuss/' + str(taskstatus_1.taskstatus_id))
        self.assertEqual(str(response.context['user']), 'testuser_taskstatus')
| 39.326923
| 118
| 0.675306
| 426
| 4,090
| 6.29108
| 0.147887
| 0.057463
| 0.070522
| 0.068657
| 0.851119
| 0.806716
| 0.785075
| 0.777612
| 0.713806
| 0.642164
| 0
| 0.014321
| 0.21467
| 4,090
| 103
| 119
| 39.708738
| 0.82005
| 0.12445
| 0
| 0.488889
| 0
| 0
| 0.18044
| 0.027452
| 0
| 0
| 0
| 0
| 0.177778
| 1
| 0.2
| false
| 0.155556
| 0.088889
| 0
| 0.311111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
ba44e721c8d1d90d5116c740aba5339427ab338e
| 84
|
py
|
Python
|
mspac/__init__.py
|
jayvdb/mspac
|
0cf8c2192c919996255267ec313c34defc1aa56b
|
[
"MIT"
] | 1
|
2017-04-24T09:13:17.000Z
|
2017-04-24T09:13:17.000Z
|
mspac/__init__.py
|
jayvdb/mspac
|
0cf8c2192c919996255267ec313c34defc1aa56b
|
[
"MIT"
] | 1
|
2019-10-13T07:50:11.000Z
|
2019-10-14T16:58:37.000Z
|
mspac/__init__.py
|
jayvdb/mspac
|
0cf8c2192c919996255267ec313c34defc1aa56b
|
[
"MIT"
] | 3
|
2017-04-24T10:35:28.000Z
|
2019-10-13T10:58:05.000Z
|
#!/usr/bin/env python3
from . import mspac_tool
def main():
    """Entry point: delegate to the bundled ``mspac_tool`` module's main()."""
    mspac_tool.main()
| 12
| 24
| 0.678571
| 13
| 84
| 4.230769
| 0.769231
| 0.327273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014493
| 0.178571
| 84
| 6
| 25
| 14
| 0.782609
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ba7475422e33d52aaada77b727ec69a9ccd3b97c
| 152
|
py
|
Python
|
6 kyu/Balance the arrays.py
|
mwk0408/codewars_solutions
|
9b4f502b5f159e68024d494e19a96a226acad5e5
|
[
"MIT"
] | 6
|
2020-09-03T09:32:25.000Z
|
2020-12-07T04:10:01.000Z
|
6 kyu/Balance the arrays.py
|
mwk0408/codewars_solutions
|
9b4f502b5f159e68024d494e19a96a226acad5e5
|
[
"MIT"
] | 1
|
2021-12-13T15:30:21.000Z
|
2021-12-13T15:30:21.000Z
|
6 kyu/Balance the arrays.py
|
mwk0408/codewars_solutions
|
9b4f502b5f159e68024d494e19a96a226acad5e5
|
[
"MIT"
] | null | null | null |
from collections import Counter
def balance(arr1, arr2):
    """Return True if arr1 and arr2 have the same multiset of element frequencies.

    The actual element values are irrelevant; only the sorted list of how often
    each distinct value occurs is compared.

    Args:
        arr1: Any iterable of hashable values.
        arr2: Any iterable of hashable values.

    Returns:
        bool: True when both frequency profiles match, False otherwise.
    """
    # The original wrapped .values() in an identity generator
    # (`j for j in ...`) — sorted() takes the iterable directly.
    return sorted(Counter(arr1).values()) == sorted(Counter(arr2).values())
| 50.666667
| 95
| 0.736842
| 25
| 152
| 4.48
| 0.56
| 0.125
| 0.178571
| 0.196429
| 0.357143
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0.030303
| 0.131579
| 152
| 3
| 95
| 50.666667
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
2414da753eebc4510a68518aaa22aa14bfbf116a
| 9,378
|
py
|
Python
|
venv/Lib/site-packages/tensorflow/python/ops/gen_special_math_ops.py
|
rexliu3/StockTradingBotCloud
|
46b732b9c05f73bc0e856a3c4a16854b6d12e18e
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/tensorflow/python/ops/gen_special_math_ops.py
|
rexliu3/StockTradingBotCloud
|
46b732b9c05f73bc0e856a3c4a16854b6d12e18e
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/tensorflow/python/ops/gen_special_math_ops.py
|
rexliu3/StockTradingBotCloud
|
46b732b9c05f73bc0e856a3c4a16854b6d12e18e
|
[
"MIT"
] | 1
|
2020-06-28T11:47:47.000Z
|
2020-06-28T11:47:47.000Z
|
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: special_math_ops.cc
"""
import collections
from tensorflow.python import pywrap_tfe as pywrap_tfe
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.deprecation import deprecated_endpoints
from tensorflow.python.util import dispatch as _dispatch
from tensorflow.python.util.tf_export import tf_export
def dawsn(x, name=None):
  r"""TODO: add doc.

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  # Resolve the active TF context; reuse the already-initialized one if present.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C-level fast-path execution first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "Dawsn", name,
        tld.op_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower generic eager path.
      try:
        return dawsn_eager_fallback(
            x, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Re-raise C++ op errors as Python exceptions with the op name attached.
      _ops.raise_from_not_ok_status(e, name)
  # Graph mode: add a "Dawsn" node to the current graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "Dawsn", x=x, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Record attrs and inputs so the gradient can be computed later.
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "Dawsn", _inputs_flat, _attrs, _result)
  # Unpack the single output tensor.
  _result, = _result
  return _result

# Expose the raw op as tf.raw_ops.Dawsn.
Dawsn = tf_export("raw_ops.Dawsn")(_ops.to_raw_op(dawsn))
def dawsn_eager_fallback(x, name, ctx):
  """Slow-path eager execution of the Dawsn op (used when the fast path fails)."""
  # Infer the dtype attribute T from the single input tensor.
  _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  # Execute the op eagerly through the generic execute path.
  _result = _execute.execute(b"Dawsn", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "Dawsn", _inputs_flat, _attrs, _result)
  # Unpack the single output tensor.
  _result, = _result
  return _result
def expint(x, name=None):
  r"""TODO: add doc.

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  # Resolve the active TF context; reuse the already-initialized one if present.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C-level fast-path execution first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "Expint", name,
        tld.op_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower generic eager path.
      try:
        return expint_eager_fallback(
            x, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Re-raise C++ op errors as Python exceptions with the op name attached.
      _ops.raise_from_not_ok_status(e, name)
  # Graph mode: add an "Expint" node to the current graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "Expint", x=x, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Record attrs and inputs so the gradient can be computed later.
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "Expint", _inputs_flat, _attrs, _result)
  # Unpack the single output tensor.
  _result, = _result
  return _result

# Expose the raw op as tf.raw_ops.Expint.
Expint = tf_export("raw_ops.Expint")(_ops.to_raw_op(expint))
def expint_eager_fallback(x, name, ctx):
  """Slow-path eager execution of the Expint op (used when the fast path fails)."""
  # Infer the dtype attribute T from the single input tensor.
  _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  # Execute the op eagerly through the generic execute path.
  _result = _execute.execute(b"Expint", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "Expint", _inputs_flat, _attrs, _result)
  # Unpack the single output tensor.
  _result, = _result
  return _result
def fresnel_cos(x, name=None):
  r"""TODO: add doc.

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  # Resolve the active TF context; reuse the already-initialized one if present.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C-level fast-path execution first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "FresnelCos", name,
        tld.op_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower generic eager path.
      try:
        return fresnel_cos_eager_fallback(
            x, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Re-raise C++ op errors as Python exceptions with the op name attached.
      _ops.raise_from_not_ok_status(e, name)
  # Graph mode: add a "FresnelCos" node to the current graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "FresnelCos", x=x, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Record attrs and inputs so the gradient can be computed later.
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "FresnelCos", _inputs_flat, _attrs, _result)
  # Unpack the single output tensor.
  _result, = _result
  return _result

# Expose the raw op as tf.raw_ops.FresnelCos.
FresnelCos = tf_export("raw_ops.FresnelCos")(_ops.to_raw_op(fresnel_cos))
def fresnel_cos_eager_fallback(x, name, ctx):
  """Slow-path eager execution of the FresnelCos op (used when the fast path fails)."""
  # Infer the dtype attribute T from the single input tensor.
  _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  # Execute the op eagerly through the generic execute path.
  _result = _execute.execute(b"FresnelCos", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "FresnelCos", _inputs_flat, _attrs, _result)
  # Unpack the single output tensor.
  _result, = _result
  return _result
def fresnel_sin(x, name=None):
  r"""TODO: add doc.

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  # Resolve the active TF context; reuse the already-initialized one if present.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C-level fast-path execution first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "FresnelSin", name,
        tld.op_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower generic eager path.
      try:
        return fresnel_sin_eager_fallback(
            x, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Re-raise C++ op errors as Python exceptions with the op name attached.
      _ops.raise_from_not_ok_status(e, name)
  # Graph mode: add a "FresnelSin" node to the current graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "FresnelSin", x=x, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Record attrs and inputs so the gradient can be computed later.
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "FresnelSin", _inputs_flat, _attrs, _result)
  # Unpack the single output tensor.
  _result, = _result
  return _result

# Expose the raw op as tf.raw_ops.FresnelSin.
FresnelSin = tf_export("raw_ops.FresnelSin")(_ops.to_raw_op(fresnel_sin))
def fresnel_sin_eager_fallback(x, name, ctx):
  """Slow-path eager execution of the FresnelSin op (used when the fast path fails)."""
  # Infer the dtype attribute T from the single input tensor.
  _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  # Execute the op eagerly through the generic execute path.
  _result = _execute.execute(b"FresnelSin", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "FresnelSin", _inputs_flat, _attrs, _result)
  # Unpack the single output tensor.
  _result, = _result
  return _result
def spence(x, name=None):
  r"""TODO: add doc.

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  # Resolve the active TF context; reuse the already-initialized one if present.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C-level fast-path execution first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "Spence", name,
        tld.op_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower generic eager path.
      try:
        return spence_eager_fallback(
            x, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Re-raise C++ op errors as Python exceptions with the op name attached.
      _ops.raise_from_not_ok_status(e, name)
  # Graph mode: add a "Spence" node to the current graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "Spence", x=x, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Record attrs and inputs so the gradient can be computed later.
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "Spence", _inputs_flat, _attrs, _result)
  # Unpack the single output tensor.
  _result, = _result
  return _result

# Expose the raw op as tf.raw_ops.Spence.
Spence = tf_export("raw_ops.Spence")(_ops.to_raw_op(spence))
def spence_eager_fallback(x, name, ctx):
  """Slow-path eager execution of the Spence op (used when the fast path fails)."""
  # Infer the dtype attribute T from the single input tensor.
  _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  # Execute the op eagerly through the generic execute path.
  _result = _execute.execute(b"Spence", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "Spence", _inputs_flat, _attrs, _result)
  # Unpack the single output tensor.
  _result, = _result
  return _result
| 32.116438
| 77
| 0.686074
| 1,259
| 9,378
| 4.688642
| 0.094519
| 0.042351
| 0.038116
| 0.030493
| 0.868372
| 0.829239
| 0.824835
| 0.811282
| 0.811282
| 0.810266
| 0
| 0.003369
| 0.208787
| 9,378
| 291
| 78
| 32.226804
| 0.792183
| 0.153658
| 0
| 0.754717
| 1
| 0
| 0.035377
| 0
| 0
| 0
| 0
| 0.017182
| 0
| 1
| 0.04717
| false
| 0.023585
| 0.056604
| 0
| 0.198113
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
2429825dc7ae78ecb355637e883c31dcc4972dfe
| 19,221
|
py
|
Python
|
velkozz_web_api/apps/social_media_api/migrations/0007_auto_20210331_0424.py
|
velkoz-data-ingestion/velkozz_web_api
|
519a6a90e5fdf5bab8ba2daf637768c5fd424a12
|
[
"MIT"
] | null | null | null |
velkozz_web_api/apps/social_media_api/migrations/0007_auto_20210331_0424.py
|
velkoz-data-ingestion/velkozz_web_api
|
519a6a90e5fdf5bab8ba2daf637768c5fd424a12
|
[
"MIT"
] | null | null | null |
velkozz_web_api/apps/social_media_api/migrations/0007_auto_20210331_0424.py
|
velkoz-data-ingestion/velkozz_web_api
|
519a6a90e5fdf5bab8ba2daf637768c5fd424a12
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.3 on 2021-03-31 04:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make author / author_created / title nullable on every subreddit post model.

    The generated file repeated the same three AlterField operations 37 times
    (once per model, 111 operations total). This builds the identical list,
    in the identical order, from a single model-name table.
    """

    dependencies = [
        ('social_media_api', '0006_auto_20210329_0700'),
    ]

    # Every post model touched by this migration, in the originally generated order.
    _POST_MODELS = [
        'canadaposts', 'chinaposts', 'conflictnewsposts', 'conservativeposts',
        'cryptocurrencyposts', 'democratsposts', 'economicsposts', 'energyposts',
        'hongkongposts', 'indiaposts', 'israelposts', 'japanposts', 'koreaposts',
        'liberalposts', 'libertarianposts', 'malaysiaposts', 'middleeastnewsposts',
        'neutralpoliticsposts', 'newsposts', 'northkoreanewsposts',
        'northkoreaposts', 'pakistanposts', 'palestineposts', 'politicsposts',
        'progressiveposts', 'realtechposts', 'scienceposts', 'singaporeposts',
        'socialismposts', 'spaceposts', 'taiwanposts', 'technologyposts',
        'techposts', 'thailandposts', 'ukpoliticsposts', 'wallstreetbetsposts',
        'worldnewsposts',
    ]

    # Per model: author (CharField), author_created (DateTimeField), title
    # (CharField) — same field instances per model as the generated original.
    operations = [
        migrations.AlterField(model_name=model, name=field_name, field=field)
        for model in _POST_MODELS
        for field_name, field in (
            ('author', models.CharField(max_length=300, null=True)),
            ('author_created', models.DateTimeField(null=True)),
            ('title', models.CharField(max_length=300, null=True)),
        )
    ]
| 33.780316
| 62
| 0.551168
| 1,622
| 19,221
| 6.391492
| 0.048089
| 0.214141
| 0.267676
| 0.310504
| 0.984856
| 0.984856
| 0.984856
| 0.980997
| 0.931224
| 0.931224
| 0
| 0.019818
| 0.335831
| 19,221
| 568
| 63
| 33.839789
| 0.792261
| 0.002341
| 0
| 0.987544
| 1
| 0
| 0.12929
| 0.0012
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.001779
| 0
| 0.007117
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
79fe0af5ec8a5be4a49c771606bea0bfd69602a1
| 69
|
py
|
Python
|
mushroom_rl_benchmark/builders/actor_critic/__init__.py
|
MushroomRL/mushroom-rl-benchmarking
|
0cde27e84e3f37dbaa259f0faba7af800aefc589
|
[
"MIT"
] | null | null | null |
mushroom_rl_benchmark/builders/actor_critic/__init__.py
|
MushroomRL/mushroom-rl-benchmarking
|
0cde27e84e3f37dbaa259f0faba7af800aefc589
|
[
"MIT"
] | null | null | null |
mushroom_rl_benchmark/builders/actor_critic/__init__.py
|
MushroomRL/mushroom-rl-benchmarking
|
0cde27e84e3f37dbaa259f0faba7af800aefc589
|
[
"MIT"
] | null | null | null |
from .classic_actor_critic import *
from .deep_actor_critic import *
| 23
| 35
| 0.826087
| 10
| 69
| 5.3
| 0.6
| 0.415094
| 0.641509
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115942
| 69
| 2
| 36
| 34.5
| 0.868852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
03622db5f705f3a35e7f9f7f9cdbe8c30915b69e
| 993
|
py
|
Python
|
text_analysis/main/forms.py
|
bungoume/mecab-web-api
|
d69fb87778c54e73652652b38dd14245bfb8f4a5
|
[
"MIT"
] | 36
|
2015-01-14T03:11:28.000Z
|
2022-01-10T03:50:21.000Z
|
text_analysis/main/forms.py
|
bungoume/mecab-web-api
|
d69fb87778c54e73652652b38dd14245bfb8f4a5
|
[
"MIT"
] | 49
|
2018-10-04T04:03:11.000Z
|
2022-03-07T02:00:25.000Z
|
text_analysis/main/forms.py
|
bungoume/mecab-web-api
|
d69fb87778c54e73652652b38dd14245bfb8f4a5
|
[
"MIT"
] | 7
|
2015-07-28T05:02:56.000Z
|
2021-05-28T17:38:11.000Z
|
from django import forms
from django.core import validators
class ReadingForm(forms.Form):
    """Form for reading requests: an optional sentence plus an optional
    n-best candidate count clamped to the range 1..50."""

    sentence = forms.CharField(required=False)
    nbest_num = forms.IntegerField(validators=[
        validators.MinValueValidator(1), validators.MaxValueValidator(50)], required=False)

    def clean_sentence(self):
        """Normalize an omitted sentence to the empty string."""
        return self.cleaned_data.get('sentence', '')

    def clean_nbest_num(self):
        """Fall back to 10 candidates when nbest_num was left blank."""
        value = self.cleaned_data.get('nbest_num')
        return 10 if value is None else value
class ParseForm(forms.Form):
    """Input form for the parse endpoint: a sentence and an optional
    n-best candidate count (1-50, defaulting to 3 when omitted)."""

    sentence = forms.CharField(required=False)
    nbest_num = forms.IntegerField(validators=[
        validators.MinValueValidator(1), validators.MaxValueValidator(50)], required=False)

    def clean_sentence(self):
        # Normalize an omitted sentence to the empty string.
        return self.cleaned_data.get('sentence', '')

    def clean_nbest_num(self):
        # Fall back to 3 candidates when the client sends no value.
        value = self.cleaned_data.get('nbest_num')
        return 3 if value is None else value
| 30.090909
| 91
| 0.690836
| 119
| 993
| 5.596639
| 0.277311
| 0.144144
| 0.09009
| 0.108108
| 0.831832
| 0.831832
| 0.831832
| 0.831832
| 0.831832
| 0.831832
| 0
| 0.011494
| 0.21148
| 993
| 32
| 92
| 31.03125
| 0.83908
| 0
| 0
| 0.75
| 0
| 0
| 0.03424
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.083333
| 0.083333
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
036b53337d2830ae09ce21b61cc515bd1a4c890c
| 12,169
|
py
|
Python
|
kobra/api/v1/tests/test_discounts.py
|
karservice/kobra
|
2019fd3be499c06d2527e80576fd6ff03d8fe151
|
[
"MIT"
] | 4
|
2016-08-28T16:00:20.000Z
|
2018-01-31T18:22:43.000Z
|
kobra/api/v1/tests/test_discounts.py
|
karservice/kobra
|
2019fd3be499c06d2527e80576fd6ff03d8fe151
|
[
"MIT"
] | 25
|
2016-08-15T20:57:59.000Z
|
2022-02-10T18:14:48.000Z
|
kobra/api/v1/tests/test_discounts.py
|
karservice/kobra
|
2019fd3be499c06d2527e80576fd6ff03d8fe151
|
[
"MIT"
] | 1
|
2017-02-06T17:13:16.000Z
|
2017-02-06T17:13:16.000Z
|
# -*- coding: utf-8 -*-
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase
from ....factories import (DiscountFactory, DiscountRegistrationFactory,
TicketTypeFactory, UserFactory)
class DiscountApiTests(APITestCase):
    """API tests for the v1 discount endpoints.

    'Owned' throughout means the requesting user is an admin of the
    organization behind the event of the discount's ticket type.
    Unauthenticated requests must get 401; authenticated users must never
    see unowned discounts (the list filters them out, detail endpoints
    answer 404 as if they did not exist).
    """

    # --- helpers (deduplicate payload building, URLs and ownership) ---

    @staticmethod
    def _detail_url(discount):
        """URL of the given discount's detail endpoint."""
        return reverse('v1:discount-detail', kwargs={'pk': discount.pk})

    @staticmethod
    def _ticket_type_url(ticket_type):
        """URL of the given ticket type's detail endpoint."""
        return reverse('v1:tickettype-detail', kwargs={'pk': ticket_type.pk})

    @classmethod
    def _request_data(cls, discount, ticket_type_url=None):
        """Serialized create/update payload for ``discount``.

        ``ticket_type_url`` overrides the ticket type hyperlink, which lets
        the update tests point an existing discount at a different ticket
        type.
        """
        if ticket_type_url is None:
            ticket_type_url = cls._ticket_type_url(discount.ticket_type)
        return {
            'ticket_type': ticket_type_url,
            'union': reverse('v1:union-detail',
                             kwargs={'pk': discount.union.pk}),
            'amount': discount.amount,
        }

    @staticmethod
    def _grant_ownership(user, ticket_type):
        """Make ``user`` an admin of the organization owning ``ticket_type``."""
        ticket_type.event.organization.admins.add(user)

    def _put_changed_ticket_type(self, discount, new_ticket_type, user=None):
        """PUT ``discount`` with its ticket type changed to ``new_ticket_type``.

        Authenticates as ``user`` first when given; returns the response.
        """
        request_data = self._request_data(
            discount, ticket_type_url=self._ticket_type_url(new_ticket_type))
        if user is not None:
            self.client.force_authenticate(user)
        return self.client.put(self._detail_url(discount), data=request_data)

    # --- list ---

    def test_list_unauthenticated(self):
        response = self.client.get(reverse('v1:discount-list'))
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_list_authenticated(self):
        user = UserFactory()
        self.client.force_authenticate(user)
        response = self.client.get(reverse('v1:discount-list'))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, [])

    def test_list_authenticated_unowned(self):
        user = UserFactory()
        # Creates a Discount "owned" by someone else; it must not be listed.
        DiscountFactory()
        self.client.force_authenticate(user)
        response = self.client.get(reverse('v1:discount-list'))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, [])

    def test_list_authenticated_owned(self):
        user = UserFactory()
        owned_discount = DiscountFactory()
        self._grant_ownership(user, owned_discount.ticket_type)
        DiscountFactory()  # unowned; must be filtered out of the listing
        self.client.force_authenticate(user)
        response = self.client.get(reverse('v1:discount-list'))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]['id'], str(owned_discount.id))

    # --- create ---

    def test_create_unauthenticated(self):
        # Build (don't save) a discount just to produce a valid payload.
        temp_discount = DiscountFactory.build()
        response = self.client.post(reverse('v1:discount-list'),
                                    data=self._request_data(temp_discount))
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_create_authenticated_unowned_ticket_type(self):
        user = UserFactory()
        temp_discount = DiscountFactory.build()
        self.client.force_authenticate(user)
        response = self.client.post(reverse('v1:discount-list'),
                                    data=self._request_data(temp_discount))
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_authenticated_owned_ticket_type(self):
        user = UserFactory()
        owned_ticket_type = TicketTypeFactory()
        self._grant_ownership(user, owned_ticket_type)
        temp_discount = DiscountFactory.build(ticket_type=owned_ticket_type)
        self.client.force_authenticate(user)
        response = self.client.post(reverse('v1:discount-list'),
                                    data=self._request_data(temp_discount))
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    # --- retrieve ---

    def test_retrieve_unauthenticated(self):
        discount = DiscountFactory()
        response = self.client.get(self._detail_url(discount))
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_retrieve_authenticated_unowned(self):
        user = UserFactory()
        discount = DiscountFactory()
        self.client.force_authenticate(user)
        response = self.client.get(self._detail_url(discount))
        # Authenticated requests should be treated as 404 when retrieving an
        # unowned discount.
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_retrieve_authenticated_owned(self):
        user = UserFactory()
        discount = DiscountFactory()
        self._grant_ownership(user, discount.ticket_type)
        self.client.force_authenticate(user)
        response = self.client.get(self._detail_url(discount))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['id'], str(discount.id))

    # --- update ---

    def test_update_unauthenticated(self):
        discount = DiscountFactory(ticket_type=TicketTypeFactory())
        response = self._put_changed_ticket_type(discount, TicketTypeFactory())
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_update_authenticated_unowned_to_unowned_ticket_type(self):
        user = UserFactory()
        discount = DiscountFactory(ticket_type=TicketTypeFactory())  # unowned
        new_ticket_type = TicketTypeFactory()  # unowned
        response = self._put_changed_ticket_type(discount, new_ticket_type,
                                                 user)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_update_authenticated_unowned_to_owned_ticket_type(self):
        user = UserFactory()
        discount = DiscountFactory(ticket_type=TicketTypeFactory())  # unowned
        new_ticket_type = TicketTypeFactory()  # owned
        self._grant_ownership(user, new_ticket_type)
        response = self._put_changed_ticket_type(discount, new_ticket_type,
                                                 user)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_update_authenticated_owned_to_unowned_ticket_type(self):
        user = UserFactory()
        original_ticket_type = TicketTypeFactory()  # owned
        self._grant_ownership(user, original_ticket_type)
        discount = DiscountFactory(ticket_type=original_ticket_type)
        new_ticket_type = TicketTypeFactory()  # unowned
        response = self._put_changed_ticket_type(discount, new_ticket_type,
                                                 user)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_update_authenticated_owned_to_owned_ticket_type(self):
        user = UserFactory()
        original_ticket_type = TicketTypeFactory()  # owned
        self._grant_ownership(user, original_ticket_type)
        discount = DiscountFactory(ticket_type=original_ticket_type)
        new_ticket_type = TicketTypeFactory()  # owned
        self._grant_ownership(user, new_ticket_type)
        response = self._put_changed_ticket_type(discount, new_ticket_type,
                                                 user)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    # --- delete ---

    def test_delete_unauthenticated(self):
        discount = DiscountFactory()
        response = self.client.delete(self._detail_url(discount))
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_delete_authenticated_unowned_without_registrations(self):
        user = UserFactory()
        discount = DiscountFactory()
        self.client.force_authenticate(user)
        response = self.client.delete(self._detail_url(discount))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_delete_authenticated_unowned_with_registrations(self):
        user = UserFactory()
        discount = DiscountFactory()
        DiscountRegistrationFactory(discount=discount)
        self.client.force_authenticate(user)
        response = self.client.delete(self._detail_url(discount))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_delete_authenticated_owned_without_registrations(self):
        user = UserFactory()
        discount = DiscountFactory()
        self._grant_ownership(user, discount.ticket_type)
        self.client.force_authenticate(user)
        response = self.client.delete(self._detail_url(discount))
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)

    def test_delete_authenticated_owned_with_registrations(self):
        user = UserFactory()
        discount = DiscountFactory()
        self._grant_ownership(user, discount.ticket_type)
        DiscountRegistrationFactory(discount=discount)
        self.client.force_authenticate(user)
        response = self.client.delete(self._detail_url(discount))
        # A discount with registrations may not be deleted, even by an owner.
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
| 41.250847
| 76
| 0.652642
| 1,332
| 12,169
| 5.732733
| 0.084835
| 0.090361
| 0.053169
| 0.052383
| 0.897721
| 0.888685
| 0.879256
| 0.850969
| 0.833421
| 0.825825
| 0
| 0.011064
| 0.242419
| 12,169
| 294
| 77
| 41.391156
| 0.817225
| 0.045115
| 0
| 0.800885
| 0
| 0
| 0.074489
| 0
| 0
| 0
| 0
| 0
| 0.110619
| 1
| 0.088496
| false
| 0
| 0.017699
| 0
| 0.110619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
301ce3448b97a5084fd8ceb35a896528dfcd518e
| 2,962
|
py
|
Python
|
mat_db/main/migrations/0007_rename_e_min40_hosedynamic_dyn_e_min40_and_more.py
|
tkminek/material_database
|
8661617077192d20e8d9445cd6560bf1266f0582
|
[
"MIT"
] | null | null | null |
mat_db/main/migrations/0007_rename_e_min40_hosedynamic_dyn_e_min40_and_more.py
|
tkminek/material_database
|
8661617077192d20e8d9445cd6560bf1266f0582
|
[
"MIT"
] | null | null | null |
mat_db/main/migrations/0007_rename_e_min40_hosedynamic_dyn_e_min40_and_more.py
|
tkminek/material_database
|
8661617077192d20e8d9445cd6560bf1266f0582
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0.2 on 2022-02-25 21:13
from django.db import migrations
class Migration(migrations.Migration):
    """Prefix every HoseDynamic field with ``Dyn_`` and every HoseStatic
    field with ``Stat_``; no fields are added or removed."""

    dependencies = [
        ('main', '0006_hosedynamic_nu_min40_hosedynamic_nu_plus100_and_more'),
    ]

    # The same nine fields are renamed on both models; only the prefix
    # differs, so generate the RenameField operations instead of listing
    # all eighteen by hand. Order matches the originally generated file:
    # all hosedynamic renames first, then all hosestatic renames.
    operations = [
        migrations.RenameField(
            model_name=model_name,
            old_name=field,
            new_name='%s_%s' % (prefix, field),
        )
        for model_name, prefix in (('hosedynamic', 'Dyn'),
                                   ('hosestatic', 'Stat'))
        for field in ('E_min40', 'E_plus100', 'E_plus23', 'comment',
                      'hose_id', 'name', 'nu_min40', 'nu_plus100',
                      'nu_plus23')
    ]
| 28.480769
| 78
| 0.530047
| 277
| 2,962
| 5.277978
| 0.148014
| 0.25855
| 0.320109
| 0.369357
| 0.833789
| 0.784542
| 0.638167
| 0.475376
| 0
| 0
| 0
| 0.042485
| 0.364281
| 2,962
| 103
| 79
| 28.757282
| 0.733935
| 0.015192
| 0
| 0.742268
| 1
| 0
| 0.208233
| 0.019554
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.010309
| 0
| 0.041237
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3043bff845f341bd8ff67f6f5179b65c54f7c05b
| 10,087
|
py
|
Python
|
couch/tests.py
|
specialunderwear/tornado-couchdb
|
b1f1d15b1f52c5de124a58c1353797a3ed04cea3
|
[
"MIT"
] | null | null | null |
couch/tests.py
|
specialunderwear/tornado-couchdb
|
b1f1d15b1f52c5de124a58c1353797a3ed04cea3
|
[
"MIT"
] | null | null | null |
couch/tests.py
|
specialunderwear/tornado-couchdb
|
b1f1d15b1f52c5de124a58c1353797a3ed04cea3
|
[
"MIT"
] | null | null | null |
import json
import re
import couch
from tornado import ioloop, gen
dbname1 = 'tornado-couch-testdb'
dbname2 = 'tornado-couch-testdb2'
def run_blocking_tests():
    """Exercise couch.BlockingCouch synchronously against CouchDB.

    Runs through the whole blocking API — database create/list/info,
    single and bulk doc CRUD, replication, design docs and views,
    attachments, and rejection of non-JSON values — asserting on every
    response, then deletes the scratch databases.

    NOTE(review): this is an integration script; it presumably needs a
    reachable CouchDB server — confirm before running.
    """
    # set up tests
    doc1 = {'msg': 'Test doc 1'}
    doc2 = {'msg': 'Test doc 2'}
    # NaN is not representable in JSON; used below to test rejection.
    doc3 = {'value': float('nan')}
    db = couch.BlockingCouch(dbname1)
    db2 = couch.BlockingCouch(dbname2)
    # Drop leftovers from previous runs so the run starts clean.
    try:
        db.delete_db()
    except couch.NotFound:
        pass
    try:
        db2.delete_db()
    except couch.NotFound:
        pass
    # create database
    resp = db.create_db()
    assert 'ok' in resp, 'Failed to create database'
    # list databases
    resp = db.list_dbs()
    assert db.db_name in resp, 'Database not in list of databases'
    # info_db
    resp = db.info_db()
    assert ('db_name' in resp) and (resp['db_name'] == db.db_name), \
        'No database info'
    # uuids
    resp = db.uuids()
    assert re.match('[0-9a-f]{32}', resp[0]), 'Failed to get uuid'
    # save doc
    resp = db.save_doc(doc1)
    assert ('rev' in resp) and ('id' in resp), 'Failed to save doc'
    # Record the server-assigned id/rev for subsequent requests.
    doc1.update({'_id': resp['id'], '_rev': resp['rev']})
    # save doc with wrong rev number; the server must report a conflict
    try:
        db.save_doc({'_id': doc1['_id'], '_rev': 'a'})
        raise AssertionError('No error when overwriting doc with wrong rev')
    except couch.CouchException:
        pass
    # has doc
    resp = db.has_doc(doc1['_id'])
    assert resp, "Failed on getting head of doc"
    # has doc on non-existing doc
    resp = db.has_doc('a')
    assert not resp, "Failed on getting head of non-existing doc"
    # get doc
    resp = db.get_doc(doc1['_id'])
    assert doc1 == resp, 'Failed to get doc'
    # get non-existing doc
    try:
        resp = db.get_doc('a')
        raise AssertionError('No error on request for unexisting doc')
    except couch.NotFound:
        pass
    # save docs (bulk update of doc1, first save of doc2)
    doc1['msg2'] = 'Another message'
    resp = db.save_docs([doc1, doc2])
    assert all('rev' in item and 'id' in item for item in resp), \
        'Failed to save docs'
    doc1['_rev'] = resp[0]['rev']
    doc2.update({'_id': resp[1]['id'], '_rev': resp[1]['rev']})
    # get docs
    resp = db.get_docs([doc1['_id'], doc2['_id']])
    assert [doc1, doc2] == resp, 'Failed to get docs'
    # get non-existing docs
    try:
        resp = db.get_docs(['a', 'b'])
    except couch.NotFound:
        pass
    else:
        raise AssertionError('No error on request for unexisting docs')
    # list docs
    resp = db.view_all_docs(include_docs=True)
    assert {doc1['_id']: doc1['_rev'], doc2['_id']: doc2['_rev']} == \
        dict((row['doc']['_id'], row['doc']['_rev'])
             for row in resp['rows']), 'Failed listing all docs'
    # pull database (replicate db1 into db2, creating db2)
    resp = db2.pull_db(dbname1, create_target=True)
    assert 'ok' in resp, 'Replication failed'
    assert dbname2 in db2.list_dbs(), \
        'Replication failed, new database replication not found'
    # delete docs
    resp = db2.delete_docs([doc1, doc2])
    assert resp[0]['id'] == doc1['_id'] and resp[1]['id'] == doc2['_id'], \
        'Failed to delete docs'
    assert len(db2.view_all_docs()['rows']) == 0, \
        'Failed to delete docs, database not empty'
    # delete database
    resp = db2.delete_db()
    assert 'ok' in resp, 'Failed to delete database'
    # upload design doc with a single map view over docs that have 'msg'
    design = {
        '_id': '_design/test',
        'views': {
            'msg': {
                'map': 'function(doc) { if (doc.msg) { '
                       'emit(doc._id, doc.msg); } }'
            }
        }
    }
    resp = db.save_doc(design)
    assert 'ok' in resp, 'Failed to upload design doc'
    design['_rev'] = resp['rev']
    # view
    resp = db.view('test', 'msg')
    assert [doc1['_id'], doc2['_id']] == \
        [row['key'] for row in resp['rows']], \
        'Failed to get view results from design doc'
    # delete doc
    resp = db.delete_doc(doc2)
    assert resp['id'] == doc2['_id'], 'Failed to delete doc2'
    # save attachment
    data = {'msg3': 'This is a test'}
    attachment = {'mimetype': 'application/json', 'name': 'test attachment',
                  'data': json.dumps(data)}
    resp = db.save_attachment(doc1, attachment)
    assert 'ok' in resp, 'Attachment not saved'
    doc1['_rev'] = resp['rev']
    # get attachment
    resp = db.get_attachment(doc1, attachment['name'], attachment['mimetype'])
    assert json.loads(resp.decode('utf8')) == data, 'Attachment not loaded'
    # delete attachment
    resp = db.delete_attachment(doc1, attachment['name'])
    assert 'ok' in resp, 'Attachment not deleted'
    doc1['_rev'] = resp['rev']
    # put invalid doc (NaN cannot be JSON-encoded)
    try:
        db.save_doc(doc3)
    except ValueError:
        pass
    else:
        raise AssertionError('No error on doc containing NaN')
    # done testing, delete test db
    db.delete_db()
    print('All blocking tests passed')
@gen.coroutine
def run_async_tests():
    """Exercise couch.AsyncCouch via Tornado coroutines.

    Mirrors run_blocking_tests but drives couch.AsyncCouch with
    ``yield`` inside a ``gen.coroutine`` so every call is awaited on
    the IOLoop. Same coverage: database lifecycle, doc CRUD (single
    and bulk), replication, design docs/views, attachments, and
    rejection of non-JSON values.

    NOTE(review): integration test — presumably needs a reachable
    CouchDB server; confirm before running.
    """
    # set up tests
    doc1 = {'msg': 'Test doc 1'}
    doc2 = {'msg': 'Test doc 2'}
    # NaN is not representable in JSON; used below to test rejection.
    doc3 = {'value': float('nan')}
    db = couch.AsyncCouch(dbname1)
    db2 = couch.AsyncCouch(dbname2)
    # Drop leftovers from previous runs so the run starts clean.
    try:
        yield db.delete_db()
    except couch.NotFound:
        pass
    try:
        yield db2.delete_db()
    except couch.NotFound:
        pass
    # create database
    resp = yield db.create_db()
    assert 'ok' in resp, 'Failed to create database'
    # list databases
    resp = yield db.list_dbs()
    assert db.db_name in resp, 'Database not in list of databases'
    # info_db
    resp = yield db.info_db()
    assert ('db_name' in resp) and (resp['db_name'] == db.db_name), \
        'No database info'
    # uuids
    resp = yield db.uuids()
    assert re.match('[0-9a-f]{32}', resp[0]), 'Failed to get uuid'
    # save doc
    resp = yield db.save_doc(doc1)
    assert ('rev' in resp) and ('id' in resp), 'Failed to save doc'
    # Record the server-assigned id/rev for subsequent requests.
    doc1.update({'_id': resp['id'], '_rev': resp['rev']})
    # save doc with wrong rev number; the server must report a conflict
    try:
        yield db.save_doc({'_id': doc1['_id'], '_rev': 'a'})
        raise AssertionError('No error when overwriting doc with wrong rev')
    except couch.CouchException:
        pass
    # get doc
    resp = yield db.get_doc(doc1['_id'])
    assert doc1 == resp, 'Failed to get doc'
    # get non-existing doc
    try:
        yield db.get_doc('a')
        raise AssertionError('No error on request for unexisting doc')
    except couch.NotFound:
        pass
    # has doc
    resp = yield db.has_doc(doc1['_id'])
    assert resp, "Failed to get doc HEAD"
    # has doc on non-existing doc
    resp = yield db.has_doc('a')
    assert not resp, "Has a non-existing doc"
    # save docs (bulk update of doc1, first save of doc2)
    doc1['msg2'] = 'Another message'
    resp = yield db.save_docs([doc1, doc2])
    assert all('rev' in item and 'id' in item for item in resp), \
        'Failed to save docs'
    doc1['_rev'] = resp[0]['rev']
    doc2.update({'_id': resp[1]['id'], '_rev': resp[1]['rev']})
    # get docs
    resp = yield db.get_docs([doc1['_id'], doc2['_id']])
    assert [doc1, doc2] == resp, 'Failed to get docs'
    # get non-existing docs
    try:
        yield db.get_docs(['a', 'b'])
        raise AssertionError('No error on request for unexisting docs')
    except couch.NotFound:
        pass
    # list docs
    resp = yield db.view_all_docs(include_docs=True)
    assert {doc1['_id']: doc1['_rev'], doc2['_id']: doc2['_rev']} == \
        dict((row['doc']['_id'], row['doc']['_rev'])
             for row in resp['rows']), 'Failed listing all docs'
    # pull database (replicate db1 into db2, creating db2)
    resp = yield db2.pull_db(dbname1, create_target=True)
    assert 'ok' in resp, 'Replication failed'
    # verify that replicated db is in the list of dbs
    resp = yield db2.list_dbs()
    assert dbname2 in resp, \
        'Replication failed, new database replication not found'
    # delete docs
    resp = yield db2.delete_docs([doc1, doc2])
    assert resp[0]['id'] == doc1['_id'] and \
        resp[1]['id'] == doc2['_id'], 'Failed to delete docs'
    # check that deleted docs are not in the list all docs
    resp = yield db2.view_all_docs()
    assert len(resp['rows']) == 0, 'Failed to delete docs, database not empty'
    # delete database
    resp = yield db2.delete_db()
    assert 'ok' in resp, 'Failed to delete database'
    # upload design doc with a single map view over docs that have 'msg'
    design = {
        '_id': '_design/test',
        'views': {
            'msg': {
                'map': 'function(doc) { if (doc.msg) { '
                       'emit(doc._id, doc.msg); } }'
            }
        }
    }
    resp = yield db.save_doc(design)
    assert 'ok' in resp, 'Failed to upload design doc'
    design['_rev'] = resp['rev']
    # view
    resp = yield db.view('test', 'msg')
    assert [doc1['_id'], doc2['_id']] == \
        [row['key'] for row in resp['rows']], \
        'Failed to get view results from design doc'
    # delete doc
    resp = yield db.delete_doc(doc2)
    assert resp['id'] == doc2['_id'], 'Failed to delete doc2'
    # save attachment
    data = {'msg3': 'This is a test'}
    attachment = {'mimetype': 'application/json',
                  'name': 'test attachment', 'data': json.dumps(data)}
    resp = yield db.save_attachment(doc1, attachment)
    assert 'ok' in resp, 'Attachment not saved'
    doc1['_rev'] = resp['rev']
    # get attachment
    resp = yield db.get_attachment(doc1, attachment['name'],
                                   attachment['mimetype'])
    assert json.loads(resp.decode('utf8')) == data, \
        'Attachment not loaded'
    # delete attachment
    resp = yield db.delete_attachment(doc1, attachment['name'])
    assert 'ok' in resp, 'Attachment not deleted'
    doc1['_rev'] = resp['rev']
    # put invalid doc (NaN cannot be JSON-encoded)
    try:
        yield db.save_doc(doc3)
    except ValueError:
        pass
    else:
        raise AssertionError('No error on doc containing NaN')
    # done testing, delete test db
    yield db.delete_db()
    print('All async tests passed')
if __name__ == '__main__':
    # Run the synchronous suite first, then drive the coroutine-based
    # suite to completion on the Tornado IOLoop.
    run_blocking_tests()
    ioloop.IOLoop.instance().run_sync(run_async_tests)
| 28.575071
| 78
| 0.586795
| 1,364
| 10,087
| 4.231672
| 0.110704
| 0.028067
| 0.032398
| 0.029106
| 0.863825
| 0.847713
| 0.838531
| 0.808732
| 0.785863
| 0.767845
| 0
| 0.017772
| 0.269257
| 10,087
| 352
| 79
| 28.65625
| 0.765296
| 0.086051
| 0
| 0.574561
| 0
| 0
| 0.256329
| 0.002292
| 0
| 0
| 0
| 0
| 0.22807
| 1
| 0.008772
| false
| 0.061404
| 0.017544
| 0
| 0.026316
| 0.008772
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
304b2e7e9150be85a95490efbcdb4e4d0d025313
| 138,464
|
py
|
Python
|
opsgenie_swagger/api/alert_api.py
|
Logicworks/opsgenie-python-sdk
|
244c4c40ddcc25e70df5ba4425ab8d7c8da59c18
|
[
"Apache-2.0"
] | null | null | null |
opsgenie_swagger/api/alert_api.py
|
Logicworks/opsgenie-python-sdk
|
244c4c40ddcc25e70df5ba4425ab8d7c8da59c18
|
[
"Apache-2.0"
] | null | null | null |
opsgenie_swagger/api/alert_api.py
|
Logicworks/opsgenie-python-sdk
|
244c4c40ddcc25e70df5ba4425ab8d7c8da59c18
|
[
"Apache-2.0"
] | 1
|
2020-11-07T11:27:13.000Z
|
2020-11-07T11:27:13.000Z
|
# coding: utf-8
"""
OpsGenie REST API
OpsGenie OpenAPI Specification # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from opsgenie_swagger.api_client import ApiClient
class AlertApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def acknowledge_alert(self, identifier, **kwargs):  # noqa: E501
    """Acknowledge Alert  # noqa: E501

    Acknowledges alert with given identifier  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.acknowledge_alert(identifier, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
    :param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
    :param AcknowledgeAlertPayload body: Request payload of acknowledging alert action
    :return: SuccessResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience wrapper always get just the payload,
    # never the (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Async mode: hand back the request thread immediately.
        return self.acknowledge_alert_with_http_info(identifier, **kwargs)  # noqa: E501
    data = self.acknowledge_alert_with_http_info(identifier, **kwargs)  # noqa: E501
    return data
def acknowledge_alert_with_http_info(self, identifier, **kwargs):  # noqa: E501
    """Acknowledge Alert  # noqa: E501

    Acknowledges alert with given identifier  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.acknowledge_alert_with_http_info(identifier, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
    :param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
    :param AcknowledgeAlertPayload body: Request payload of acknowledging alert action
    :return: SuccessResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Names of all keyword arguments this endpoint accepts; anything
    # else passed in **kwargs is a caller error.
    all_params = ['identifier', 'identifier_type', 'body']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Reject unexpected keyword arguments, then flatten kwargs into params.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method acknowledge_alert" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'identifier' is set
    if ('identifier' not in params or
            params['identifier'] is None):
        raise ValueError("Missing the required parameter `identifier` when calling `acknowledge_alert`")  # noqa: E501

    collection_formats = {}

    # Path parameter substituted into the URL template below.
    path_params = {}
    if 'identifier' in params:
        path_params['identifier'] = params['identifier']  # noqa: E501

    query_params = []
    if 'identifier_type' in params:
        query_params.append(('identifierType', params['identifier_type']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['GenieKey']  # noqa: E501

    # Delegate the actual HTTP call (and optional async dispatch) to the
    # shared ApiClient.
    return self.api_client.call_api(
        '/v2/alerts/{identifier}/acknowledge', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='SuccessResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def add_attachment(self, identifier, file, **kwargs):  # noqa: E501
    """Add Alert Attachment  # noqa: E501

    Add Alert Attachment to related alert  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.add_attachment(identifier, file, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
    :param file file: Attachment file to be uploaded (required)
    :param str alert_identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
    :param str user: Display name of the request owner
    :param str index_file: Name of html file which will be shown when attachment clicked on UI
    :return: SuccessResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience wrapper always get just the payload,
    # never the (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Async mode: hand back the request thread immediately.
        return self.add_attachment_with_http_info(identifier, file, **kwargs)  # noqa: E501
    data = self.add_attachment_with_http_info(identifier, file, **kwargs)  # noqa: E501
    return data
def add_attachment_with_http_info(self, identifier, file, **kwargs):  # noqa: E501
    """Add Alert Attachment  # noqa: E501

    Uploads an attachment file to the alert matched by ``identifier``.
    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead (call ``.get()`` on it for the result).

    :param async_req bool
    :param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
    :param file file: Attachment file to be uploaded (required)
    :param str alert_identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
    :param str user: Display name of the request owner
    :param str index_file: Name of html file which will be shown when attachment clicked on UI
    :return: SuccessResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keywords this endpoint understands, plus the transport controls
    # shared by every generated method.
    accepted = {
        'identifier', 'file', 'alert_identifier_type', 'user',
        'index_file', 'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    }
    local_params = {'identifier': identifier, 'file': file}
    for name, value in kwargs.items():
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method add_attachment" % name
            )
        local_params[name] = value
    # Required arguments may not be None.
    if local_params['identifier'] is None:
        raise ValueError("Missing the required parameter `identifier` when calling `add_attachment`")  # noqa: E501
    if local_params['file'] is None:
        raise ValueError("Missing the required parameter `file` when calling `add_attachment`")  # noqa: E501

    query_params = []
    if 'alert_identifier_type' in local_params:
        query_params.append(('alertIdentifierType', local_params['alert_identifier_type']))  # noqa: E501
    # Multipart upload: the file travels in `files`, the optional text
    # fields travel as form parameters.
    form_params = []
    if 'user' in local_params:
        form_params.append(('user', local_params['user']))  # noqa: E501
    if 'index_file' in local_params:
        form_params.append(('indexFile', local_params['index_file']))  # noqa: E501
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['multipart/form-data']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/v2/alerts/{identifier}/attachments', 'POST',
        {'identifier': local_params['identifier']},
        query_params,
        header_params,
        body=None,
        post_params=form_params,
        files={'file': local_params['file']},
        response_type='SuccessResponse',  # noqa: E501
        auth_settings=['GenieKey'],  # noqa: E501
        async_req=local_params.get('async_req'),
        _return_http_data_only=local_params.get('_return_http_data_only'),
        _preload_content=local_params.get('_preload_content', True),
        _request_timeout=local_params.get('_request_timeout'),
        collection_formats={})
def add_details(self, identifier, body, **kwargs):  # noqa: E501
    """Add Details  # noqa: E501

    Adds detail key/value pairs to the alert matched by ``identifier``.
    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead (call ``.get()`` on it for the result).

    :param async_req bool
    :param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
    :param AddDetailsToAlertPayload body: Request payload of adding alert details action (required)
    :param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
    :return: SuccessResponse, or the request thread when async_req=True
    """
    # Callers of this convenience wrapper always want only the payload.
    kwargs['_return_http_data_only'] = True
    # The delegate returns a thread when async_req is truthy and the
    # deserialized data otherwise, so a single return covers both paths.
    return self.add_details_with_http_info(identifier, body, **kwargs)  # noqa: E501
def add_details_with_http_info(self, identifier, body, **kwargs):  # noqa: E501
    """Add Details  # noqa: E501

    Adds detail key/value pairs to the alert matched by ``identifier``.
    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead (call ``.get()`` on it for the result).

    :param async_req bool
    :param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
    :param AddDetailsToAlertPayload body: Request payload of adding alert details action (required)
    :param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
    :return: SuccessResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keywords this endpoint understands, plus the transport controls
    # shared by every generated method.
    accepted = {
        'identifier', 'body', 'identifier_type', 'async_req',
        '_return_http_data_only', '_preload_content', '_request_timeout',
    }
    local_params = {'identifier': identifier, 'body': body}
    for name, value in kwargs.items():
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method add_details" % name
            )
        local_params[name] = value
    # Required arguments may not be None.
    if local_params['identifier'] is None:
        raise ValueError("Missing the required parameter `identifier` when calling `add_details`")  # noqa: E501
    if local_params['body'] is None:
        raise ValueError("Missing the required parameter `body` when calling `add_details`")  # noqa: E501

    query_params = []
    if 'identifier_type' in local_params:
        query_params.append(('identifierType', local_params['identifier_type']))  # noqa: E501
    # The server negotiates JSON responses only.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/v2/alerts/{identifier}/details', 'POST',
        {'identifier': local_params['identifier']},
        query_params,
        header_params,
        body=local_params['body'],
        post_params=[],
        files={},
        response_type='SuccessResponse',  # noqa: E501
        auth_settings=['GenieKey'],  # noqa: E501
        async_req=local_params.get('async_req'),
        _return_http_data_only=local_params.get('_return_http_data_only'),
        _preload_content=local_params.get('_preload_content', True),
        _request_timeout=local_params.get('_request_timeout'),
        collection_formats={})
def add_note(self, identifier, body, **kwargs):  # noqa: E501
    """Add Note  # noqa: E501

    Adds a note to the alert matched by ``identifier``. Synchronous by
    default; pass ``async_req=True`` to receive the request thread
    instead (call ``.get()`` on it for the result).

    :param async_req bool
    :param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
    :param AddNoteToAlertPayload body: Request payload of adding note to alert action (required)
    :param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
    :return: SuccessResponse, or the request thread when async_req=True
    """
    # Callers of this convenience wrapper always want only the payload.
    kwargs['_return_http_data_only'] = True
    # The delegate returns a thread when async_req is truthy and the
    # deserialized data otherwise, so a single return covers both paths.
    return self.add_note_with_http_info(identifier, body, **kwargs)  # noqa: E501
def add_note_with_http_info(self, identifier, body, **kwargs):  # noqa: E501
    """Add Note  # noqa: E501

    Adds a note to the alert matched by ``identifier``. Synchronous by
    default; pass ``async_req=True`` to receive the request thread
    instead (call ``.get()`` on it for the result).

    :param async_req bool
    :param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
    :param AddNoteToAlertPayload body: Request payload of adding note to alert action (required)
    :param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
    :return: SuccessResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keywords this endpoint understands, plus the transport controls
    # shared by every generated method.
    accepted = {
        'identifier', 'body', 'identifier_type', 'async_req',
        '_return_http_data_only', '_preload_content', '_request_timeout',
    }
    local_params = {'identifier': identifier, 'body': body}
    for name, value in kwargs.items():
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method add_note" % name
            )
        local_params[name] = value
    # Required arguments may not be None.
    if local_params['identifier'] is None:
        raise ValueError("Missing the required parameter `identifier` when calling `add_note`")  # noqa: E501
    if local_params['body'] is None:
        raise ValueError("Missing the required parameter `body` when calling `add_note`")  # noqa: E501

    query_params = []
    if 'identifier_type' in local_params:
        query_params.append(('identifierType', local_params['identifier_type']))  # noqa: E501
    # The server negotiates JSON responses only.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/v2/alerts/{identifier}/notes', 'POST',
        {'identifier': local_params['identifier']},
        query_params,
        header_params,
        body=local_params['body'],
        post_params=[],
        files={},
        response_type='SuccessResponse',  # noqa: E501
        auth_settings=['GenieKey'],  # noqa: E501
        async_req=local_params.get('async_req'),
        _return_http_data_only=local_params.get('_return_http_data_only'),
        _preload_content=local_params.get('_preload_content', True),
        _request_timeout=local_params.get('_request_timeout'),
        collection_formats={})
def add_tags(self, identifier, body, **kwargs):  # noqa: E501
    """Add Tags  # noqa: E501

    Adds tags to the alert matched by ``identifier``. Synchronous by
    default; pass ``async_req=True`` to receive the request thread
    instead (call ``.get()`` on it for the result).

    :param async_req bool
    :param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
    :param AddTagsToAlertPayload body: Request payload of creating alert tags action (required)
    :param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
    :return: SuccessResponse, or the request thread when async_req=True
    """
    # Callers of this convenience wrapper always want only the payload.
    kwargs['_return_http_data_only'] = True
    # The delegate returns a thread when async_req is truthy and the
    # deserialized data otherwise, so a single return covers both paths.
    return self.add_tags_with_http_info(identifier, body, **kwargs)  # noqa: E501
def add_tags_with_http_info(self, identifier, body, **kwargs):  # noqa: E501
    """Add Tags  # noqa: E501

    Adds tags to the alert matched by ``identifier``. Synchronous by
    default; pass ``async_req=True`` to receive the request thread
    instead (call ``.get()`` on it for the result).

    :param async_req bool
    :param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
    :param AddTagsToAlertPayload body: Request payload of creating alert tags action (required)
    :param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
    :return: SuccessResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keywords this endpoint understands, plus the transport controls
    # shared by every generated method.
    accepted = {
        'identifier', 'body', 'identifier_type', 'async_req',
        '_return_http_data_only', '_preload_content', '_request_timeout',
    }
    local_params = {'identifier': identifier, 'body': body}
    for name, value in kwargs.items():
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method add_tags" % name
            )
        local_params[name] = value
    # Required arguments may not be None.
    if local_params['identifier'] is None:
        raise ValueError("Missing the required parameter `identifier` when calling `add_tags`")  # noqa: E501
    if local_params['body'] is None:
        raise ValueError("Missing the required parameter `body` when calling `add_tags`")  # noqa: E501

    query_params = []
    if 'identifier_type' in local_params:
        query_params.append(('identifierType', local_params['identifier_type']))  # noqa: E501
    # The server negotiates JSON responses only.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/v2/alerts/{identifier}/tags', 'POST',
        {'identifier': local_params['identifier']},
        query_params,
        header_params,
        body=local_params['body'],
        post_params=[],
        files={},
        response_type='SuccessResponse',  # noqa: E501
        auth_settings=['GenieKey'],  # noqa: E501
        async_req=local_params.get('async_req'),
        _return_http_data_only=local_params.get('_return_http_data_only'),
        _preload_content=local_params.get('_preload_content', True),
        _request_timeout=local_params.get('_request_timeout'),
        collection_formats={})
def add_team(self, identifier, body, **kwargs):  # noqa: E501
    """Add Team  # noqa: E501

    Adds a team to the alert matched by ``identifier``. Synchronous by
    default; pass ``async_req=True`` to receive the request thread
    instead (call ``.get()`` on it for the result).

    :param async_req bool
    :param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
    :param AddTeamToAlertPayload body: Request payload of adding team to alert action (required)
    :param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
    :return: SuccessResponse, or the request thread when async_req=True
    """
    # Callers of this convenience wrapper always want only the payload.
    kwargs['_return_http_data_only'] = True
    # The delegate returns a thread when async_req is truthy and the
    # deserialized data otherwise, so a single return covers both paths.
    return self.add_team_with_http_info(identifier, body, **kwargs)  # noqa: E501
def add_team_with_http_info(self, identifier, body, **kwargs):  # noqa: E501
    """Add Team  # noqa: E501

    Adds a team to the alert matched by ``identifier``. Synchronous by
    default; pass ``async_req=True`` to receive the request thread
    instead (call ``.get()`` on it for the result).

    :param async_req bool
    :param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
    :param AddTeamToAlertPayload body: Request payload of adding team to alert action (required)
    :param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
    :return: SuccessResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keywords this endpoint understands, plus the transport controls
    # shared by every generated method.
    accepted = {
        'identifier', 'body', 'identifier_type', 'async_req',
        '_return_http_data_only', '_preload_content', '_request_timeout',
    }
    local_params = {'identifier': identifier, 'body': body}
    for name, value in kwargs.items():
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method add_team" % name
            )
        local_params[name] = value
    # Required arguments may not be None.
    if local_params['identifier'] is None:
        raise ValueError("Missing the required parameter `identifier` when calling `add_team`")  # noqa: E501
    if local_params['body'] is None:
        raise ValueError("Missing the required parameter `body` when calling `add_team`")  # noqa: E501

    query_params = []
    if 'identifier_type' in local_params:
        query_params.append(('identifierType', local_params['identifier_type']))  # noqa: E501
    # The server negotiates JSON responses only.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/v2/alerts/{identifier}/teams', 'POST',
        {'identifier': local_params['identifier']},
        query_params,
        header_params,
        body=local_params['body'],
        post_params=[],
        files={},
        response_type='SuccessResponse',  # noqa: E501
        auth_settings=['GenieKey'],  # noqa: E501
        async_req=local_params.get('async_req'),
        _return_http_data_only=local_params.get('_return_http_data_only'),
        _preload_content=local_params.get('_preload_content', True),
        _request_timeout=local_params.get('_request_timeout'),
        collection_formats={})
def assign_alert(self, identifier, body, **kwargs):  # noqa: E501
    """Assign Alert  # noqa: E501

    Assigns an owner to the alert matched by ``identifier``. Synchronous
    by default; pass ``async_req=True`` to receive the request thread
    instead (call ``.get()`` on it for the result).

    :param async_req bool
    :param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
    :param AssignAlertPayload body: Request payload of assigning alert action (required)
    :param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
    :return: SuccessResponse, or the request thread when async_req=True
    """
    # Callers of this convenience wrapper always want only the payload.
    kwargs['_return_http_data_only'] = True
    # The delegate returns a thread when async_req is truthy and the
    # deserialized data otherwise, so a single return covers both paths.
    return self.assign_alert_with_http_info(identifier, body, **kwargs)  # noqa: E501
def assign_alert_with_http_info(self, identifier, body, **kwargs):  # noqa: E501
    """Assign Alert  # noqa: E501

    Assigns an owner to the alert matched by ``identifier``. Synchronous
    by default; pass ``async_req=True`` to receive the request thread
    instead (call ``.get()`` on it for the result).

    :param async_req bool
    :param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
    :param AssignAlertPayload body: Request payload of assigning alert action (required)
    :param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
    :return: SuccessResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keywords this endpoint understands, plus the transport controls
    # shared by every generated method.
    accepted = {
        'identifier', 'body', 'identifier_type', 'async_req',
        '_return_http_data_only', '_preload_content', '_request_timeout',
    }
    local_params = {'identifier': identifier, 'body': body}
    for name, value in kwargs.items():
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method assign_alert" % name
            )
        local_params[name] = value
    # Required arguments may not be None.
    if local_params['identifier'] is None:
        raise ValueError("Missing the required parameter `identifier` when calling `assign_alert`")  # noqa: E501
    if local_params['body'] is None:
        raise ValueError("Missing the required parameter `body` when calling `assign_alert`")  # noqa: E501

    query_params = []
    if 'identifier_type' in local_params:
        query_params.append(('identifierType', local_params['identifier_type']))  # noqa: E501
    # The server negotiates JSON responses only.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/v2/alerts/{identifier}/assign', 'POST',
        {'identifier': local_params['identifier']},
        query_params,
        header_params,
        body=local_params['body'],
        post_params=[],
        files={},
        response_type='SuccessResponse',  # noqa: E501
        auth_settings=['GenieKey'],  # noqa: E501
        async_req=local_params.get('async_req'),
        _return_http_data_only=local_params.get('_return_http_data_only'),
        _preload_content=local_params.get('_preload_content', True),
        _request_timeout=local_params.get('_request_timeout'),
        collection_formats={})
def close_alert(self, identifier, **kwargs):  # noqa: E501
    """Close Alert  # noqa: E501

    Closes the alert matched by ``identifier``. Synchronous by default;
    pass ``async_req=True`` to receive the request thread instead (call
    ``.get()`` on it for the result).

    :param async_req bool
    :param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
    :param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
    :param CloseAlertPayload body: Request payload of closing alert action
    :return: SuccessResponse, or the request thread when async_req=True
    """
    # Callers of this convenience wrapper always want only the payload.
    kwargs['_return_http_data_only'] = True
    # The delegate returns a thread when async_req is truthy and the
    # deserialized data otherwise, so a single return covers both paths.
    return self.close_alert_with_http_info(identifier, **kwargs)  # noqa: E501
def close_alert_with_http_info(self, identifier, **kwargs):  # noqa: E501
    """Close Alert  # noqa: E501

    Closes the alert matched by ``identifier``. Synchronous by default;
    pass ``async_req=True`` to receive the request thread instead (call
    ``.get()`` on it for the result).

    :param async_req bool
    :param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
    :param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
    :param CloseAlertPayload body: Request payload of closing alert action
    :return: SuccessResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keywords this endpoint understands, plus the transport controls
    # shared by every generated method. Note: `body` is OPTIONAL here.
    accepted = {
        'identifier', 'identifier_type', 'body', 'async_req',
        '_return_http_data_only', '_preload_content', '_request_timeout',
    }
    local_params = {'identifier': identifier}
    for name, value in kwargs.items():
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method close_alert" % name
            )
        local_params[name] = value
    # The only required argument may not be None.
    if local_params['identifier'] is None:
        raise ValueError("Missing the required parameter `identifier` when calling `close_alert`")  # noqa: E501

    query_params = []
    if 'identifier_type' in local_params:
        query_params.append(('identifierType', local_params['identifier_type']))  # noqa: E501
    # The server negotiates JSON responses only.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/v2/alerts/{identifier}/close', 'POST',
        {'identifier': local_params['identifier']},
        query_params,
        header_params,
        body=local_params.get('body'),
        post_params=[],
        files={},
        response_type='SuccessResponse',  # noqa: E501
        auth_settings=['GenieKey'],  # noqa: E501
        async_req=local_params.get('async_req'),
        _return_http_data_only=local_params.get('_return_http_data_only'),
        _preload_content=local_params.get('_preload_content', True),
        _request_timeout=local_params.get('_request_timeout'),
        collection_formats={})
def create_alert(self, body, **kwargs):  # noqa: E501
    """Create Alert  # noqa: E501

    Creates a new alert from the given payload. Synchronous by default;
    pass ``async_req=True`` to receive the request thread instead (call
    ``.get()`` on it for the result).

    :param async_req bool
    :param CreateAlertPayload body: Request payload of created alert (required)
    :return: SuccessResponse, or the request thread when async_req=True
    """
    # Callers of this convenience wrapper always want only the payload.
    kwargs['_return_http_data_only'] = True
    # The delegate returns a thread when async_req is truthy and the
    # deserialized data otherwise, so a single return covers both paths.
    return self.create_alert_with_http_info(body, **kwargs)  # noqa: E501
def create_alert_with_http_info(self, body, **kwargs):  # noqa: E501
    """Create Alert  # noqa: E501

    Creates a new alert from the given payload. Synchronous by default;
    pass ``async_req=True`` to receive the request thread instead (call
    ``.get()`` on it for the result).

    :param async_req bool
    :param CreateAlertPayload body: Request payload of created alert (required)
    :return: SuccessResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keywords this endpoint understands, plus the transport controls
    # shared by every generated method.
    accepted = {
        'body', 'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    }
    local_params = {'body': body}
    for name, value in kwargs.items():
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_alert" % name
            )
        local_params[name] = value
    # The only required argument may not be None.
    if local_params['body'] is None:
        raise ValueError("Missing the required parameter `body` when calling `create_alert`")  # noqa: E501

    # The server negotiates JSON responses only; this collection-level
    # endpoint takes no path or query parameters.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/v2/alerts', 'POST',
        {},
        [],
        header_params,
        body=local_params['body'],
        post_params=[],
        files={},
        response_type='SuccessResponse',  # noqa: E501
        auth_settings=['GenieKey'],  # noqa: E501
        async_req=local_params.get('async_req'),
        _return_http_data_only=local_params.get('_return_http_data_only'),
        _preload_content=local_params.get('_preload_content', True),
        _request_timeout=local_params.get('_request_timeout'),
        collection_formats={})
def create_saved_searches(self, body, **kwargs):  # noqa: E501
    """Create Saved Search  # noqa: E501

    Creates a saved search from the given payload. Synchronous by
    default; pass ``async_req=True`` to receive the request thread
    instead (call ``.get()`` on it for the result).

    :param async_req bool
    :param CreateSavedSearchPayload body: Request payload of creating saved search (required)
    :return: CreateSavedSearchResponse, or the request thread when async_req=True
    """
    # Callers of this convenience wrapper always want only the payload.
    kwargs['_return_http_data_only'] = True
    # The delegate returns a thread when async_req is truthy and the
    # deserialized data otherwise, so a single return covers both paths.
    return self.create_saved_searches_with_http_info(body, **kwargs)  # noqa: E501
def create_saved_searches_with_http_info(self, body, **kwargs):  # noqa: E501
    """Create Saved Search  # noqa: E501

    Creates a saved search from the given payload. Synchronous by
    default; pass ``async_req=True`` to receive the request thread
    instead (call ``.get()`` on it for the result).

    :param async_req bool
    :param CreateSavedSearchPayload body: Request payload of creating saved search (required)
    :return: CreateSavedSearchResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keywords this endpoint understands, plus the transport controls
    # shared by every generated method.
    accepted = {
        'body', 'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    }
    local_params = {'body': body}
    for name, value in kwargs.items():
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_saved_searches" % name
            )
        local_params[name] = value
    # The only required argument may not be None.
    if local_params['body'] is None:
        raise ValueError("Missing the required parameter `body` when calling `create_saved_searches`")  # noqa: E501

    # The server negotiates JSON responses only; this collection-level
    # endpoint takes no path or query parameters.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/v2/alerts/saved-searches', 'POST',
        {},
        [],
        header_params,
        body=local_params['body'],
        post_params=[],
        files={},
        response_type='CreateSavedSearchResponse',  # noqa: E501
        auth_settings=['GenieKey'],  # noqa: E501
        async_req=local_params.get('async_req'),
        _return_http_data_only=local_params.get('_return_http_data_only'),
        _preload_content=local_params.get('_preload_content', True),
        _request_timeout=local_params.get('_request_timeout'),
        collection_formats={})
def delete_alert(self, identifier, **kwargs):  # noqa: E501
    """Delete Alert.

    Deletes an alert using alert id, tiny id or alias. The call is
    synchronous unless ``async_req=True`` is passed, in which case the
    request thread is returned and ``thread.get()`` yields the result.

    :param async_req bool
    :param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
    :param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
    :param str user: Display name of the request owner
    :param str source: Display name of the request source
    :return: SuccessResponse, or the request thread when async_req is True
    """
    # This convenience wrapper always unwraps the response to its data.
    kwargs['_return_http_data_only'] = True
    # The delegate yields either the payload (sync) or the request
    # thread (async); both are passed straight through to the caller.
    return self.delete_alert_with_http_info(identifier, **kwargs)  # noqa: E501
def delete_alert_with_http_info(self, identifier, **kwargs):  # noqa: E501
    """Delete Alert.

    Deletes an alert using alert id, tiny id or alias. Synchronous by
    default; pass ``async_req=True`` to receive the request thread
    (``thread.get()`` yields the result).

    :param async_req bool
    :param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
    :param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
    :param str user: Display name of the request owner
    :param str source: Display name of the request source
    :return: SuccessResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Reject keyword arguments this endpoint does not understand.
    recognized = frozenset([
        'identifier', 'identifier_type', 'user', 'source',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ])
    for name in kwargs:
        if name not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_alert" % name
            )
    if identifier is None:
        raise ValueError("Missing the required parameter `identifier` when calling `delete_alert`")  # noqa: E501

    # Optional query-string parameters, sent on the wire in camelCase.
    query_params = []
    for py_name, wire_name in (('identifier_type', 'identifierType'),
                               ('user', 'user'),
                               ('source', 'source')):
        if py_name in kwargs:
            query_params.append((wire_name, kwargs[py_name]))

    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/v2/alerts/{identifier}', 'DELETE',
        {'identifier': identifier},
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='SuccessResponse',  # noqa: E501
        auth_settings=['GenieKey'],  # GenieKey API-key authentication
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def delete_saved_search(self, identifier, **kwargs):  # noqa: E501
    """Delete Saved Search.

    Deletes saved search using given search identifier. The call is
    synchronous unless ``async_req=True`` is passed, in which case the
    request thread is returned and ``thread.get()`` yields the result.

    :param async_req bool
    :param str identifier: Identifier of the saved search which could be 'id' or 'name' (required)
    :param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', or 'name'
    :return: SuccessResponse, or the request thread when async_req is True
    """
    # This convenience wrapper always unwraps the response to its data.
    kwargs['_return_http_data_only'] = True
    # Delegate result (payload or thread) is passed straight through.
    return self.delete_saved_search_with_http_info(identifier, **kwargs)  # noqa: E501
def delete_saved_search_with_http_info(self, identifier, **kwargs):  # noqa: E501
    """Delete Saved Search.

    Deletes saved search using given search identifier. Synchronous by
    default; pass ``async_req=True`` to receive the request thread
    (``thread.get()`` yields the result).

    :param async_req bool
    :param str identifier: Identifier of the saved search which could be 'id' or 'name' (required)
    :param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', or 'name'
    :return: SuccessResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Reject keyword arguments this endpoint does not understand.
    recognized = frozenset([
        'identifier', 'identifier_type', 'async_req',
        '_return_http_data_only', '_preload_content', '_request_timeout',
    ])
    for name in kwargs:
        if name not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_saved_search" % name
            )
    if identifier is None:
        raise ValueError("Missing the required parameter `identifier` when calling `delete_saved_search`")  # noqa: E501

    query_params = []
    if 'identifier_type' in kwargs:
        # Sent on the wire in camelCase.
        query_params.append(('identifierType', kwargs['identifier_type']))  # noqa: E501

    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/v2/alerts/saved-searches/{identifier}', 'DELETE',
        {'identifier': identifier},
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='SuccessResponse',  # noqa: E501
        auth_settings=['GenieKey'],  # GenieKey API-key authentication
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def escalate_alert(self, identifier, body, **kwargs):  # noqa: E501
    """Escalate Alert.

    Escalate alert with given identifier. The call is synchronous
    unless ``async_req=True`` is passed, in which case the request
    thread is returned and ``thread.get()`` yields the result.

    :param async_req bool
    :param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
    :param EscalateAlertToNextPayload body: Request payload of escalating alert action (required)
    :param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
    :return: SuccessResponse, or the request thread when async_req is True
    """
    # This convenience wrapper always unwraps the response to its data.
    kwargs['_return_http_data_only'] = True
    # Delegate result (payload or thread) is passed straight through.
    return self.escalate_alert_with_http_info(identifier, body, **kwargs)  # noqa: E501
def escalate_alert_with_http_info(self, identifier, body, **kwargs):  # noqa: E501
    """Escalate Alert.

    Escalate alert with given identifier. Synchronous by default; pass
    ``async_req=True`` to receive the request thread (``thread.get()``
    yields the result).

    :param async_req bool
    :param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
    :param EscalateAlertToNextPayload body: Request payload of escalating alert action (required)
    :param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
    :return: SuccessResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Reject keyword arguments this endpoint does not understand.
    recognized = frozenset([
        'identifier', 'body', 'identifier_type', 'async_req',
        '_return_http_data_only', '_preload_content', '_request_timeout',
    ])
    for name in kwargs:
        if name not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method escalate_alert" % name
            )
    if identifier is None:
        raise ValueError("Missing the required parameter `identifier` when calling `escalate_alert`")  # noqa: E501
    if body is None:
        raise ValueError("Missing the required parameter `body` when calling `escalate_alert`")  # noqa: E501

    query_params = []
    if 'identifier_type' in kwargs:
        # Sent on the wire in camelCase.
        query_params.append(('identifierType', kwargs['identifier_type']))  # noqa: E501

    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/v2/alerts/{identifier}/escalate', 'POST',
        {'identifier': identifier},
        query_params,
        header_params,
        body=body,
        post_params=[],
        files={},
        response_type='SuccessResponse',  # noqa: E501
        auth_settings=['GenieKey'],  # GenieKey API-key authentication
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def execute_custom_alert_action(self, identifier, action_name, **kwargs):  # noqa: E501
    """Custom Alert Action.

    Custom actions for the alert. The call is synchronous unless
    ``async_req=True`` is passed, in which case the request thread is
    returned and ``thread.get()`` yields the result.

    :param async_req bool
    :param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
    :param str action_name: Name of the action to execute (required)
    :param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
    :param ExecuteCustomAlertActionPayload body: Request payload of executing custom alert action
    :return: SuccessResponse, or the request thread when async_req is True
    """
    # This convenience wrapper always unwraps the response to its data.
    kwargs['_return_http_data_only'] = True
    # Delegate result (payload or thread) is passed straight through.
    return self.execute_custom_alert_action_with_http_info(identifier, action_name, **kwargs)  # noqa: E501
def execute_custom_alert_action_with_http_info(self, identifier, action_name, **kwargs):  # noqa: E501
    """Custom Alert Action.

    Custom actions for the alert. Synchronous by default; pass
    ``async_req=True`` to receive the request thread (``thread.get()``
    yields the result).

    :param async_req bool
    :param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
    :param str action_name: Name of the action to execute (required)
    :param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
    :param ExecuteCustomAlertActionPayload body: Request payload of executing custom alert action
    :return: SuccessResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Reject keyword arguments this endpoint does not understand.
    recognized = frozenset([
        'identifier', 'action_name', 'identifier_type', 'body',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ])
    for name in kwargs:
        if name not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method execute_custom_alert_action" % name
            )
    if identifier is None:
        raise ValueError("Missing the required parameter `identifier` when calling `execute_custom_alert_action`")  # noqa: E501
    if action_name is None:
        raise ValueError("Missing the required parameter `action_name` when calling `execute_custom_alert_action`")  # noqa: E501

    # Both path placeholders use camelCase on the wire.
    path_params = {'identifier': identifier, 'actionName': action_name}

    query_params = []
    if 'identifier_type' in kwargs:
        query_params.append(('identifierType', kwargs['identifier_type']))  # noqa: E501

    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/v2/alerts/{identifier}/actions/{actionName}', 'POST',
        path_params,
        query_params,
        header_params,
        body=kwargs.get('body'),  # request payload is optional here
        post_params=[],
        files={},
        response_type='SuccessResponse',  # noqa: E501
        auth_settings=['GenieKey'],  # GenieKey API-key authentication
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def get_alert(self, identifier, **kwargs):  # noqa: E501
    """Get Alert.

    Returns alert with given id, tiny id or alias. The call is
    synchronous unless ``async_req=True`` is passed, in which case the
    request thread is returned and ``thread.get()`` yields the result.

    :param async_req bool
    :param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
    :param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
    :return: GetAlertResponse, or the request thread when async_req is True
    """
    # This convenience wrapper always unwraps the response to its data.
    kwargs['_return_http_data_only'] = True
    # Delegate result (payload or thread) is passed straight through.
    return self.get_alert_with_http_info(identifier, **kwargs)  # noqa: E501
def get_alert_with_http_info(self, identifier, **kwargs):  # noqa: E501
    """Get Alert.

    Returns alert with given id, tiny id or alias. Synchronous by
    default; pass ``async_req=True`` to receive the request thread
    (``thread.get()`` yields the result).

    :param async_req bool
    :param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
    :param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
    :return: GetAlertResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Reject keyword arguments this endpoint does not understand.
    recognized = frozenset([
        'identifier', 'identifier_type', 'async_req',
        '_return_http_data_only', '_preload_content', '_request_timeout',
    ])
    for name in kwargs:
        if name not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_alert" % name
            )
    if identifier is None:
        raise ValueError("Missing the required parameter `identifier` when calling `get_alert`")  # noqa: E501

    query_params = []
    if 'identifier_type' in kwargs:
        # Sent on the wire in camelCase.
        query_params.append(('identifierType', kwargs['identifier_type']))  # noqa: E501

    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/v2/alerts/{identifier}', 'GET',
        {'identifier': identifier},
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='GetAlertResponse',  # noqa: E501
        auth_settings=['GenieKey'],  # GenieKey API-key authentication
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def get_attachment(self, identifier, attachment_id, **kwargs):  # noqa: E501
    """Get Alert Attachment.

    Get alert attachment name and url for the given identifier. The
    call is synchronous unless ``async_req=True`` is passed, in which
    case the request thread is returned and ``thread.get()`` yields
    the result.

    :param async_req bool
    :param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
    :param int attachment_id: Identifier of alert attachment (required)
    :param str alert_identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
    :return: GetAlertAttachmentResponse, or the request thread when async_req is True
    """
    # This convenience wrapper always unwraps the response to its data.
    kwargs['_return_http_data_only'] = True
    # Delegate result (payload or thread) is passed straight through.
    return self.get_attachment_with_http_info(identifier, attachment_id, **kwargs)  # noqa: E501
def get_attachment_with_http_info(self, identifier, attachment_id, **kwargs):  # noqa: E501
    """Get Alert Attachment.

    Get alert attachment name and url for the given identifier.
    Synchronous by default; pass ``async_req=True`` to receive the
    request thread (``thread.get()`` yields the result).

    :param async_req bool
    :param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
    :param int attachment_id: Identifier of alert attachment (required)
    :param str alert_identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
    :return: GetAlertAttachmentResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Reject keyword arguments this endpoint does not understand.
    recognized = frozenset([
        'identifier', 'attachment_id', 'alert_identifier_type',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ])
    for name in kwargs:
        if name not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_attachment" % name
            )
    if identifier is None:
        raise ValueError("Missing the required parameter `identifier` when calling `get_attachment`")  # noqa: E501
    if attachment_id is None:
        raise ValueError("Missing the required parameter `attachment_id` when calling `get_attachment`")  # noqa: E501

    # Path placeholders use camelCase on the wire.
    path_params = {'identifier': identifier, 'attachmentId': attachment_id}

    query_params = []
    if 'alert_identifier_type' in kwargs:
        query_params.append(('alertIdentifierType', kwargs['alert_identifier_type']))  # noqa: E501

    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/v2/alerts/{identifier}/attachments/{attachmentId}', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='GetAlertAttachmentResponse',  # noqa: E501
        auth_settings=['GenieKey'],  # GenieKey API-key authentication
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def get_request_status(self, request_id, **kwargs):  # noqa: E501
    """Get Request Status of Alert.

    Used to track the status and alert details (if any) of the request
    whose identifier is given. The call is synchronous unless
    ``async_req=True`` is passed, in which case the request thread is
    returned and ``thread.get()`` yields the result.

    :param async_req bool
    :param str request_id: Universally unique identifier of the questioned request (required)
    :return: GetRequestStatusResponse, or the request thread when async_req is True
    """
    # This convenience wrapper always unwraps the response to its data.
    kwargs['_return_http_data_only'] = True
    # Delegate result (payload or thread) is passed straight through.
    return self.get_request_status_with_http_info(request_id, **kwargs)  # noqa: E501
def get_request_status_with_http_info(self, request_id, **kwargs):  # noqa: E501
    """Get Request Status of Alert.

    Used to track the status and alert details (if any) of the request
    whose identifier is given. Synchronous by default; pass
    ``async_req=True`` to receive the request thread (``thread.get()``
    yields the result).

    :param async_req bool
    :param str request_id: Universally unique identifier of the questioned request (required)
    :return: GetRequestStatusResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Reject keyword arguments this endpoint does not understand.
    recognized = frozenset([
        'request_id', 'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ])
    for name in kwargs:
        if name not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_request_status" % name
            )
    if request_id is None:
        raise ValueError("Missing the required parameter `request_id` when calling `get_request_status`")  # noqa: E501

    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/v2/alerts/requests/{requestId}', 'GET',
        {'requestId': request_id},  # path placeholder is camelCase
        [],  # this endpoint takes no query parameters
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='GetRequestStatusResponse',  # noqa: E501
        auth_settings=['GenieKey'],  # GenieKey API-key authentication
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def get_saved_search(self, identifier, **kwargs):  # noqa: E501
    """Get Saved Search.

    Get saved search for the given search identifier. The call is
    synchronous unless ``async_req=True`` is passed, in which case the
    request thread is returned and ``thread.get()`` yields the result.

    :param async_req bool
    :param str identifier: Identifier of the saved search which could be 'id' or 'name' (required)
    :param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', or 'name'
    :return: GetSavedSearchResponse, or the request thread when async_req is True
    """
    # This convenience wrapper always unwraps the response to its data.
    kwargs['_return_http_data_only'] = True
    # Delegate result (payload or thread) is passed straight through.
    return self.get_saved_search_with_http_info(identifier, **kwargs)  # noqa: E501
def get_saved_search_with_http_info(self, identifier, **kwargs):  # noqa: E501
    """Get Saved Search.

    Get saved search for the given search identifier. Synchronous by
    default; pass ``async_req=True`` to receive the request thread
    (``thread.get()`` yields the result).

    :param async_req bool
    :param str identifier: Identifier of the saved search which could be 'id' or 'name' (required)
    :param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', or 'name'
    :return: GetSavedSearchResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Reject keyword arguments this endpoint does not understand.
    recognized = frozenset([
        'identifier', 'identifier_type', 'async_req',
        '_return_http_data_only', '_preload_content', '_request_timeout',
    ])
    for name in kwargs:
        if name not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_saved_search" % name
            )
    if identifier is None:
        raise ValueError("Missing the required parameter `identifier` when calling `get_saved_search`")  # noqa: E501

    query_params = []
    if 'identifier_type' in kwargs:
        # Sent on the wire in camelCase.
        query_params.append(('identifierType', kwargs['identifier_type']))  # noqa: E501

    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/v2/alerts/saved-searches/{identifier}', 'GET',
        {'identifier': identifier},
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='GetSavedSearchResponse',  # noqa: E501
        auth_settings=['GenieKey'],  # GenieKey API-key authentication
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def list_alerts(self, **kwargs):  # noqa: E501
    """List Alerts.

    Returns list of alerts. The call is synchronous unless
    ``async_req=True`` is passed, in which case the request thread is
    returned and ``thread.get()`` yields the result.

    :param async_req bool
    :param str query: Search query to apply while filtering the alerts
    :param str search_identifier: Identifier of the saved search query to apply while filtering the alerts
    :param str search_identifier_type: Identifier type of the saved search query. Possible values are 'id', or 'name'
    :param int offset: Start index of the result set (to apply pagination). Minimum value (and also default value) is 0
    :param int limit: Maximum number of items to provide in the result. Must be a positive integer value. Default value is 20 and maximum value is 100
    :param str sort: Name of the field that result set will be sorted by
    :param str order: Sorting order of the result set
    :return: ListAlertsResponse, or the request thread when async_req is True
    """
    # This convenience wrapper always unwraps the response to its data.
    kwargs['_return_http_data_only'] = True
    # Delegate result (payload or thread) is passed straight through.
    return self.list_alerts_with_http_info(**kwargs)  # noqa: E501
def list_alerts_with_http_info(self, **kwargs): # noqa: E501
"""List Alerts # noqa: E501
Returns list of alerts # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_alerts_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str query: Search query to apply while filtering the alerts
:param str search_identifier: Identifier of the saved search query to apply while filtering the alerts
:param str search_identifier_type: Identifier type of the saved search query. Possible values are 'id', or 'name'
:param int offset: Start index of the result set (to apply pagination). Minimum value (and also default value) is 0
:param int limit: Maximum number of items to provide in the result. Must be a positive integer value. Default value is 20 and maximum value is 100
:param str sort: Name of the field that result set will be sorted by
:param str order: Sorting order of the result set
:return: ListAlertsResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['query', 'search_identifier', 'search_identifier_type', 'offset', 'limit', 'sort', 'order'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_alerts" % key
)
params[key] = val
del params['kwargs']
if 'offset' in params and params['offset'] < 0: # noqa: E501
raise ValueError("Invalid value for parameter `offset` when calling `list_alerts`, must be a value greater than or equal to `0`") # noqa: E501
if 'limit' in params and params['limit'] > 100: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `list_alerts`, must be a value less than or equal to `100`") # noqa: E501
if 'limit' in params and params['limit'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `list_alerts`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'query' in params:
query_params.append(('query', params['query'])) # noqa: E501
if 'search_identifier' in params:
query_params.append(('searchIdentifier', params['search_identifier'])) # noqa: E501
if 'search_identifier_type' in params:
query_params.append(('searchIdentifierType', params['search_identifier_type'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'sort' in params:
query_params.append(('sort', params['sort'])) # noqa: E501
if 'order' in params:
query_params.append(('order', params['order'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['GenieKey'] # noqa: E501
return self.api_client.call_api(
'/v2/alerts', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ListAlertsResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_attachments(self, identifier, **kwargs): # noqa: E501
"""List Alert Attachments # noqa: E501
List alert attachment names and urls for related alert # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_attachments(identifier, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
:param str alert_identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
:return: ListAlertAttachmentsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_attachments_with_http_info(identifier, **kwargs) # noqa: E501
else:
(data) = self.list_attachments_with_http_info(identifier, **kwargs) # noqa: E501
return data
def list_attachments_with_http_info(self, identifier, **kwargs): # noqa: E501
"""List Alert Attachments # noqa: E501
List alert attachment names and urls for related alert # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_attachments_with_http_info(identifier, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
:param str alert_identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
:return: ListAlertAttachmentsResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['identifier', 'alert_identifier_type'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_attachments" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'identifier' is set
if ('identifier' not in params or
params['identifier'] is None):
raise ValueError("Missing the required parameter `identifier` when calling `list_attachments`") # noqa: E501
collection_formats = {}
path_params = {}
if 'identifier' in params:
path_params['identifier'] = params['identifier'] # noqa: E501
query_params = []
if 'alert_identifier_type' in params:
query_params.append(('alertIdentifierType', params['alert_identifier_type'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['GenieKey'] # noqa: E501
return self.api_client.call_api(
'/v2/alerts/{identifier}/attachments', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ListAlertAttachmentsResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_logs(self, identifier, **kwargs): # noqa: E501
"""List Alert Logs # noqa: E501
List alert logs for the given alert identifier # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_logs(identifier, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
:param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
:param str offset: Starting value of the offset property
:param str direction: Page direction to apply for the given offset with 'next' and 'prev'
:param int limit: Maximum number of items to provide in the result. Must be a positive integer value. Default value is 20 and maximum value is 100
:param str order: Sorting order of the result set
:return: ListAlertLogsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_logs_with_http_info(identifier, **kwargs) # noqa: E501
else:
(data) = self.list_logs_with_http_info(identifier, **kwargs) # noqa: E501
return data
def list_logs_with_http_info(self, identifier, **kwargs): # noqa: E501
"""List Alert Logs # noqa: E501
List alert logs for the given alert identifier # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_logs_with_http_info(identifier, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
:param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
:param str offset: Starting value of the offset property
:param str direction: Page direction to apply for the given offset with 'next' and 'prev'
:param int limit: Maximum number of items to provide in the result. Must be a positive integer value. Default value is 20 and maximum value is 100
:param str order: Sorting order of the result set
:return: ListAlertLogsResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['identifier', 'identifier_type', 'offset', 'direction', 'limit', 'order'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_logs" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'identifier' is set
if ('identifier' not in params or
params['identifier'] is None):
raise ValueError("Missing the required parameter `identifier` when calling `list_logs`") # noqa: E501
if 'limit' in params and params['limit'] > 100: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `list_logs`, must be a value less than or equal to `100`") # noqa: E501
if 'limit' in params and params['limit'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `list_logs`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'identifier' in params:
path_params['identifier'] = params['identifier'] # noqa: E501
query_params = []
if 'identifier_type' in params:
query_params.append(('identifierType', params['identifier_type'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'direction' in params:
query_params.append(('direction', params['direction'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'order' in params:
query_params.append(('order', params['order'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['GenieKey'] # noqa: E501
return self.api_client.call_api(
'/v2/alerts/{identifier}/logs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ListAlertLogsResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_notes(self, identifier, **kwargs): # noqa: E501
"""List Alert Notes # noqa: E501
List alert notes for the given alert identifier # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_notes(identifier, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
:param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
:param str offset: Starting value of the offset property
:param str direction: Page direction to apply for the given offset with 'next' and 'prev'
:param int limit: Maximum number of items to provide in the result. Must be a positive integer value. Default value is 20 and maximum value is 100
:param str order: Sorting order of the result set
:return: ListAlertNotesResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_notes_with_http_info(identifier, **kwargs) # noqa: E501
else:
(data) = self.list_notes_with_http_info(identifier, **kwargs) # noqa: E501
return data
def list_notes_with_http_info(self, identifier, **kwargs): # noqa: E501
"""List Alert Notes # noqa: E501
List alert notes for the given alert identifier # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_notes_with_http_info(identifier, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
:param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
:param str offset: Starting value of the offset property
:param str direction: Page direction to apply for the given offset with 'next' and 'prev'
:param int limit: Maximum number of items to provide in the result. Must be a positive integer value. Default value is 20 and maximum value is 100
:param str order: Sorting order of the result set
:return: ListAlertNotesResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['identifier', 'identifier_type', 'offset', 'direction', 'limit', 'order'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_notes" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'identifier' is set
if ('identifier' not in params or
params['identifier'] is None):
raise ValueError("Missing the required parameter `identifier` when calling `list_notes`") # noqa: E501
if 'limit' in params and params['limit'] > 100: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `list_notes`, must be a value less than or equal to `100`") # noqa: E501
if 'limit' in params and params['limit'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `list_notes`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'identifier' in params:
path_params['identifier'] = params['identifier'] # noqa: E501
query_params = []
if 'identifier_type' in params:
query_params.append(('identifierType', params['identifier_type'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'direction' in params:
query_params.append(('direction', params['direction'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'order' in params:
query_params.append(('order', params['order'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['GenieKey'] # noqa: E501
return self.api_client.call_api(
'/v2/alerts/{identifier}/notes', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ListAlertNotesResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_recipients(self, identifier, **kwargs): # noqa: E501
"""List Alert Recipients # noqa: E501
List alert recipients for the given alert identifier # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_recipients(identifier, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
:param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
:return: ListAlertRecipientsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_recipients_with_http_info(identifier, **kwargs) # noqa: E501
else:
(data) = self.list_recipients_with_http_info(identifier, **kwargs) # noqa: E501
return data
def list_recipients_with_http_info(self, identifier, **kwargs): # noqa: E501
"""List Alert Recipients # noqa: E501
List alert recipients for the given alert identifier # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_recipients_with_http_info(identifier, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
:param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
:return: ListAlertRecipientsResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['identifier', 'identifier_type'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_recipients" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'identifier' is set
if ('identifier' not in params or
params['identifier'] is None):
raise ValueError("Missing the required parameter `identifier` when calling `list_recipients`") # noqa: E501
collection_formats = {}
path_params = {}
if 'identifier' in params:
path_params['identifier'] = params['identifier'] # noqa: E501
query_params = []
if 'identifier_type' in params:
query_params.append(('identifierType', params['identifier_type'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['GenieKey'] # noqa: E501
return self.api_client.call_api(
'/v2/alerts/{identifier}/recipients', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ListAlertRecipientsResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_saved_searches(self, **kwargs): # noqa: E501
"""Lists Saved Searches # noqa: E501
List all saved searches # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_saved_searches(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: ListSavedSearchesResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_saved_searches_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_saved_searches_with_http_info(**kwargs) # noqa: E501
return data
def list_saved_searches_with_http_info(self, **kwargs): # noqa: E501
"""Lists Saved Searches # noqa: E501
List all saved searches # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_saved_searches_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: ListSavedSearchesResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_saved_searches" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['GenieKey'] # noqa: E501
return self.api_client.call_api(
'/v2/alerts/saved-searches', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ListSavedSearchesResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def remove_attachment(self, identifier, attachment_id, **kwargs): # noqa: E501
"""Remove Alert Attachment # noqa: E501
Remove alert attachment for the given identifier # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_attachment(identifier, attachment_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
:param int attachment_id: Identifier of alert attachment (required)
:param str alert_identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
:param str user: Display name of the request owner
:return: SuccessResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.remove_attachment_with_http_info(identifier, attachment_id, **kwargs) # noqa: E501
else:
(data) = self.remove_attachment_with_http_info(identifier, attachment_id, **kwargs) # noqa: E501
return data
def remove_attachment_with_http_info(self, identifier, attachment_id, **kwargs): # noqa: E501
"""Remove Alert Attachment # noqa: E501
Remove alert attachment for the given identifier # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_attachment_with_http_info(identifier, attachment_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
:param int attachment_id: Identifier of alert attachment (required)
:param str alert_identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
:param str user: Display name of the request owner
:return: SuccessResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['identifier', 'attachment_id', 'alert_identifier_type', 'user'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method remove_attachment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'identifier' is set
if ('identifier' not in params or
params['identifier'] is None):
raise ValueError("Missing the required parameter `identifier` when calling `remove_attachment`") # noqa: E501
# verify the required parameter 'attachment_id' is set
if ('attachment_id' not in params or
params['attachment_id'] is None):
raise ValueError("Missing the required parameter `attachment_id` when calling `remove_attachment`") # noqa: E501
collection_formats = {}
path_params = {}
if 'identifier' in params:
path_params['identifier'] = params['identifier'] # noqa: E501
if 'attachment_id' in params:
path_params['attachmentId'] = params['attachment_id'] # noqa: E501
query_params = []
if 'alert_identifier_type' in params:
query_params.append(('alertIdentifierType', params['alert_identifier_type'])) # noqa: E501
if 'user' in params:
query_params.append(('user', params['user'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['GenieKey'] # noqa: E501
return self.api_client.call_api(
'/v2/alerts/{identifier}/attachments/{attachmentId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SuccessResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def remove_details(self, identifier, keys, **kwargs): # noqa: E501
"""Remove Details # noqa: E501
Remove details of the alert with given identifier # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_details(identifier, keys, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
:param list[str] keys: Comma separated list of keys to remove from the custom properties of the alert (e.g. 'key1,key2') (required)
:param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
:param str user: Display name of the request owner
:param str note: Additional alert note to add
:param str source: Display name of the request source
:return: SuccessResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.remove_details_with_http_info(identifier, keys, **kwargs) # noqa: E501
else:
(data) = self.remove_details_with_http_info(identifier, keys, **kwargs) # noqa: E501
return data
def remove_details_with_http_info(self, identifier, keys, **kwargs): # noqa: E501
"""Remove Details # noqa: E501
Remove details of the alert with given identifier # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_details_with_http_info(identifier, keys, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
:param list[str] keys: Comma separated list of keys to remove from the custom properties of the alert (e.g. 'key1,key2') (required)
:param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
:param str user: Display name of the request owner
:param str note: Additional alert note to add
:param str source: Display name of the request source
:return: SuccessResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['identifier', 'keys', 'identifier_type', 'user', 'note', 'source'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method remove_details" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'identifier' is set
if ('identifier' not in params or
params['identifier'] is None):
raise ValueError("Missing the required parameter `identifier` when calling `remove_details`") # noqa: E501
# verify the required parameter 'keys' is set
if ('keys' not in params or
params['keys'] is None):
raise ValueError("Missing the required parameter `keys` when calling `remove_details`") # noqa: E501
collection_formats = {}
path_params = {}
if 'identifier' in params:
path_params['identifier'] = params['identifier'] # noqa: E501
query_params = []
if 'identifier_type' in params:
query_params.append(('identifierType', params['identifier_type'])) # noqa: E501
if 'user' in params:
query_params.append(('user', params['user'])) # noqa: E501
if 'note' in params:
query_params.append(('note', params['note'])) # noqa: E501
if 'source' in params:
query_params.append(('source', params['source'])) # noqa: E501
if 'keys' in params:
query_params.append(('keys', params['keys'])) # noqa: E501
collection_formats['keys'] = 'csv' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['GenieKey'] # noqa: E501
return self.api_client.call_api(
'/v2/alerts/{identifier}/details', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SuccessResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def remove_tags(self, identifier, tags, **kwargs): # noqa: E501
"""Remove Tags # noqa: E501
Remove tags of the alert with given identifier # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_tags(identifier, tags, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
:param list[str] tags: Tags field of the given alert as comma seperated values (e.g. 'tag1, tag2') (required)
:param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
:param str user: Display name of the request owner
:param str note: Additional alert note to add
:param str source: Display name of the request source
:return: SuccessResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.remove_tags_with_http_info(identifier, tags, **kwargs) # noqa: E501
else:
(data) = self.remove_tags_with_http_info(identifier, tags, **kwargs) # noqa: E501
return data
def remove_tags_with_http_info(self, identifier, tags, **kwargs): # noqa: E501
"""Remove Tags # noqa: E501
Remove tags of the alert with given identifier # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_tags_with_http_info(identifier, tags, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
:param list[str] tags: Tags field of the given alert as comma seperated values (e.g. 'tag1, tag2') (required)
:param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
:param str user: Display name of the request owner
:param str note: Additional alert note to add
:param str source: Display name of the request source
:return: SuccessResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['identifier', 'tags', 'identifier_type', 'user', 'note', 'source'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method remove_tags" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'identifier' is set
if ('identifier' not in params or
params['identifier'] is None):
raise ValueError("Missing the required parameter `identifier` when calling `remove_tags`") # noqa: E501
# verify the required parameter 'tags' is set
if ('tags' not in params or
params['tags'] is None):
raise ValueError("Missing the required parameter `tags` when calling `remove_tags`") # noqa: E501
collection_formats = {}
path_params = {}
if 'identifier' in params:
path_params['identifier'] = params['identifier'] # noqa: E501
query_params = []
if 'identifier_type' in params:
query_params.append(('identifierType', params['identifier_type'])) # noqa: E501
if 'user' in params:
query_params.append(('user', params['user'])) # noqa: E501
if 'note' in params:
query_params.append(('note', params['note'])) # noqa: E501
if 'source' in params:
query_params.append(('source', params['source'])) # noqa: E501
if 'tags' in params:
query_params.append(('tags', params['tags'])) # noqa: E501
collection_formats['tags'] = 'csv' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['GenieKey'] # noqa: E501
return self.api_client.call_api(
'/v2/alerts/{identifier}/tags', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SuccessResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def snooze_alert(self, identifier, body, **kwargs): # noqa: E501
"""Snooze Alert # noqa: E501
Snooze alert with given identifier # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.snooze_alert(identifier, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
:param SnoozeAlertPayload body: Request payload of snoozing alert action (required)
:param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
:return: SuccessResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.snooze_alert_with_http_info(identifier, body, **kwargs) # noqa: E501
else:
(data) = self.snooze_alert_with_http_info(identifier, body, **kwargs) # noqa: E501
return data
def snooze_alert_with_http_info(self, identifier, body, **kwargs): # noqa: E501
"""Snooze Alert # noqa: E501
Snooze alert with given identifier # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.snooze_alert_with_http_info(identifier, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
:param SnoozeAlertPayload body: Request payload of snoozing alert action (required)
:param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
:return: SuccessResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['identifier', 'body', 'identifier_type'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method snooze_alert" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'identifier' is set
if ('identifier' not in params or
params['identifier'] is None):
raise ValueError("Missing the required parameter `identifier` when calling `snooze_alert`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `snooze_alert`") # noqa: E501
collection_formats = {}
path_params = {}
if 'identifier' in params:
path_params['identifier'] = params['identifier'] # noqa: E501
query_params = []
if 'identifier_type' in params:
query_params.append(('identifierType', params['identifier_type'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['GenieKey'] # noqa: E501
return self.api_client.call_api(
'/v2/alerts/{identifier}/snooze', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SuccessResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def un_acknowledge_alert(self, identifier, **kwargs): # noqa: E501
"""UnAcknowledge Alert # noqa: E501
UnAcknowledge alert with given identifier # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.un_acknowledge_alert(identifier, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
:param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
:param UnAcknowledgeAlertPayload body: Request payload of unacknowledging alert action
:return: SuccessResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.un_acknowledge_alert_with_http_info(identifier, **kwargs) # noqa: E501
else:
(data) = self.un_acknowledge_alert_with_http_info(identifier, **kwargs) # noqa: E501
return data
def un_acknowledge_alert_with_http_info(self, identifier, **kwargs): # noqa: E501
"""UnAcknowledge Alert # noqa: E501
UnAcknowledge alert with given identifier # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.un_acknowledge_alert_with_http_info(identifier, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str identifier: Identifier of alert which could be alert id, tiny id or alert alias (required)
:param str identifier_type: Type of the identifier that is provided as an in-line parameter. Possible values are 'id', 'alias' or 'tiny'
:param UnAcknowledgeAlertPayload body: Request payload of unacknowledging alert action
:return: SuccessResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['identifier', 'identifier_type', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method un_acknowledge_alert" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'identifier' is set
if ('identifier' not in params or
params['identifier'] is None):
raise ValueError("Missing the required parameter `identifier` when calling `un_acknowledge_alert`") # noqa: E501
collection_formats = {}
path_params = {}
if 'identifier' in params:
path_params['identifier'] = params['identifier'] # noqa: E501
query_params = []
if 'identifier_type' in params:
query_params.append(('identifierType', params['identifier_type'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['GenieKey'] # noqa: E501
return self.api_client.call_api(
'/v2/alerts/{identifier}/unacknowledge', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SuccessResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 44.464997
| 155
| 0.6237
| 16,157
| 138,464
| 5.153865
| 0.019744
| 0.046883
| 0.019503
| 0.025075
| 0.97848
| 0.972319
| 0.968248
| 0.961403
| 0.956455
| 0.954138
| 0
| 0.015923
| 0.28879
| 138,464
| 3,113
| 156
| 44.47928
| 0.829666
| 0.377578
| 0
| 0.81331
| 1
| 0.004123
| 0.215604
| 0.04422
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034747
| false
| 0
| 0.002356
| 0
| 0.088928
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0645f6371332e0e6b8fb9ffcc496173231689911
| 106
|
py
|
Python
|
base/handler/__init__.py
|
vralex/RumbleRunner
|
eb9889daf90846176af292d4e7411c41dac885c8
|
[
"MIT"
] | 2
|
2022-01-26T15:06:02.000Z
|
2022-02-03T05:14:52.000Z
|
base/handler/__init__.py
|
vralex/RumbleRunner
|
eb9889daf90846176af292d4e7411c41dac885c8
|
[
"MIT"
] | 1
|
2022-02-07T23:50:26.000Z
|
2022-02-07T23:50:26.000Z
|
base/handler/__init__.py
|
vralex/RumbleRunner
|
eb9889daf90846176af292d4e7411c41dac885c8
|
[
"MIT"
] | 1
|
2022-02-07T23:19:16.000Z
|
2022-02-07T23:19:16.000Z
|
from base.handler.default import *
from base.handler.helpers import *
from base.handler.wrappers import *
| 26.5
| 35
| 0.801887
| 15
| 106
| 5.666667
| 0.466667
| 0.282353
| 0.529412
| 0.494118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113208
| 106
| 3
| 36
| 35.333333
| 0.904255
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
0646a968179685955dce9ee8e516a72c2c88ba09
| 15,481
|
py
|
Python
|
sdk/python/pulumi_cloudflare/ip_list.py
|
pulumi/pulumi-cloudflare
|
d444af2fab6101b388a15cf2e3933e45e9935cc6
|
[
"ECL-2.0",
"Apache-2.0"
] | 35
|
2019-03-14T21:29:29.000Z
|
2022-03-30T00:00:59.000Z
|
sdk/python/pulumi_cloudflare/ip_list.py
|
pulumi/pulumi-cloudflare
|
d444af2fab6101b388a15cf2e3933e45e9935cc6
|
[
"ECL-2.0",
"Apache-2.0"
] | 128
|
2019-03-08T23:45:58.000Z
|
2022-03-31T21:05:22.000Z
|
sdk/python/pulumi_cloudflare/ip_list.py
|
pulumi/pulumi-cloudflare
|
d444af2fab6101b388a15cf2e3933e45e9935cc6
|
[
"ECL-2.0",
"Apache-2.0"
] | 6
|
2019-05-10T12:52:56.000Z
|
2020-03-24T15:02:14.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['IpListArgs', 'IpList']
@pulumi.input_type
class IpListArgs:
def __init__(__self__, *,
account_id: pulumi.Input[str],
kind: pulumi.Input[str],
name: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
items: Optional[pulumi.Input[Sequence[pulumi.Input['IpListItemArgs']]]] = None):
"""
The set of arguments for constructing a IpList resource.
:param pulumi.Input[str] account_id: The ID of the account where the IP List is being created.
:param pulumi.Input[str] kind: The kind of values in the List. Valid values: `ip`.
:param pulumi.Input[str] name: The name of the list (used in filter expressions). Valid pattern: `^[a-zA-Z0-9_]+$`. Maximum Length: 50
:param pulumi.Input[str] description: A note that can be used to annotate the List. Maximum Length: 500
"""
pulumi.set(__self__, "account_id", account_id)
pulumi.set(__self__, "kind", kind)
pulumi.set(__self__, "name", name)
if description is not None:
pulumi.set(__self__, "description", description)
if items is not None:
pulumi.set(__self__, "items", items)
@property
@pulumi.getter(name="accountId")
def account_id(self) -> pulumi.Input[str]:
"""
The ID of the account where the IP List is being created.
"""
return pulumi.get(self, "account_id")
@account_id.setter
def account_id(self, value: pulumi.Input[str]):
pulumi.set(self, "account_id", value)
@property
@pulumi.getter
def kind(self) -> pulumi.Input[str]:
"""
The kind of values in the List. Valid values: `ip`.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: pulumi.Input[str]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the list (used in filter expressions). Valid pattern: `^[a-zA-Z0-9_]+$`. Maximum Length: 50
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A note that can be used to annotate the List. Maximum Length: 500
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def items(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IpListItemArgs']]]]:
return pulumi.get(self, "items")
@items.setter
def items(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['IpListItemArgs']]]]):
pulumi.set(self, "items", value)
@pulumi.input_type
class _IpListState:
def __init__(__self__, *,
account_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
items: Optional[pulumi.Input[Sequence[pulumi.Input['IpListItemArgs']]]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering IpList resources.
:param pulumi.Input[str] account_id: The ID of the account where the IP List is being created.
:param pulumi.Input[str] description: A note that can be used to annotate the List. Maximum Length: 500
:param pulumi.Input[str] kind: The kind of values in the List. Valid values: `ip`.
:param pulumi.Input[str] name: The name of the list (used in filter expressions). Valid pattern: `^[a-zA-Z0-9_]+$`. Maximum Length: 50
"""
if account_id is not None:
pulumi.set(__self__, "account_id", account_id)
if description is not None:
pulumi.set(__self__, "description", description)
if items is not None:
pulumi.set(__self__, "items", items)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="accountId")
def account_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the account where the IP List is being created.
"""
return pulumi.get(self, "account_id")
@account_id.setter
def account_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "account_id", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A note that can be used to annotate the List. Maximum Length: 500
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def items(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IpListItemArgs']]]]:
return pulumi.get(self, "items")
@items.setter
def items(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['IpListItemArgs']]]]):
pulumi.set(self, "items", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
The kind of values in the List. Valid values: `ip`.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the list (used in filter expressions). Valid pattern: `^[a-zA-Z0-9_]+$`. Maximum Length: 50
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
class IpList(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
items: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpListItemArgs']]]]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
IP Lists are a set of IP addresses or CIDR ranges that are configured on the account level. Once created, IP Lists can be
used in Firewall Rules across all zones within the same account.
## Example Usage
```python
import pulumi
import pulumi_cloudflare as cloudflare
example = cloudflare.IpList("example",
account_id="d41d8cd98f00b204e9800998ecf8427e",
description="list description",
items=[
cloudflare.IpListItemArgs(
comment="Office IP",
value="192.0.2.1",
),
cloudflare.IpListItemArgs(
comment="Datacenter range",
value="203.0.113.0/24",
),
],
kind="ip",
name="example_list")
```
## Import
An existing IP List can be imported using the account ID and list ID
```sh
$ pulumi import cloudflare:index/ipList:IpList example d41d8cd98f00b204e9800998ecf8427e/cb029e245cfdd66dc8d2e570d5dd3322
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_id: The ID of the account where the IP List is being created.
:param pulumi.Input[str] description: A note that can be used to annotate the List. Maximum Length: 500
:param pulumi.Input[str] kind: The kind of values in the List. Valid values: `ip`.
:param pulumi.Input[str] name: The name of the list (used in filter expressions). Valid pattern: `^[a-zA-Z0-9_]+$`. Maximum Length: 50
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: IpListArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
IP Lists are a set of IP addresses or CIDR ranges that are configured on the account level. Once created, IP Lists can be
used in Firewall Rules across all zones within the same account.
## Example Usage
```python
import pulumi
import pulumi_cloudflare as cloudflare
example = cloudflare.IpList("example",
account_id="d41d8cd98f00b204e9800998ecf8427e",
description="list description",
items=[
cloudflare.IpListItemArgs(
comment="Office IP",
value="192.0.2.1",
),
cloudflare.IpListItemArgs(
comment="Datacenter range",
value="203.0.113.0/24",
),
],
kind="ip",
name="example_list")
```
## Import
An existing IP List can be imported using the account ID and list ID
```sh
$ pulumi import cloudflare:index/ipList:IpList example d41d8cd98f00b204e9800998ecf8427e/cb029e245cfdd66dc8d2e570d5dd3322
```
:param str resource_name: The name of the resource.
:param IpListArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(IpListArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
items: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpListItemArgs']]]]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = IpListArgs.__new__(IpListArgs)
if account_id is None and not opts.urn:
raise TypeError("Missing required property 'account_id'")
__props__.__dict__["account_id"] = account_id
__props__.__dict__["description"] = description
__props__.__dict__["items"] = items
if kind is None and not opts.urn:
raise TypeError("Missing required property 'kind'")
__props__.__dict__["kind"] = kind
if name is None and not opts.urn:
raise TypeError("Missing required property 'name'")
__props__.__dict__["name"] = name
super(IpList, __self__).__init__(
'cloudflare:index/ipList:IpList',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
account_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
items: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpListItemArgs']]]]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None) -> 'IpList':
"""
Get an existing IpList resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_id: The ID of the account where the IP List is being created.
:param pulumi.Input[str] description: A note that can be used to annotate the List. Maximum Length: 500
:param pulumi.Input[str] kind: The kind of values in the List. Valid values: `ip`.
:param pulumi.Input[str] name: The name of the list (used in filter expressions). Valid pattern: `^[a-zA-Z0-9_]+$`. Maximum Length: 50
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _IpListState.__new__(_IpListState)
__props__.__dict__["account_id"] = account_id
__props__.__dict__["description"] = description
__props__.__dict__["items"] = items
__props__.__dict__["kind"] = kind
__props__.__dict__["name"] = name
return IpList(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="accountId")
def account_id(self) -> pulumi.Output[str]:
"""
The ID of the account where the IP List is being created.
"""
return pulumi.get(self, "account_id")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A note that can be used to annotate the List. Maximum Length: 500
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def items(self) -> pulumi.Output[Optional[Sequence['outputs.IpListItem']]]:
return pulumi.get(self, "items")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
The kind of values in the List. Valid values: `ip`.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the list (used in filter expressions). Valid pattern: `^[a-zA-Z0-9_]+$`. Maximum Length: 50
"""
return pulumi.get(self, "name")
| 39.492347
| 142
| 0.610296
| 1,815
| 15,481
| 5.03416
| 0.104132
| 0.089088
| 0.082741
| 0.06501
| 0.815038
| 0.781985
| 0.765897
| 0.738098
| 0.728138
| 0.717194
| 0
| 0.018144
| 0.28086
| 15,481
| 391
| 143
| 39.59335
| 0.802569
| 0.340482
| 0
| 0.642512
| 1
| 0
| 0.085661
| 0.003261
| 0
| 0
| 0
| 0
| 0
| 1
| 0.154589
| false
| 0.004831
| 0.033816
| 0.014493
| 0.280193
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
069a08e0d8d820ced8b68cb1453340f7548b4732
| 4,592
|
py
|
Python
|
mysites/search_indexes.py
|
cmwaura/Newspade
|
ac434ea6ab5ae8ce4ae144e6746bfed214316a7c
|
[
"MIT"
] | null | null | null |
mysites/search_indexes.py
|
cmwaura/Newspade
|
ac434ea6ab5ae8ce4ae144e6746bfed214316a7c
|
[
"MIT"
] | 1
|
2016-04-12T18:25:39.000Z
|
2016-04-21T22:56:03.000Z
|
mysites/search_indexes.py
|
cmwaura/Newspade
|
ac434ea6ab5ae8ce4ae144e6746bfed214316a7c
|
[
"MIT"
] | 3
|
2016-04-12T18:24:16.000Z
|
2018-11-28T13:33:56.000Z
|
import datetime
from haystack import indexes
from celery_haystack.indexes import CelerySearchIndex
from .models import ReutersNews, BBCNews, AljazeeraNews, PoliticoNews, EconomistNews, ChristianScienceMonitor
from .models import ChristianScienceMonitor, WikiNews, GuardianNews
from articles.models import Articles
class NewsAdIndex(CelerySearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True, template_name="search/indexes/mysites/jobad_text.txt")
title = indexes.CharField(model_attr='title', boost=1.125)
description = indexes.CharField(model_attr='description')
url = indexes.CharField(model_attr='url')
date = indexes.DateTimeField(model_attr='date')
def get_model(self):
return BBCNews
class ReutersAdIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True, template_name="search/indexes/mysites/jobad_text.txt")
title = indexes.CharField(model_attr='title', boost=1.125)
description = indexes.CharField(model_attr='description')
url = indexes.CharField(model_attr='url')
date = indexes.DateTimeField(model_attr='date')
def get_model(self):
return ReutersNews
class AljazeeraAdIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True, template_name="search/indexes/mysites/jobad_text.txt")
title = indexes.CharField(model_attr='title', boost=1.125)
description = indexes.CharField(model_attr='description')
url = indexes.CharField(model_attr='url')
date = indexes.DateTimeField(model_attr='date')
def get_model(self):
return AljazeeraNews
class PoliticoAdIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True, template_name="search/indexes/mysites/jobad_text.txt")
title = indexes.CharField(model_attr='title', boost=1.125)
description = indexes.CharField(model_attr='description')
url = indexes.CharField(model_attr='url')
date = indexes.DateTimeField(model_attr='date')
def get_model(self):
return PoliticoNews
class ChristianScienceAdIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True, template_name="search/indexes/mysites/jobad_text.txt")
title = indexes.CharField(model_attr='title')
description = indexes.CharField(model_attr='description')
url = indexes.CharField(model_attr='url')
date = indexes.DateTimeField(model_attr='date')
def get_model(self):
return ChristianScienceMonitor
class WikiNewsAdIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True, template_name="search/indexes/mysites/jobad_text.txt")
title = indexes.CharField(model_attr='title', boost=1.125)
description = indexes.CharField(model_attr='description')
url = indexes.CharField(model_attr='url')
date = indexes.DateTimeField(model_attr='date')
def get_model(self):
return WikiNews
class GuardianAdIndex(indexes.SearchIndex, indexes.Indexable):
'''
In this case we are creating a search index basesd in the django models
of JobAd. Why JobAd? because that is where all the new jobs that we have
scraped are stored. In this way anybody can search for a particular job and
get a good estimate result. It inherits from haystack indexes and from the
class SearchIndex and Indexable, gets the relevant model and returns then
JobAd.
'''
text = indexes.CharField(document=True, use_template=True, template_name="search/indexes/mysites/jobad_text.txt")
title = indexes.CharField(model_attr='title', boost=1.125)
description = indexes.CharField(model_attr='description')
url = indexes.CharField(model_attr='url')
date = indexes.DateTimeField(model_attr='date')
def get_model(self):
return GuardianNews
class WikiNewsAdIndex(indexes.SearchIndex, indexes.Indexable):
    '''
    Haystack search index over scraped WikiNews entries (WikiNews model).
    It exposes one template-rendered ``text`` document field plus the
    title/description/url/date attributes of the model.

    NOTE(review): this is the SECOND definition of ``WikiNewsAdIndex`` in
    this module and silently shadows the earlier one; also, the previous
    docstring was copy-pasted from a JobAd index and described the wrong
    model. Confirm which definition is intended and delete the other.
    '''
    text = indexes.CharField(document=True, use_template=True, template_name="search/indexes/mysites/jobad_text.txt")
    # Slight boost so matches in the title outrank body/description matches.
    title = indexes.CharField(model_attr='title', boost=1.125)
    description = indexes.CharField(model_attr='description')
    url = indexes.CharField(model_attr='url')
    date = indexes.DateTimeField(model_attr='date')
    def get_model(self):
        # Model whose rows are indexed.
        return WikiNews
| 41
| 114
| 0.790941
| 604
| 4,592
| 5.905629
| 0.150662
| 0.143538
| 0.141295
| 0.168209
| 0.862069
| 0.862069
| 0.850855
| 0.850855
| 0.850855
| 0.850855
| 0
| 0.006824
| 0.10649
| 4,592
| 111
| 115
| 41.369369
| 0.86254
| 0.165287
| 0
| 0.728571
| 0
| 0
| 0.126216
| 0.077833
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114286
| false
| 0
| 0.085714
| 0.114286
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 10
|
230d9d6755fc10d49f72873ae214bee7368fe109
| 98
|
py
|
Python
|
contrib/diggext/drivers/locations/__init__.py
|
thekad/clusto
|
c141ea3ef4931c6a21fdf42845c6e9de5ee08caa
|
[
"BSD-3-Clause"
] | 216
|
2015-01-10T17:03:25.000Z
|
2022-03-24T07:23:41.000Z
|
contrib/diggext/drivers/locations/__init__.py
|
thekad/clusto
|
c141ea3ef4931c6a21fdf42845c6e9de5ee08caa
|
[
"BSD-3-Clause"
] | 23
|
2015-01-08T16:51:22.000Z
|
2021-03-13T12:56:04.000Z
|
contrib/diggext/drivers/locations/__init__.py
|
thekad/clusto
|
c141ea3ef4931c6a21fdf42845c6e9de5ee08caa
|
[
"BSD-3-Clause"
] | 49
|
2015-01-08T00:13:17.000Z
|
2021-09-22T02:01:20.000Z
|
from diggext.drivers.locations.datacenters import *
from diggext.drivers.locations.racks import *
| 32.666667
| 51
| 0.836735
| 12
| 98
| 6.833333
| 0.583333
| 0.268293
| 0.439024
| 0.658537
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 98
| 2
| 52
| 49
| 0.911111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
23431973d0bcf65a36ae61549c0af8dadab98558
| 155
|
py
|
Python
|
synthtorch/models/__init__.py
|
jcreinhold/synthtorch
|
bb6eb20641b2cae3cbb96421b12e03865b5c5095
|
[
"Apache-2.0"
] | 23
|
2019-05-15T12:27:29.000Z
|
2021-10-03T09:03:08.000Z
|
synthtorch/models/__init__.py
|
jcreinhold/synthtorch
|
bb6eb20641b2cae3cbb96421b12e03865b5c5095
|
[
"Apache-2.0"
] | 14
|
2019-05-08T22:06:00.000Z
|
2021-09-24T19:47:46.000Z
|
synthtorch/models/__init__.py
|
jcreinhold/synthtorch
|
bb6eb20641b2cae3cbb96421b12e03865b5c5095
|
[
"Apache-2.0"
] | 4
|
2019-08-29T03:25:03.000Z
|
2020-06-30T03:53:38.000Z
|
from synthtorch.models.densenet import *
from synthtorch.models.nconvnet import *
from synthtorch.models.unet import *
from synthtorch.models.vae import *
| 31
| 40
| 0.819355
| 20
| 155
| 6.35
| 0.4
| 0.440945
| 0.629921
| 0.614173
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103226
| 155
| 4
| 41
| 38.75
| 0.913669
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
88e75658d819a16a7721000dce8f9248ba68507a
| 1,408
|
py
|
Python
|
homeschool/schools/migrations/0002_auto_20210422_1433.py
|
chriswedgwood/homeschool
|
d5267b13154aaa52c9c3edbf06b251f123583ae8
|
[
"MIT"
] | 154
|
2019-12-24T17:45:44.000Z
|
2022-03-30T23:03:06.000Z
|
homeschool/schools/migrations/0002_auto_20210422_1433.py
|
chriswedgwood/homeschool
|
d5267b13154aaa52c9c3edbf06b251f123583ae8
|
[
"MIT"
] | 397
|
2019-11-05T03:23:45.000Z
|
2022-03-31T04:51:55.000Z
|
homeschool/schools/migrations/0002_auto_20210422_1433.py
|
chriswedgwood/homeschool
|
d5267b13154aaa52c9c3edbf06b251f123583ae8
|
[
"MIT"
] | 44
|
2020-02-24T13:08:52.000Z
|
2022-02-24T05:03:13.000Z
|
# Generated by Django 3.1.8 on 2021-04-22 14:33
import hashid_field.field
from django.db import migrations
class Migration(migrations.Migration):
    """Switch the primary keys of the school models to Hashid auto fields."""

    dependencies = [("schools", "0001_initial")]

    # The three AlterField operations are identical apart from the model
    # name, so they are generated from a single template instead of being
    # written out three times.
    operations = [
        migrations.AlterField(
            model_name=model,
            name="id",
            field=hashid_field.field.HashidAutoField(
                alphabet="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890",  # noqa
                min_length=7,
                prefix="",
                primary_key=True,
                serialize=False,
            ),
        )
        for model in ("gradelevel", "schoolbreak", "schoolyear")
    ]
| 30.608696
| 98
| 0.546165
| 102
| 1,408
| 7.401961
| 0.441176
| 0.058278
| 0.084768
| 0.115232
| 0.712583
| 0.712583
| 0.712583
| 0.712583
| 0.712583
| 0.712583
| 0
| 0.058231
| 0.365767
| 1,408
| 45
| 99
| 31.288889
| 0.787234
| 0.042614
| 0
| 0.769231
| 1
| 0
| 0.180194
| 0.138496
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.051282
| 0
| 0.128205
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
88efb102eb4ba32bbda35cf3e7e5012a85790f55
| 22,545
|
py
|
Python
|
scale/timeline/test/test_views.py
|
kaydoh/scale
|
1b6a3b879ffe83e10d3b9d9074835a4c3bf476ee
|
[
"Apache-2.0"
] | 121
|
2015-11-18T18:15:33.000Z
|
2022-03-10T01:55:00.000Z
|
scale/timeline/test/test_views.py
|
kaydoh/scale
|
1b6a3b879ffe83e10d3b9d9074835a4c3bf476ee
|
[
"Apache-2.0"
] | 1,415
|
2015-12-23T23:36:04.000Z
|
2022-01-07T14:10:09.000Z
|
scale/timeline/test/test_views.py
|
kaydoh/scale
|
1b6a3b879ffe83e10d3b9d9074835a4c3bf476ee
|
[
"Apache-2.0"
] | 66
|
2015-12-03T20:38:56.000Z
|
2020-07-27T15:28:11.000Z
|
from __future__ import unicode_literals
import copy
import datetime
import django
import json
from django.utils.timezone import utc
import job.test.utils as job_test_utils
import recipe.test.utils as recipe_test_utils
import storage.test.utils as storage_test_utils
from recipe.models import RecipeType
from rest_framework import status
from rest_framework.test import APITestCase
from util import rest
class TestRecipeTypeTimelineView(APITestCase):
    """Integration tests for the v6 ``/timeline/recipe-types/`` endpoint."""
    api = 'v6'
    def setUp(self):
        """Create two job types, two recipe types, and six days of recipes/jobs."""
        django.setup()
        rest.login_client(self.client, is_staff=True)
        # create a couple job types
        manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)
        manifest['job']['name'] = 'test-job-1'
        manifest['job']['interface']['inputs'] = {'files': [{'name': 'INPUT_FILE', 'required': True,
                                                             'mediaTypes': ['image/png'], 'partial': False}]}
        self.job_type_1 = job_test_utils.create_seed_job_type(manifest=manifest)
        manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)
        manifest['job']['name'] = 'test-job-2'
        manifest['job']['interface']['inputs'] = {'files': [{'name': 'INPUT_FILE', 'required': True,
                                                             'mediaTypes': ['image/png'], 'partial': False}]}
        self.job_type_2 = job_test_utils.create_seed_job_type(manifest=manifest)
        # create recipe types
        # recipe_type_1: single node running job_type_1
        recipe_def = {
            'version': '7',
            'input': {'files': [{'name': 'INPUT_FILE', 'media_types': ['image/png'], 'required': True,
                                 'multiple': False}],
                      'json': []},
            'nodes': {
                'node_a': {
                    'dependencies': [],
                    'input': {'INPUT_FILE': {'type': 'recipe', 'input': 'INPUT_FILE'}},
                    'node_type': {'node_type': 'job', 'job_type_name': self.job_type_1.name,
                                  'job_type_version': self.job_type_1.version,
                                  'job_type_revision': self.job_type_1.revision_num}
                }
            }
        }
        self.recipe_type_1 = recipe_test_utils.create_recipe_type_v6(definition=recipe_def)
        # recipe_type_2: two nodes, one per job type
        recipe_def = {
            'version': '7',
            'input': {'files': [{'name': 'INPUT_FILE', 'media_types': ['image/png'], 'required': True,
                                 'multiple': False}],
                      'json': []},
            'nodes': {
                'node_a': {
                    'dependencies': [],
                    'input': {'INPUT_FILE': {'type': 'recipe', 'input': 'INPUT_FILE'}},
                    'node_type': {'node_type': 'job', 'job_type_name': self.job_type_2.name,
                                  'job_type_version': self.job_type_2.version,
                                  'job_type_revision': self.job_type_2.revision_num}
                },
                'node_b': {
                    'dependencies': [],
                    'input': {'INPUT_FILE': {'type': 'recipe', 'input': 'INPUT_FILE'}},
                    'node_type': {'node_type': 'job', 'job_type_name': self.job_type_1.name,
                                  'job_type_version': self.job_type_1.version,
                                  'job_type_revision': self.job_type_1.revision_num}
                }
            }
        }
        self.recipe_type_2 = recipe_test_utils.create_recipe_type_v6(definition=recipe_def)
        # create recipes & jobs
        self.workspace = storage_test_utils.create_workspace()
        # One recipe of each type per day for the first six days of Jan 2020;
        # date_3 deliberately drifts outside Jan for later days.
        for i in range(1, 7):
            date_1 = datetime.datetime(2020, 1, i, tzinfo=utc)
            date_2 = datetime.datetime(2020, 1, i+1, tzinfo=utc)
            date_3 = datetime.datetime(2020, i, i+1, tzinfo=utc)
            file_1 = storage_test_utils.create_file(workspace=self.workspace, file_size=104857600.0,
                                                    source_started=date_1, source_ended=date_2)
            input_data = {
                'version': '1.0',
                'input_data': [{
                    'name': 'INPUT_FILE',
                    'file_id': file_1.id
                }]
            }
            # Recipe 1's jobs
            recipe_1 = recipe_test_utils.create_recipe(recipe_type=self.recipe_type_1, input=input_data)
            job_1 = job_test_utils.create_job(job_type=self.job_type_1, status='COMPLETED', started=date_1, ended=date_1)
            job_1.recipe_id = recipe_1.id
            job_1.save()
            # Recipe 2s jobs
            recipe_2 = recipe_test_utils.create_recipe(recipe_type=self.recipe_type_2, input=input_data)
            job_2 = job_test_utils.create_job(job_type=self.job_type_2, status='COMPLETED', started=date_2, ended=date_2)
            job_2.recipe_id = recipe_2.id
            job_2.save()
            job_3 = job_test_utils.create_job(job_type=self.job_type_1, status='COMPLETED', started=date_3,
                                              ended=date_3)
            job_3.recipe_id = recipe_2.id
            job_3.save()
    def test_successful(self):
        """Tests calling /timeline/recipe-types/ with a full date range returns both recipe types"""
        started = '2020-01-01T00:00:00Z'
        ended = '2020-02-01T00:00:00Z'
        url = '/%s/timeline/recipe-types/?started=%s&ended=%s' % (self.api, started, ended)
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        result = json.loads(response.content)
        results = result['results']
        self.assertEqual(len(results), 2)
        for result in results:
            the_type = None
            if result['recipe_type_id'] == self.recipe_type_1.id:
                the_type = self.recipe_type_1
            elif result['recipe_type_id'] == self.recipe_type_2.id:
                the_type = self.recipe_type_2
            self.assertEqual(result['name'], the_type.name)
            self.assertEqual(result['title'], the_type.title)
            self.assertEqual(result['revision_num'], the_type.revision_num)
    def test_range(self):
        """Tests calling /timeline/recipe-types/ with only a start date"""
        started = '2020-01-01T00:00:00Z'
        url = '/%s/timeline/recipe-types/?started=%s' % (self.api, started)
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
    def test_type_name(self):
        """Tests calling /timeline/recipe-types/ filtered by recipe type name"""
        started = '2020-01-01T00:00:00Z'
        ended = '2020-02-01T00:00:00Z'
        url = '/%s/timeline/recipe-types/?started=%s&ended=%s&name=%s' % (self.api, started, ended,
                                                                          self.recipe_type_1.name)
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        result = json.loads(response.content)
        results = result['results']
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0]['name'], self.recipe_type_1.name)
        self.assertEqual(results[0]['revision_num'], self.recipe_type_1.revision_num)
    def test_type_ids(self):
        """Tests calling /timeline/recipe-types/ filtered by recipe type id"""
        started = '2020-01-01T00:00:00Z'
        ended = '2020-02-01T00:00:00Z'
        url = '/%s/timeline/recipe-types/?started=%s&ended=%s&id=%s' % (self.api, started, ended,
                                                                        self.recipe_type_2.id)
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        result = json.loads(response.content)
        results = result['results']
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0]['name'], self.recipe_type_2.name)
        self.assertEqual(results[0]['revision_num'], self.recipe_type_2.revision_num)
    def test_type_revisions(self):
        """Tests calling /timeline/recipe-types/ filtered by recipe type id and revision"""
        # create recipe type
        recipe_def = {
            'version': '7',
            'input': {'files': [{'name': 'INPUT_FILE', 'media_types': ['image/png'], 'required': True,
                                 'multiple': False}],
                      'json': []},
            'nodes': {
                'node_a': {
                    'dependencies': [],
                    'input': {'INPUT_FILE': {'type': 'recipe', 'input': 'INPUT_FILE'}},
                    'node_type': {'node_type': 'job', 'job_type_name': self.job_type_1.name,
                                  'job_type_version': self.job_type_1.version,
                                  'job_type_revision': self.job_type_1.revision_num}
                }
            }
        }
        rtype = recipe_test_utils.create_recipe_type_v6(name='revision-recipe', definition=recipe_def)
        # Second definition adds node_b, so editing bumps the revision number.
        recipe_def_v2 = {
            'version': '7',
            'input': {'files': [{'name': 'INPUT_FILE', 'media_types': ['image/png'], 'required': True,
                                 'multiple': False}],
                      'json': []},
            'nodes': {
                'node_a': {
                    'dependencies': [],
                    'input': {'INPUT_FILE': {'type': 'recipe', 'input': 'INPUT_FILE'}},
                    'node_type': {'node_type': 'job', 'job_type_name': self.job_type_1.name,
                                  'job_type_version': self.job_type_1.version,
                                  'job_type_revision': self.job_type_1.revision_num}
                },
                'node_b': {
                    'dependencies': [],
                    'input': {'INPUT_FILE': {'type': 'recipe', 'input': 'INPUT_FILE'}},
                    'node_type': {'node_type': 'job', 'job_type_name': self.job_type_2.name,
                                  'job_type_version': self.job_type_2.version,
                                  'job_type_revision': self.job_type_2.revision_num}
                }
            }
        }
        recipe_test_utils.edit_recipe_type_v6(rtype, title='edited recipe', definition=recipe_def_v2, auto_update=False)
        rtype_edit = RecipeType.objects.get(id=rtype.id)
        for i in range(1, 7):
            date_1 = datetime.datetime(2020, 1, i, tzinfo=utc)
            date_2 = datetime.datetime(2020, 1, i+1, tzinfo=utc)
            file_1 = storage_test_utils.create_file(workspace=self.workspace, file_size=104857600.0,
                                                    source_started=date_1, source_ended=date_2)
            input_data = {
                'version': '1.0',
                'input_data': [{
                    'name': 'INPUT_FILE',
                    'file_id': file_1.id
                }]
            }
            # Recipe 1's jobs
            recipe_1 = recipe_test_utils.create_recipe(recipe_type=rtype_edit, input=input_data)
            job_1 = job_test_utils.create_job(job_type=self.job_type_1, status='COMPLETED', started=date_1, ended=date_1)
            job_1.recipe_id = recipe_1.id
            job_1.save()
            job_2 = job_test_utils.create_job(job_type=self.job_type_2, status='COMPLETED', started=date_2, ended=date_2)
            job_2.recipe_id = recipe_1.id
            job_2.save()
        started = '2020-01-01T00:00:00Z'
        ended = '2020-02-01T00:00:00Z'
        url = '/%s/timeline/recipe-types/?started=%s&ended=%s&id=%s&rev=%s' % (self.api, started, ended,
                                                                               rtype_edit.id, rtype_edit.revision_num)
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        result = json.loads(response.content)
        results = result['results']
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0]['name'], rtype_edit.name)
        self.assertEqual(results[0]['revision_num'], rtype_edit.revision_num)
        self.assertEqual(results[0]['title'], rtype_edit.title)
    def test_no_range(self):
        """Tests calling /timeline/recipe-types with no date range"""
        url = '/%s/timeline/recipe-types/' % self.api
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
class TestJobTypeTimelineView(APITestCase):
    """Integration tests for the v6 ``/timeline/job-types/`` endpoint."""
    api = 'v6'
    def setUp(self):
        """Create two job types, two recipe types, and six days of recipes/jobs."""
        django.setup()
        rest.login_client(self.client, is_staff=True)
        # create a couple job types
        manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)
        manifest['job']['name'] = 'test-job-1'
        manifest['job']['interface']['inputs'] = {'files': [{'name': 'INPUT_FILE', 'required': True,
                                                             'mediaTypes': ['image/png'], 'partial': False}]}
        self.job_type_1 = job_test_utils.create_seed_job_type(manifest=manifest)
        manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)
        manifest['job']['name'] = 'test-job-2'
        manifest['job']['interface']['inputs'] = {'files': [{'name': 'INPUT_FILE', 'required': True,
                                                             'mediaTypes': ['image/png'], 'partial': False}]}
        self.job_type_2 = job_test_utils.create_seed_job_type(manifest=manifest)
        # create recipe types
        # recipe_type_1: single node running job_type_1
        recipe_def = {
            'version': '7',
            'input': {'files': [{'name': 'INPUT_FILE', 'media_types': ['image/png'], 'required': True,
                                 'multiple': False}],
                      'json': []},
            'nodes': {
                'node_a': {
                    'dependencies': [],
                    'input': {'INPUT_FILE': {'type': 'recipe', 'input': 'INPUT_FILE'}},
                    'node_type': {'node_type': 'job', 'job_type_name': self.job_type_1.name,
                                  'job_type_version': self.job_type_1.version,
                                  'job_type_revision': self.job_type_1.revision_num}
                }
            }
        }
        self.recipe_type_1 = recipe_test_utils.create_recipe_type_v6(definition=recipe_def)
        # recipe_type_2: two nodes, one per job type
        recipe_def = {
            'version': '7',
            'input': {'files': [{'name': 'INPUT_FILE', 'media_types': ['image/png'], 'required': True,
                                 'multiple': False}],
                      'json': []},
            'nodes': {
                'node_a': {
                    'dependencies': [],
                    'input': {'INPUT_FILE': {'type': 'recipe', 'input': 'INPUT_FILE'}},
                    'node_type': {'node_type': 'job', 'job_type_name': self.job_type_2.name,
                                  'job_type_version': self.job_type_2.version,
                                  'job_type_revision': self.job_type_2.revision_num}
                },
                'node_b': {
                    'dependencies': [],
                    'input': {'INPUT_FILE': {'type': 'recipe', 'input': 'INPUT_FILE'}},
                    'node_type': {'node_type': 'job', 'job_type_name': self.job_type_1.name,
                                  'job_type_version': self.job_type_1.version,
                                  'job_type_revision': self.job_type_1.revision_num}
                }
            }
        }
        self.recipe_type_2 = recipe_test_utils.create_recipe_type_v6(definition=recipe_def)
        # create recipes & jobs
        self.workspace = storage_test_utils.create_workspace()
        # One recipe of each type per day for the first six days of Jan 2020;
        # date_3 deliberately drifts outside Jan for later days.
        for i in range(1, 7):
            date_1 = datetime.datetime(2020, 1, i, tzinfo=utc)
            date_2 = datetime.datetime(2020, 1, i + 1, tzinfo=utc)
            date_3 = datetime.datetime(2020, i, i + 1, tzinfo=utc)
            file_1 = storage_test_utils.create_file(workspace=self.workspace, file_size=104857600.0,
                                                    source_started=date_1, source_ended=date_2)
            input_data = {
                'version': '1.0',
                'input_data': [{
                    'name': 'INPUT_FILE',
                    'file_id': file_1.id
                }]
            }
            # Recipe 1's jobs
            recipe_1 = recipe_test_utils.create_recipe(recipe_type=self.recipe_type_1, input=input_data)
            job_1 = job_test_utils.create_job(job_type=self.job_type_1, status='COMPLETED', started=date_1,
                                              ended=date_1)
            job_1.recipe_id = recipe_1.id
            job_1.save()
            # Recipe 2s jobs
            recipe_2 = recipe_test_utils.create_recipe(recipe_type=self.recipe_type_2, input=input_data)
            job_2 = job_test_utils.create_job(job_type=self.job_type_2, status='COMPLETED', started=date_2,
                                              ended=date_2)
            job_2.recipe_id = recipe_2.id
            job_2.save()
            job_3 = job_test_utils.create_job(job_type=self.job_type_1, status='COMPLETED', started=date_3,
                                              ended=date_3)
            job_3.recipe_id = recipe_2.id
            job_3.save()
    def test_successful(self):
        """Tests calling /timeline/job-types/ with a full date range returns both job types"""
        started = '2020-01-01T00:00:00Z'
        ended = '2020-02-01T00:00:00Z'
        url = '/%s/timeline/job-types/?started=%s&ended=%s' % (self.api, started, ended)
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        result = json.loads(response.content)
        results = result['results']
        self.assertEqual(len(results), 2)
        for result in results:
            the_type = None
            if result['name'] == self.job_type_1.name:
                the_type = self.job_type_1
            elif result['name'] == self.job_type_2.name:
                the_type = self.job_type_2
            self.assertEqual(result['name'], the_type.name)
            self.assertEqual(result['title'], the_type.get_title())
            self.assertEqual(result['revision_num'], the_type.revision_num)
    def test_type_ids(self):
        """Tests calling /timeline/job-types/ filtered by job type name"""
        started = '2020-01-01T00:00:00Z'
        ended = '2020-02-01T00:00:00Z'
        url = '/%s/timeline/job-types/?started=%s&ended=%s&name=%s' % (self.api, started, ended, self.job_type_1.name)
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        result = json.loads(response.content)
        results = result['results']
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0]['name'], self.job_type_1.name)
        self.assertEqual(results[0]['title'], self.job_type_1.get_title())
        self.assertEqual(results[0]['revision_num'], self.job_type_1.revision_num)
    def test_type_names(self):
        """Tests calling /timeline/job-types/ filtered by job type id"""
        started = '2020-01-01T00:00:00Z'
        ended = '2020-02-01T00:00:00Z'
        url = '/%s/timeline/job-types/?started=%s&ended=%s&id=%s' % (self.api, started, ended, self.job_type_2.id)
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        result = json.loads(response.content)
        results = result['results']
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0]['name'], self.job_type_2.name)
        self.assertEqual(results[0]['title'], self.job_type_2.get_title())
        self.assertEqual(results[0]['revision_num'], self.job_type_2.revision_num)
    def test_type_versions(self):
        """Tests calling /timeline/job-types filtered by job type name and version"""
        # New version of job_type_1's manifest so a second version exists.
        manifest = copy.deepcopy(self.job_type_1.manifest)
        manifest['job']['jobVersion'] = '1.1.1'
        job_type = job_test_utils.create_seed_job_type(manifest=manifest)
        recipe_def = {
            'version': '7',
            'input': {'files': [{'name': 'INPUT_FILE', 'media_types': ['image/png'], 'required': True,
                                 'multiple': False}],
                      'json': []},
            'nodes': {
                'node_a': {
                    'dependencies': [],
                    'input': {'INPUT_FILE': {'type': 'recipe', 'input': 'INPUT_FILE'}},
                    'node_type': {'node_type': 'job', 'job_type_name': job_type.name,
                                  'job_type_version': job_type.version,
                                  'job_type_revision': job_type.revision_num}
                }
            }
        }
        recipe_test_utils.edit_recipe_type_v6(self.recipe_type_1, definition=recipe_def, auto_update=False)
        recipe_edited = RecipeType.objects.get(id=self.recipe_type_1.id)
        for i in range(1, 7):
            date_1 = datetime.datetime(2020, 1, i, tzinfo=utc)
            date_2 = datetime.datetime(2020, 1, i + 1, tzinfo=utc)
            file_1 = storage_test_utils.create_file(workspace=self.workspace, file_size=104857600.0,
                                                    source_started=date_1, source_ended=date_2)
            input_data = {'version': '1.0', 'input_data': [{'name': 'INPUT_FILE','file_id': file_1.id}]}
            recipe_1 = recipe_test_utils.create_recipe(recipe_type=recipe_edited, input=input_data)
            job_1 = job_test_utils.create_job(job_type=job_type, status='COMPLETED', started=date_1,
                                              ended=date_1)
            job_1.recipe_id = recipe_1.id
            job_1.save()
        started = '2020-01-01T00:00:00Z'
        ended = '2020-02-01T00:00:00Z'
        url = '/%s/timeline/job-types/?started=%s&ended=%s&name=%s&version=%s' % (self.api, started, ended,
                                                                                  job_type.name, job_type.version)
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        result = json.loads(response.content)
        results = result['results']
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0]['name'], job_type.name)
        self.assertEqual(results[0]['title'], job_type.get_title())
        self.assertEqual(results[0]['version'], job_type.version)
        self.assertEqual(results[0]['revision_num'], job_type.revision_num)
| 46.96875
| 121
| 0.550943
| 2,603
| 22,545
| 4.502497
| 0.054552
| 0.064505
| 0.048805
| 0.032765
| 0.91459
| 0.90128
| 0.873976
| 0.855205
| 0.853669
| 0.825427
| 0
| 0.038397
| 0.317277
| 22,545
| 479
| 122
| 47.066806
| 0.723038
| 0.030295
| 0
| 0.714286
| 0
| 0.005195
| 0.162373
| 0.021958
| 0
| 0
| 0
| 0
| 0.106494
| 1
| 0.031169
| false
| 0
| 0.033766
| 0
| 0.075325
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cc8c553a3c0ff92b9b931e9d059d7e013d14824b
| 161
|
py
|
Python
|
practice_circleci/core.py
|
ken0407/practice_circleci
|
90f774149431a841c691358b745faceb674f77b9
|
[
"BSD-2-Clause"
] | null | null | null |
practice_circleci/core.py
|
ken0407/practice_circleci
|
90f774149431a841c691358b745faceb674f77b9
|
[
"BSD-2-Clause"
] | null | null | null |
practice_circleci/core.py
|
ken0407/practice_circleci
|
90f774149431a841c691358b745faceb674f77b9
|
[
"BSD-2-Clause"
] | null | null | null |
class int_num():
    """A minimal mutable integer wrapper.

    Holds a single number in ``value`` and offers in-place addition
    (``add``) and subtraction (``diff``).
    """

    def __init__(self, x):
        # The wrapped number; mutated by add()/diff().
        self.value = x

    def add(self, y):
        """Increase the stored value by *y*."""
        self.value = self.value + y

    def diff(self, y):
        """Decrease the stored value by *y*."""
        self.value = self.value - y
| 17.888889
| 26
| 0.503106
| 24
| 161
| 3.166667
| 0.458333
| 0.355263
| 0.236842
| 0.368421
| 0.394737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.354037
| 161
| 9
| 27
| 17.888889
| 0.730769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
cc90de10cae307962d3a1f405123848c6de88fef
| 44,973
|
py
|
Python
|
servers/cromwell/jobs/test/test_jobs_controller.py
|
bvprivate/job-monitor
|
7d1fb6a9680598baf7ae6a48e7841fa0ffcb7427
|
[
"BSD-3-Clause"
] | null | null | null |
servers/cromwell/jobs/test/test_jobs_controller.py
|
bvprivate/job-monitor
|
7d1fb6a9680598baf7ae6a48e7841fa0ffcb7427
|
[
"BSD-3-Clause"
] | 1
|
2017-12-04T22:45:46.000Z
|
2017-12-04T22:45:46.000Z
|
servers/cromwell/jobs/test/test_jobs_controller.py
|
DataBiosphere/job-monitor
|
7d1fb6a9680598baf7ae6a48e7841fa0ffcb7427
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
from __future__ import absolute_import
import requests_mock
from flask import json
from datetime import datetime
from dateutil.tz import *
import dateutil.parser
from . import BaseTestCase
from jobs.models.extended_fields import ExtendedFields
from jobs.models.query_jobs_request import QueryJobsRequest
from jobs.models.query_jobs_result import QueryJobsResult
from jobs.models.update_job_labels_request import UpdateJobLabelsRequest
from jobs.models.update_job_labels_response import UpdateJobLabelsResponse
from jobs.controllers import jobs_controller
class TestJobsController(BaseTestCase):
""" JobsController integration test stubs """
maxDiff = None
def setUp(self):
self.base_url = 'https://test-cromwell.org'
self.app.config.update({
'cromwell_url': self.base_url,
'cromwell_user': 'user',
'cromwell_password': 'password',
'use_caas': False,
'capabilities': {}
})
@requests_mock.mock()
def test_abort_job(self, mock_request):
"""
Test case for abort_job
Abort a job by ID
"""
workflow_id = 'id'
def _request_callback(request, context):
context.status_code = 200
abort_url = self.base_url + '/{id}/abort'.format(id=workflow_id)
mock_request.post(abort_url, json=_request_callback)
response = self.client.open('/jobs/{id}/abort'.format(id=workflow_id),
method='POST')
self.assertStatus(response, 200)
@requests_mock.mock()
def test_abort_job_not_found(self, mock_request):
"""Test that aborting a job that is not running returns a 404 response."""
workflow_id = 'id'
def _request_callback(request, context):
context.status_code = 404
return {
'status':
'error',
'message':
'Couldn\'t abort {} because no workflow with that ID is in progress'
.format(workflow_id)
}
abort_url = self.base_url + '/{id}/abort'.format(id=workflow_id)
mock_request.post(abort_url, json=_request_callback)
response = self.client.open('/jobs/{id}/abort'.format(id=workflow_id),
method='POST')
self.assertStatus(response, 404)
@requests_mock.mock()
def test_update_job_labels_returns_200(self, mock_request):
"""
Test case for update_job_labels.
Update job's labels. Currently Cromwell will ONLY return the UPDATED labels instead of ALL labels of the job,
the Job Manager makes two separate HTTP requests here to Cromwell so that it can get ALL labels.
"""
workflow_id = 'id'
workflow_name = 'test'
status = 'Succeeded'
timestamp = '2017-11-08T05:06:41.424Z'
inputs = {'test.inputs': 'gs://project-bucket/test/inputs.txt'}
outputs = {
'test.analysis.outputs': 'gs://project-bucket/test/outputs.txt'
}
labels = {}
backend_log = '/cromwell/cromwell-executions/id/call-analysis/call-analysis-log'
attempts = 1
return_code = 0
def _request_callback_labels(request, context):
context.status_code = 200
return {"labels": {"test_label": "test_label_value"}}
def _request_callback_get_job(request, context):
context.status_code = 200
return {
'workflowName': workflow_name,
'id': workflow_id,
'status': status,
'calls': {
'test.analysis': [{
'executionStatus': 'Done',
'start': timestamp,
'end': timestamp,
'backendLog': backend_log,
'returnCode': return_code,
'inputs': inputs,
'outputs': outputs,
'attempt': attempts
}]
},
'inputs': inputs,
'labels': labels,
'outputs': outputs,
'submission': timestamp,
'end': timestamp,
'start': timestamp,
'failures': [
{'causedBy': [
{
'causedBy': [],
'message': 'Task test.analysis failed'
}
],
'message': 'Workflow failed'}
]
} # yapf: disable
update_label_url = self.base_url + '/{id}/labels'.format(
id=workflow_id)
cromwell_url = self.base_url + '/{id}/metadata'.format(id=workflow_id)
mock_request.patch(update_label_url, json=_request_callback_labels)
mock_request.get(cromwell_url, json=_request_callback_get_job)
payload = UpdateJobLabelsRequest(
labels={"test_label": "test_label_value"})
response = self.client.open(
'/jobs/{id}/updateLabels'.format(id=workflow_id),
method='POST',
data=json.dumps(payload),
content_type='application/json')
self.assertStatus(response, 200)
self.assertEquals(response.json,
{"labels": {
"test_label": "test_label_value"
}})
@requests_mock.mock()
def test_update_job_labels_returns_all_labels(self, mock_request):
workflow_id = 'id'
workflow_name = 'test'
status = 'Succeeded'
timestamp = '2017-11-08T05:06:41.424Z'
inputs = {'test.inputs': 'gs://project-bucket/test/inputs.txt'}
outputs = {
'test.analysis.outputs': 'gs://project-bucket/test/outputs.txt'
}
labels = {
"existing_test_label1": "existing_test_label_value1",
"existing_test_label2": "existing_test_label_value2"
}
backend_log = '/cromwell/cromwell-executions/id/call-analysis/call-analysis-log'
attempts = 1
return_code = 0
def _request_callback_labels(request, context):
context.status_code = 200
return {
"labels": {
"new_test_label": "new_test_label_value",
"existing_test_label1": "existing_test_label_value1",
"existing_test_label2": "existing_test_label_value2"
}
}
def _request_callback_get_job(request, context):
context.status_code = 200
return {
'workflowName': workflow_name,
'id': workflow_id,
'status': status,
'calls': {
'test.analysis': [{
'executionStatus': 'Done',
'start': timestamp,
'end': timestamp,
'backendLog': backend_log,
'returnCode': return_code,
'inputs': inputs,
'attempt': attempts
}]
},
'inputs': inputs,
'labels': labels,
'outputs': outputs,
'submission': timestamp,
'end': timestamp,
'start': timestamp,
'failures': [
{'causedBy': [
{
'causedBy': [],
'message': 'Task test.analysis failed'
}
],
'message': 'Workflow failed'}
]
} # yapf: disable
update_label_url = self.base_url + '/{id}/labels'.format(
id=workflow_id)
cromwell_url = self.base_url + '/{id}/metadata'.format(id=workflow_id)
mock_request.patch(update_label_url, json=_request_callback_labels)
mock_request.get(cromwell_url, json=_request_callback_get_job)
payload = UpdateJobLabelsRequest(
labels={"new_test_label": "new_test_label_value"})
response = self.client.open(
'/jobs/{id}/updateLabels'.format(id=workflow_id),
method='POST',
data=json.dumps(payload),
content_type='application/json')
expected_result = UpdateJobLabelsResponse.from_dict({
"labels": {
"existing_test_label1": "existing_test_label_value1",
"existing_test_label2": "existing_test_label_value2",
"new_test_label": "new_test_label_value"
}
})
result = UpdateJobLabelsResponse.from_dict(response.json)
self.assertStatus(response, 200)
self.assertDictEqual(result.labels, expected_result.labels)
@requests_mock.mock()
def test_update_job_labels_bad_request(self, mock_request):
workflow_id = 'id'
error_message = "Invalid label: `` did not match the regex [a-z]([-a-z0-9]*[a-z0-9])?."
def _request_callback(request, context):
context.status_code = 400
return {"status": "fail", "message": error_message}
update_label_url = self.base_url + '/{id}/labels'.format(
id=workflow_id)
mock_request.patch(update_label_url, json=_request_callback)
payload = UpdateJobLabelsRequest(labels={"": "test_invalid_label"})
response = self.client.open(
'/jobs/{id}/updateLabels'.format(id=workflow_id),
method='POST',
data=json.dumps(payload),
content_type='application/json')
self.assertStatus(response, 400)
self.assertEquals(json.loads(response.data)['detail'], error_message)
@requests_mock.mock()
def test_update_job_labels_internal_server_error(self, mock_request):
workflow_id = 'id'
error_message = "Invalid workflow ID: test_invalid_workflow_id"
def _request_callback(request, context):
context.status_code = 500
return {"status": "error", "message": error_message}
update_label_url = self.base_url + '/{id}/labels'.format(
id=workflow_id)
mock_request.patch(update_label_url, json=_request_callback)
payload = UpdateJobLabelsRequest(
labels={"test_label": "test_label_value"})
response = self.client.open(
'/jobs/{id}/updateLabels'.format(id=workflow_id),
method='POST',
data=json.dumps(payload),
content_type='application/json')
self.assertStatus(response, 500)
self.assertEquals(json.loads(response.data)['detail'], error_message)
@requests_mock.mock()
def test_update_job_labels_not_found(self, mock_request):
"""Note: This status code is not currently properly returned by the Cromwell actually the error 'Unrecognized
workflow ID' will return with a status code 500 now, the Cromwell team will address this issue in
the near future."""
workflow_id = 'id'
error_message = "Unrecognized workflow ID: 12345678-aaaa-bbbb-cccc-dddddddddddd"
def _request_callback(request, context):
context.status_code = 404
return {"status": "error", "message": error_message}
update_label_url = self.base_url + '/{id}/labels'.format(
id=workflow_id)
mock_request.patch(update_label_url, json=_request_callback)
payload = UpdateJobLabelsRequest(
labels={"test_label": "test_label_value"})
response = self.client.open(
'/jobs/{id}/updateLabels'.format(id=workflow_id),
method='POST',
data=json.dumps(payload),
content_type='application/json')
self.assertStatus(response, 404)
self.assertEquals(json.loads(response.data)['detail'], error_message)
@requests_mock.mock()
def test_update_job_labels_undefined_unsupported_media_type_exception(
self, mock_request):
workflow_id = 'id'
error_message = b'Invalid Content-type (), expected JSON data'
def _request_callback(request, context):
context.status_code = 415
return error_message
update_label_url = self.base_url + '/{id}/labels'.format(
id=workflow_id)
mock_request.patch(update_label_url, json=_request_callback)
payload = UpdateJobLabelsRequest(labels={"test_label": None})
response = self.client.open(
'/jobs/{id}/updateLabels'.format(id=workflow_id),
headers={'Accept': 'application/json'},
method='POST',
data=json.dumps(payload))
self.assertStatus(response, 415)
self.assertEquals(json.loads(response.data)['detail'], error_message)
    @requests_mock.mock()
    def test_get_job_returns_200(self, mock_request):
        """
        Test case for get_job

        Query for job and task-level metadata for a specified job
        """
        workflow_id = 'id'
        subworkflow_id = 'subworkflow_id'
        workflow_name = 'test'
        status = 'Succeeded'
        # Cromwell emits Zulu-suffixed timestamps; the API normalizes them
        # to an explicit +00:00 offset in its responses.
        timestamp = '2017-11-08T05:06:41.424Z'
        response_timestamp = '2017-11-08T05:06:41.424000+00:00'
        inputs = {'test.inputs': 'gs://project-bucket/test/inputs.txt'}
        outputs = {
            'test.analysis.outputs': 'gs://project-bucket/test/outputs.txt'
        }
        labels = {'cromwell-workflow-id': 'cromwell-12345'}
        backend_log = '/cromwell/cromwell-executions/id/call-analysis/call-analysis-log'
        attempts = 1
        return_code = 0

        def _request_callback(request, context):
            # Fake Cromwell /metadata payload for one successful call.
            context.status_code = 200
            return {
                'workflowName': workflow_name,
                'id': workflow_id,
                'status': status,
                'calls': {
                    'test.analysis': [{
                        'executionStatus': 'Done',
                        'shardIndex': -1,
                        'start': timestamp,
                        'end': timestamp,
                        'backendLogs': {
                            'log': backend_log},
                        'returnCode': return_code,
                        'inputs': inputs,
                        'outputs': outputs,
                        'attempt': attempts,
                        'subWorkflowId': subworkflow_id
                    }]
                },
                'inputs': inputs,
                'labels': labels,
                'outputs': outputs,
                'submission': timestamp,
                'end': timestamp,
                'start': timestamp,
                'failures': [
                    {'causedBy': [
                        {
                            'causedBy': [],
                            'message': 'Task test.analysis failed'
                        }
                    ],
                    'message': 'Workflow failed'}
                ]
            }  # yapf: disable

        cromwell_url = self.base_url + '/{id}/metadata'.format(id=workflow_id)
        mock_request.get(cromwell_url, json=_request_callback)
        response = self.client.open('/jobs/{id}'.format(id=workflow_id),
                                    method='GET')
        self.assertStatus(response, 200)
        response_data = json.loads(response.data)
        # Expected API-shape translation of the Cromwell metadata above.
        expected_data = {
            'name': workflow_name,
            'id': workflow_id,
            'status': status,
            'submission': response_timestamp,
            'start': response_timestamp,
            'end': response_timestamp,
            'inputs': jobs_controller.update_key_names(inputs),
            'outputs': jobs_controller.update_key_names(outputs),
            'labels': labels,
            'extensions': {
                'tasks': [{
                    'name': 'analysis',
                    'executionStatus': 'Succeeded',
                    'executionEvents': [],
                    'start': response_timestamp,
                    'end': response_timestamp,
                    'backendLog': backend_log,
                    'callCached': False,
                    'inputs': jobs_controller.update_key_names(inputs),
                    'outputs': jobs_controller.update_key_names(outputs),
                    'returnCode': return_code,
                    'attempts': attempts,
                    'jobId': subworkflow_id
                }]
            },
            'failures': [{
                'failure': 'Workflow failed (Caused by [reason 1 of 1]: Task test.analysis failed)',
                'taskName': 'Workflow Error'
            }]
        }  # yapf: disable
        self.assertDictEqual(response_data, expected_data)
    @requests_mock.mock()
    def test_short_workflow_failure_content(self, mock_request):
        """
        Test case for get_job

        Parsing should succeed even if the failure content is one-level deep.
        """
        workflow_id = 'id'
        subworkflow_id = 'subworkflow_id'
        workflow_name = 'test'
        status = 'Failed'
        timestamp = '2017-11-08T05:06:41.424Z'
        response_timestamp = '2017-11-08T05:06:41.424000+00:00'
        inputs = {'test.inputs': 'gs://project-bucket/test/inputs.txt'}
        outputs = {
            'test.analysis.outputs': 'gs://project-bucket/test/outputs.txt'
        }
        labels = {'cromwell-workflow-id': 'cromwell-12345'}
        backend_log = '/cromwell/cromwell-executions/id/call-analysis/call-analysis-log'
        attempts = 1
        return_code = 0

        def _request_callback(request, context):
            # Fake Cromwell /metadata payload: no calls, and a failure entry
            # whose 'causedBy' list is empty (the one-level-deep case).
            context.status_code = 200
            return {
                'workflowName': workflow_name,
                'id': workflow_id,
                'status': status,
                'calls': { },
                'inputs': inputs,
                'labels': labels,
                'outputs': outputs,
                'submission': timestamp,
                'end': timestamp,
                'start': timestamp,
                'failures': [
                    {'causedBy': [],
                     'message': 'Something failed'}
                ]
            }  # yapf: disable

        cromwell_url = self.base_url + '/{id}/metadata'.format(id=workflow_id)
        mock_request.get(cromwell_url, json=_request_callback)
        response = self.client.open('/jobs/{id}'.format(id=workflow_id),
                                    method='GET')
        self.assertStatus(response, 200)
        response_data = json.loads(response.data)
        expected_data = {
            'name': workflow_name,
            'id': workflow_id,
            'status': status,
            'submission': response_timestamp,
            'start': response_timestamp,
            'end': response_timestamp,
            'inputs': jobs_controller.update_key_names(inputs),
            'outputs': jobs_controller.update_key_names(outputs),
            'labels': labels,
            'extensions': {
                'tasks': []
            },
            'failures': [{
                'failure': 'Something failed',
                'taskName': 'Workflow Error'
            }]
        }  # yapf: disable
        self.assertDictEqual(response_data, expected_data)
    @requests_mock.mock()
    def test_get_scattered_job_returns_200(self, mock_request):
        """
        Test case for get_job

        Query for job and task-level metadata for a specified job
        """
        workflow_id = 'id'
        workflow_name = 'test'
        status = 'Failed'
        timestamp = '2017-11-08T05:06:41.424Z'
        response_timestamp = '2017-11-08T05:06:41.424000+00:00'
        inputs = {'test.inputs': 'gs://project-bucket/test/inputs.txt'}
        outputs = {
            'test.analysis.outputs': 'gs://project-bucket/test/outputs.txt'
        }
        labels = {'cromwell-workflow-id': 'cromwell-12345'}
        call_root = '/cromwell/cromwell-executions/id/call-analysis'
        backend_log = '/cromwell/cromwell-executions/id/call-analysis/call-analysis-log'
        attempts = 2
        return_code = 0

        def _request_callback(request, context):
            # Fake Cromwell /metadata payload for one scattered task whose
            # two shards (shardIndex 0 and 1) both failed.
            context.status_code = 200
            return {
                'workflowName': workflow_name,
                'id': workflow_id,
                'status': status,
                'calls': {
                    'test.analysis': [{
                        'executionStatus': 'Failed',
                        'shardIndex': 0,
                        'start': timestamp,
                        'end': timestamp,
                        'backendLogs': {
                            'log': backend_log},
                        'callRoot': call_root,
                        'returnCode': return_code,
                        'inputs': inputs,
                        'attempt': attempts,
                        'failures': [
                            {
                                'causedBy': [],
                                'message': 'test.analysis shard 0 failed'
                            }
                        ],
                    }, {
                        'executionStatus': 'Failed',
                        'shardIndex': 1,
                        'start': timestamp,
                        'end': timestamp,
                        'backendLogs': {
                            'log': backend_log},
                        'callRoot': call_root,
                        'returnCode': return_code,
                        'inputs': inputs,
                        'attempt': attempts,
                        'failures': [
                            {
                                'causedBy': [],
                                'message': 'test.analysis shard 1 failed'
                            }
                        ],
                    }]
                },
                'inputs': inputs,
                'labels': labels,
                'outputs': outputs,
                'submission': timestamp,
                'end': timestamp,
                'start': timestamp,
                'failures': [
                    {
                        'causedBy': [
                            {
                                'causedBy': [],
                                'message': 'test.analysis shard 0 failed'
                            }, {
                                'causedBy': [],
                                'message': 'test.analysis shard 1 failed'
                            }
                        ],
                        'message': 'Workflow failed'
                    }
                ]
            }  # yapf: disable

        cromwell_url = self.base_url + '/{id}/metadata'.format(id=workflow_id)
        mock_request.get(cromwell_url, json=_request_callback)
        response = self.client.open('/jobs/{id}'.format(id=workflow_id),
                                    method='GET')
        self.assertStatus(response, 200)
        response_data = json.loads(response.data)
        # Per-shard failures are surfaced individually, and the task entry
        # gains a 'shards' list in extensions.
        expected_data = {
            'name': workflow_name,
            'id': workflow_id,
            'status': status,
            'submission': response_timestamp,
            'start': response_timestamp,
            'end': response_timestamp,
            'inputs': jobs_controller.update_key_names(inputs),
            'outputs': jobs_controller.update_key_names(outputs),
            'labels': labels,
            'failures': [{
                'callRoot': call_root,
                'failure': 'test.analysis shard 0 failed',
                'backendLog': backend_log,
                'shardIndex': 0,
                'taskName': 'analysis',
                'timestamp': response_timestamp
            }, {
                'callRoot': call_root,
                'failure': 'test.analysis shard 1 failed',
                'backendLog': backend_log,
                'shardIndex': 1,
                'taskName': 'analysis',
                'timestamp': response_timestamp
            }],
            'extensions': {
                'tasks': [{
                    'name': 'analysis',
                    'executionStatus': 'Failed',
                    'executionEvents': [],
                    'callRoot': call_root,
                    'callCached': False,
                    'attempts': attempts,
                    'start': response_timestamp,
                    'end': response_timestamp,
                    'shards': [{
                        'attempts': attempts,
                        'end': response_timestamp,
                        'callRoot': call_root,
                        'backendLog': backend_log,
                        'executionStatus': 'Failed',
                        'failureMessages': ['test.analysis shard 0 failed'],
                        'shardIndex': 0,
                        'start': response_timestamp
                    }, {
                        'attempts': attempts,
                        'end': response_timestamp,
                        'callRoot': call_root,
                        'backendLog': backend_log,
                        'executionStatus': 'Failed',
                        'failureMessages': ['test.analysis shard 1 failed'],
                        'shardIndex': 1,
                        'start': response_timestamp
                    }]
                }]
            }
        }  # yapf: disable
        self.assertDictEqual(response_data, expected_data)
    @requests_mock.mock()
    def test_get_task_attempts_returns_200(self, mock_request):
        """
        Test case for get_task_attempts

        Query for task attempts data for a specific non-scattered task
        """
        workflow_id = 'id'
        workflow_name = 'test'
        status = 'Failed'
        timestamp = '2017-11-08T05:06:41.424Z'
        response_timestamp = '2017-11-08T05:06:41.424000+00:00'
        inputs = {'test.inputs': 'gs://project-bucket/test/inputs.txt'}
        outputs = {
            'test.analysis.outputs': 'gs://project-bucket/test/outputs.txt'
        }
        labels = {'cromwell-workflow-id': 'cromwell-12345'}
        call_root = '/cromwell/cromwell-executions/id/call-analysis'
        backend_log = '/cromwell/cromwell-executions/id/call-analysis/call-analysis-log'
        return_code = 0

        def _request_callback(request, context):
            # Fake Cromwell /metadata payload: attempt 1 is a
            # RetryableFailure, attempt 2 (the retry) is Done.
            context.status_code = 200
            return {
                'workflowName': workflow_name,
                'id': workflow_id,
                'status': status,
                'calls': {
                    'test.task': [{
                        'executionStatus': 'RetryableFailure',
                        'shardIndex': -1,
                        'start': timestamp,
                        'end': timestamp,
                        'backendLogs': {
                            'log': backend_log},
                        'returnCode': return_code,
                        'inputs': inputs,
                        'outputs': outputs,
                        'attempt': 1,
                        'callCaching': {
                            'effectiveCallCachingMode': 'ReadAndWriteCache',
                            'hit': 'False'
                        },
                        'callRoot': call_root
                    }, {
                        'executionStatus': 'Done',
                        'shardIndex': -1,
                        'start': timestamp,
                        'end': timestamp,
                        'backendLogs': {
                            'log': backend_log},
                        'returnCode': return_code,
                        'inputs': inputs,
                        'outputs': outputs,
                        'attempt': 2,
                        'callCaching': {
                            'effectiveCallCachingMode': 'WriteCache',
                            'hit': 'False'
                        },
                        'callRoot': call_root
                    }]
                },
                'inputs': inputs,
                'labels': labels,
                'outputs': outputs,
                'submission': timestamp,
                'end': timestamp,
                'start': timestamp,
                'failures': [
                    {'causedBy': [
                        {
                            'causedBy': [],
                            'message': 'Task test1:1 failed. The job was stopped before the command finished. PAPI error code 2.'
                        }
                    ],
                    'message': 'Workflow failed'}
                ]
            }  # yapf: disable

        cromwell_url = self.base_url + '/{id}/metadata'.format(id=workflow_id)
        mock_request.get(cromwell_url, json=_request_callback)
        response = self.client.open('/jobs/{id}/{task}/attempts'.format(
            id=workflow_id, task='task'),
                                    method='GET')
        self.assertStatus(response, 200)
        response_data = json.loads(response.data)
        # Cromwell statuses are mapped: RetryableFailure -> Failed,
        # Done -> Succeeded.
        expected_data = {
            'attempts': [{
                'attemptNumber': 1,
                'callCached': 'False',
                'callRoot': call_root,
                'start': response_timestamp,
                'end': response_timestamp,
                'executionStatus': 'Failed',
                'inputs': inputs,
                'outputs': outputs,
                'backendLog': backend_log
            }, {
                'attemptNumber': 2,
                'callCached': 'False',
                'callRoot': call_root,
                'start': response_timestamp,
                'end': response_timestamp,
                'executionStatus': 'Succeeded',
                'inputs': inputs,
                'outputs': outputs,
                'backendLog': backend_log
            }]
        }  # yapf: disable
        self.assertDictEqual(response_data, expected_data)
    @requests_mock.mock()
    def test_get_shard_attempts_returns_200(self, mock_request):
        """
        Test case for get_shard_attempts

        Query for shard attempts data for a specific scattered task
        """
        workflow_id = 'id'
        workflow_name = 'test'
        status = 'Failed'
        timestamp = '2017-11-08T05:06:41.424Z'
        response_timestamp = '2017-11-08T05:06:41.424000+00:00'
        inputs = {'test.inputs': 'gs://project-bucket/test/inputs.txt'}
        outputs = {
            'test.analysis.outputs': 'gs://project-bucket/test/outputs.txt'
        }
        labels = {'cromwell-workflow-id': 'cromwell-12345'}
        call_root = '/cromwell/cromwell-executions/id/call-analysis'
        backend_log = '/cromwell/cromwell-executions/id/call-analysis/call-analysis-log'
        return_code = 0

        def _request_callback(request, context):
            # Fake Cromwell /metadata payload: shard 0 has two attempts,
            # the first a RetryableFailure and the retry Done.
            context.status_code = 200
            return {
                'workflowName': workflow_name,
                'id': workflow_id,
                'status': status,
                'calls': {
                    'test.task': [{
                        'executionStatus': 'RetryableFailure',
                        'shardIndex': 0,
                        'start': timestamp,
                        'end': timestamp,
                        'backendLogs': {
                            'log': backend_log},
                        'returnCode': return_code,
                        'inputs': inputs,
                        'outputs': outputs,
                        'attempt': 1,
                        'callCaching': {
                            'effectiveCallCachingMode': 'ReadAndWriteCache',
                            'hit': 'False'
                        },
                        'callRoot': call_root
                    }, {
                        'executionStatus': 'Done',
                        'shardIndex': 0,
                        'start': timestamp,
                        'end': timestamp,
                        'backendLogs': {
                            'log': backend_log},
                        'returnCode': return_code,
                        'inputs': inputs,
                        'outputs': outputs,
                        'attempt': 2,
                        'callCaching': {
                            'effectiveCallCachingMode': 'WriteCache',
                            'hit': 'False'
                        },
                        'callRoot': call_root
                    }]
                },
                'inputs': inputs,
                'labels': labels,
                'outputs': outputs,
                'submission': timestamp,
                'end': timestamp,
                'start': timestamp,
                'failures': [
                    {'causedBy': [
                        {
                            'causedBy': [],
                            'message': 'Task test1:1 failed. The job was stopped before the command finished. PAPI error code 2.'
                        }
                    ],
                    'message': 'Workflow failed'}
                ]
            }  # yapf: disable

        cromwell_url = self.base_url + '/{id}/metadata'.format(id=workflow_id)
        mock_request.get(cromwell_url, json=_request_callback)
        response = self.client.open(
            '/jobs/{id}/{task}/{index}/attempts'.format(id=workflow_id,
                                                        task='task',
                                                        index=0),
            method='GET')
        self.assertStatus(response, 200)
        response_data = json.loads(response.data)
        # Cromwell statuses are mapped: RetryableFailure -> Failed,
        # Done -> Succeeded.
        expected_data = {
            'attempts': [{
                'attemptNumber': 1,
                'callCached': 'False',
                'callRoot': call_root,
                'start': response_timestamp,
                'end': response_timestamp,
                'executionStatus': 'Failed',
                'inputs': inputs,
                'outputs': outputs,
                'backendLog': backend_log
            }, {
                'attemptNumber': 2,
                'callCached': 'False',
                'callRoot': call_root,
                'start': response_timestamp,
                'end': response_timestamp,
                'executionStatus': 'Succeeded',
                'inputs': inputs,
                'outputs': outputs,
                'backendLog': backend_log
            }]
        }  # yapf: disable
        self.assertDictEqual(response_data, expected_data)
def test_nested_message_is_returned(self):
"""
Test case for get_deepest_message
Deepest error message gets returned instead of highest-level message
"""
top_level_message = [{
'causedBy': [],
'message': 'This is the right message to return'
}]
second_level_message = [{
'causedBy': [{
'causedBy': [],
'message': 'This is the right message to return'
}],
'message':
'Workflow failed'
}]
third_level_message = [{
'causedBy': [{
'causedBy': [{
'causedBy': [],
'message': 'This is the right message to return'
}],
'message':
'This is the wrong message to return'
}],
'message':
'Workflow failed'
}]
self.assertEqual(
'This is the right message to return',
jobs_controller.get_deepest_message(top_level_message))
self.assertEqual(
jobs_controller.get_deepest_message(top_level_message),
jobs_controller.get_deepest_message(second_level_message))
self.assertEqual(
jobs_controller.get_deepest_message(second_level_message),
jobs_controller.get_deepest_message(third_level_message))
@requests_mock.mock()
def test_get_job_bad_request(self, mock_request):
workflow_id = 'id'
error_message = 'Invalid workflow ID: {}.'.format(workflow_id)
def _request_callback(request, context):
context.status_code = 400
return {'status': 'fail', 'message': error_message}
cromwell_url = self.base_url + '/{id}/metadata'.format(id=workflow_id)
mock_request.get(cromwell_url, json=_request_callback)
response = self.client.open('/jobs/{id}'.format(id=workflow_id),
method='GET')
self.assertStatus(response, 400)
self.assertEquals(json.loads(response.data)['detail'], error_message)
@requests_mock.mock()
def test_job_not_found(self, mock_request):
workflow_id = 'id'
error_message = 'Unrecognized workflow ID: {}.'.format(workflow_id)
def _request_callback(request, context):
context.status_code = 404
return {'status': 'fail', 'message': error_message}
cromwell_url = self.base_url + '/{id}/metadata'.format(id=workflow_id)
mock_request.get(cromwell_url, json=_request_callback)
response = self.client.open('/jobs/{id}'.format(id=workflow_id),
method='GET')
self.assertStatus(response, 404)
self.assertEquals(json.loads(response.data)['detail'], error_message)
@requests_mock.mock()
def test_job_internal_server_error(self, mock_request):
workflow_id = 'id'
error_message = 'Connection to the database failed.'
def _request_callback(request, context):
context.status_code = 500
return {'status': 'error', 'message': error_message}
cromwell_url = self.base_url + '/{id}/metadata'.format(id=workflow_id)
mock_request.get(cromwell_url, json=_request_callback)
response = self.client.open('/jobs/{id}'.format(id=workflow_id),
method='GET')
self.assertStatus(response, 500)
self.assertEquals(json.loads(response.data)['detail'], error_message)
@requests_mock.mock()
def test_query_jobs_returns_200(self, mock_request):
"""
Test case for query_jobs
Query jobs by various filter criteria. Returned jobs are ordered from newest to oldest submission time.
"""
def _request_callback(request, context):
context.status_code = 200
return {'results': [], 'totalResultsCount': 0}
query_url = self.base_url + '/query'
mock_request.post(query_url, json=_request_callback)
query = QueryJobsRequest()
response = self.client.open('/jobs/query',
method='POST',
data=json.dumps(query),
content_type='application/json')
self.assertStatus(response, 200)
def test_empty_cromwell_query_params(self):
query = QueryJobsRequest()
self.assertEqual(
sorted(jobs_controller.cromwell_query_params(query, 1, 64, False)),
sorted([{
'page': '1'
}, {
'pageSize': '64'
}, {
'additionalQueryResultFields': 'parentWorkflowId'
}, {
'additionalQueryResultFields': 'labels'
}, {
'includeSubworkflows': 'false'
}]))
def test_cromwell_query_params(self):
datetime_format = '%Y-%m-%dT%H:%M:%S.%fZ'
query = QueryJobsRequest(
name='test',
start=datetime.strptime('2017-10-30T18:04:47.271Z',
datetime_format),
end=datetime.strptime('2017-10-31T18:04:47.271Z', datetime_format),
status=['Submitted', 'Running', 'Succeeded'],
labels={
'label-key-1': 'label-val-1',
'label-key-2': 'label-val-2'
},
page_size=100)
query_params = [{
'name': query.name
}, {
'start':
datetime.strftime(query.start, datetime_format)
}, {
'end': datetime.strftime(query.end, datetime_format)
}, {
'pageSize': '100'
}, {
'page': '23'
}, {
'label': 'label-key-1:label-val-1'
}, {
'label': 'label-key-2:label-val-2'
}, {
'additionalQueryResultFields': 'parentWorkflowId'
}, {
'additionalQueryResultFields': 'labels'
}, {
'includeSubworkflows': 'false'
}]
query_params.extend([{'status': s} for s in query.status])
self.assertItemsEqual(
sorted(query_params),
sorted(jobs_controller.cromwell_query_params(
query, 23, 100, False)))
def test_format_job(self):
time = '2017-10-27T18:04:47.271Z'
job = {
'id': '12345',
'name': 'TestJob',
'status': 'Failed',
'start': time,
'end': time
}
formatted_time = dateutil.parser.parse(time).astimezone(tzutc())
result = QueryJobsResult(id=job.get('id'),
name=job.get('name'),
status=job.get('status'),
submission=formatted_time,
start=formatted_time,
end=formatted_time,
extensions=ExtendedFields())
self.assertEqual(jobs_controller.format_job(job, formatted_time),
result)
def test_format_job_without_milliseconds(self):
time = '2017-10-27T18:04:47Z'
job = {
'id': '12345',
'name': 'TestJob',
'status': 'Failed',
'start': time,
'end': time
}
formatted_time = dateutil.parser.parse(time).astimezone(tzutc())
result = QueryJobsResult(id=job.get('id'),
name=job.get('name'),
status=job.get('status'),
submission=formatted_time,
start=formatted_time,
end=formatted_time,
extensions=ExtendedFields())
self.assertEqual(jobs_controller.format_job(job, formatted_time),
result)
def test_format_job_with_no_start_date(self):
time = '2017-10-27T18:04:47Z'
job = {'id': '12345', 'name': 'TestJob', 'status': 'Failed'}
formatted_time = dateutil.parser.parse(time).astimezone(tzutc())
result = QueryJobsResult(id=job.get('id'),
name=job.get('name'),
status=job.get('status'),
start=formatted_time,
submission=formatted_time,
extensions=ExtendedFields())
self.assertEqual(jobs_controller.format_job(job, formatted_time),
result)
def test_format_job_with_no_end_date(self):
time = '2017-10-27T18:04:47Z'
job = {
'id': '12345',
'name': 'TestJob',
'status': 'Failed',
'start': time
}
formatted_time = dateutil.parser.parse(time).astimezone(tzutc())
result = QueryJobsResult(id=job.get('id'),
name=job.get('name'),
status=job.get('status'),
submission=formatted_time,
start=formatted_time,
end=None,
extensions=ExtendedFields())
self.assertEqual(jobs_controller.format_job(job, formatted_time),
result)
def test_page_from_offset(self):
self.assertEqual(
jobs_controller.page_from_offset(offset=0, page_size=1), 1)
self.assertEqual(
jobs_controller.page_from_offset(offset=1, page_size=1), 2)
self.assertEqual(
jobs_controller.page_from_offset(offset=1, page_size=10), 1)
self.assertEqual(
jobs_controller.page_from_offset(offset=0, page_size=10), 1)
self.assertEqual(
jobs_controller.page_from_offset(offset=10, page_size=10), 2)
self.assertEqual(
jobs_controller.page_from_offset(offset=11, page_size=10), 2)
self.assertEqual(
jobs_controller.page_from_offset(offset=10, page_size=1), 11)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    import unittest
    unittest.main()
| 39.07298
| 129
| 0.498699
| 3,866
| 44,973
| 5.593378
| 0.085618
| 0.034221
| 0.024972
| 0.028302
| 0.851554
| 0.82968
| 0.800222
| 0.776915
| 0.750879
| 0.739086
| 0
| 0.023401
| 0.392836
| 44,973
| 1,150
| 130
| 39.106957
| 0.768512
| 0.032886
| 0
| 0.773904
| 0
| 0.000996
| 0.196158
| 0.054271
| 0
| 0
| 0
| 0
| 0.046813
| 1
| 0.044821
| false
| 0.000996
| 0.013944
| 0
| 0.078685
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ccb1574985fe9ff4a42a3b385dcad79d39cec5aa
| 115
|
py
|
Python
|
imgur/models/__init__.py
|
Dogeek/imgur
|
70afd153eb627b185566255595aedbbba364a446
|
[
"MIT"
] | null | null | null |
imgur/models/__init__.py
|
Dogeek/imgur
|
70afd153eb627b185566255595aedbbba364a446
|
[
"MIT"
] | null | null | null |
imgur/models/__init__.py
|
Dogeek/imgur
|
70afd153eb627b185566255595aedbbba364a446
|
[
"MIT"
] | null | null | null |
from imgur.models.album import Album
from imgur.models.comment import Comment
from imgur.models.image import Image
| 28.75
| 40
| 0.843478
| 18
| 115
| 5.388889
| 0.388889
| 0.278351
| 0.463918
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104348
| 115
| 3
| 41
| 38.333333
| 0.941748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
aeb2700f2ad9615fb2c17873de3f3e2bb8f96874
| 4,816
|
py
|
Python
|
opentapioca/externalid/dbpediaexternalid.py
|
ziodave/opentapioca
|
e4d5d41c1fdb199a49745c3efc2a02c6d74be315
|
[
"Apache-2.0"
] | null | null | null |
opentapioca/externalid/dbpediaexternalid.py
|
ziodave/opentapioca
|
e4d5d41c1fdb199a49745c3efc2a02c6d74be315
|
[
"Apache-2.0"
] | null | null | null |
opentapioca/externalid/dbpediaexternalid.py
|
ziodave/opentapioca
|
e4d5d41c1fdb199a49745c3efc2a02c6d74be315
|
[
"Apache-2.0"
] | null | null | null |
from opentapioca.externalid.externalid import ExternalId
class DbpediaExternalId(ExternalId):
    """Builds DBpedia resource URIs from a Wikidata item's sitelinks."""

    # Wikipedia language editions with a DBpedia endpoint. English is the
    # only language served from the bare dbpedia.org host.
    _LANGUAGES = [
        'ar', 'bg', 'bn', 'ca', 'cj', 'cs', 'da', 'de', 'el', 'en', 'es',
        'et', 'eu', 'fa', 'fi', 'fr', 'ga', 'gl', 'he', 'hi', 'hu', 'hy',
        'ic', 'id', 'it', 'ja', 'km', 'ko', 'lo', 'lv', 'my', 'nb', 'nl',
        'nn', 'no', 'pl', 'pt', 'ro', 'ru', 'sr', 'sv', 'th', 'tr', 'uk',
        'zh',
    ]

    def __init__(self):
        # Generate the per-language config table instead of repeating the
        # same dict literal 45 times; the resulting list is identical to
        # the original hard-coded table.
        self.configs = [
            {'language': lang,
             'template': ('http://dbpedia.org/resource/{title}'
                          if lang == 'en' else
                          'http://{language}.dbpedia.org/resource/{title}')}
            for lang in self._LANGUAGES
        ]

    def get(self, item):
        """Return DBpedia resource URIs for every sitelink of ``item``.

        item: dict with a 'sitelinks' mapping of '<lang>wiki' keys to
        dicts carrying a 'title' entry.
        """
        ids = []
        sitelinks = item.get('sitelinks')
        for config in self.configs:
            language = config.get('language')
            template = config.get('template')
            title = sitelinks.get('{}wiki'.format(language), {}).get('title')
            if title is not None:
                # DBpedia resource names use underscores instead of spaces.
                ids.append(template.format(language=language,
                                           title=title.replace(' ', '_')))
        return ids
| 69.797101
| 93
| 0.565822
| 464
| 4,816
| 5.862069
| 0.168103
| 0.198529
| 0.297794
| 0.380515
| 0.833456
| 0.822059
| 0.822059
| 0.80625
| 0
| 0
| 0
| 0
| 0.185631
| 4,816
| 68
| 94
| 70.823529
| 0.693524
| 0
| 0
| 0
| 0
| 0
| 0.603613
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.016667
| 0
| 0.083333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9ddd4dea610766ccd9e588aa0273a8cc39a6f9cc
| 182
|
py
|
Python
|
xrpl/core/keypairs/exceptions.py
|
SubCODERS/xrpl-py
|
24a02d099002625794f5b6491ec2cafd872cc721
|
[
"ISC"
] | 1
|
2021-04-07T16:59:01.000Z
|
2021-04-07T16:59:01.000Z
|
xrpl/core/keypairs/exceptions.py
|
SubCODERS/xrpl-py
|
24a02d099002625794f5b6491ec2cafd872cc721
|
[
"ISC"
] | 2
|
2022-02-23T22:57:46.000Z
|
2022-02-24T11:41:49.000Z
|
xrpl/core/keypairs/exceptions.py
|
SubCODERS/xrpl-py
|
24a02d099002625794f5b6491ec2cafd872cc721
|
[
"ISC"
] | 1
|
2022-02-21T07:36:36.000Z
|
2022-02-21T07:36:36.000Z
|
"""XRPL keypair codec exceptions."""
from xrpl.constants import XRPLException
class XRPLKeypairsException(XRPLException):
    """General XRPL Keypair Codec Exception."""
    # The docstring alone is a valid class body; the redundant `pass`
    # statement has been removed.
| 20.222222
| 47
| 0.752747
| 18
| 182
| 7.611111
| 0.722222
| 0.160584
| 0.233577
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148352
| 182
| 8
| 48
| 22.75
| 0.883871
| 0.373626
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
d178d728a49dce261d4d79ef942f0b2503198f46
| 2,026
|
py
|
Python
|
scripts/migrations/0002_auto_20190117_0007.py
|
sul-cidr/scriptchart-backend
|
38bb4139d77d683d85f31839a1a06096fe2fabbc
|
[
"MIT"
] | 1
|
2019-06-05T23:05:32.000Z
|
2019-06-05T23:05:32.000Z
|
scripts/migrations/0002_auto_20190117_0007.py
|
sul-cidr/scriptchart-backend
|
38bb4139d77d683d85f31839a1a06096fe2fabbc
|
[
"MIT"
] | 42
|
2019-01-24T23:51:42.000Z
|
2021-09-08T01:04:45.000Z
|
scripts/migrations/0002_auto_20190117_0007.py
|
sul-cidr/scriptchart-backend
|
38bb4139d77d683d85f31839a1a06096fe2fabbc
|
[
"MIT"
] | 1
|
2019-08-05T12:47:57.000Z
|
2019-08-05T12:47:57.000Z
|
# Generated by Django 2.1.4 on 2019-01-17 00:07
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('scripts', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='coordinates',
name='created_date',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='coordinates',
name='modified_date',
field=models.DateTimeField(auto_now=True),
),
migrations.AddField(
model_name='letter',
name='created_date',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='letter',
name='modified_date',
field=models.DateTimeField(auto_now=True),
),
migrations.AddField(
model_name='manuscript',
name='created_date',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='manuscript',
name='modified_date',
field=models.DateTimeField(auto_now=True),
),
migrations.AddField(
model_name='page',
name='created_date',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='page',
name='modified_date',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='coordinates',
name='binary_url',
field=models.URLField(blank=True, null=True),
),
]
| 31.65625
| 93
| 0.582428
| 194
| 2,026
| 5.902062
| 0.262887
| 0.070742
| 0.160699
| 0.188646
| 0.786026
| 0.786026
| 0.708297
| 0.708297
| 0.708297
| 0.708297
| 0
| 0.013571
| 0.308983
| 2,026
| 63
| 94
| 32.15873
| 0.804286
| 0.022211
| 0
| 0.807018
| 1
| 0
| 0.102072
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.035088
| 0
| 0.087719
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d17bc96f6b7fd69d5246504b5bca95ae3d4f6486
| 43
|
py
|
Python
|
dglt/train/prediction/__init__.py
|
uta-smile/CD-MVGNN
|
b48f4cd14befed298980a83edb417ab6809f0af6
|
[
"MIT"
] | 3
|
2022-02-06T09:13:51.000Z
|
2022-02-19T15:03:35.000Z
|
dglt/train/prediction/__init__.py
|
uta-smile/CD-MVGNN
|
b48f4cd14befed298980a83edb417ab6809f0af6
|
[
"MIT"
] | 1
|
2022-02-14T23:16:27.000Z
|
2022-02-14T23:16:27.000Z
|
dglt/train/prediction/__init__.py
|
uta-smile/CD-MVGNN
|
b48f4cd14befed298980a83edb417ab6809f0af6
|
[
"MIT"
] | null | null | null |
from .cross_validate import cross_validate
| 21.5
| 42
| 0.883721
| 6
| 43
| 6
| 0.666667
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 43
| 1
| 43
| 43
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d1c8248234b007d015b5f0588a1e2fcace94aee8
| 3,249
|
py
|
Python
|
telegram_litecoin_bot-main/main.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2
|
2021-11-17T03:35:03.000Z
|
2021-12-08T06:00:31.000Z
|
telegram_litecoin_bot-main/main.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | null | null | null |
telegram_litecoin_bot-main/main.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2
|
2021-11-05T18:07:48.000Z
|
2022-02-24T21:25:07.000Z
|
import marshal
import zlib
import base64
exec(zlib.decompress(base64.b64decode("eJztWc1y3LgRvvMpYOYwnIjm/GmsrLxMRZK9a8fS1pYlr8vlcTEYEjMDDQlyAVCj2V0/Q6pyyC2VS3LPMc+TF0geIQ0Q/JsfWXZ82EotVZZAAN1o9O/X9IynCZIkJnKRMkSTLOUSXcH7nOPkLKaESReJNQtdRG7gRVizJoUnY2+Ws1DSlAkvIULgOREln6+JfEaFTPn6Jfk+JwJYwdRpKs9wHE9xuDxhYkW4WdzgTDhPecXqEljDGd9iIVYpj74hJCLRU7XlTrKv4jSNXmMqm1tpQsp1EROSWeblWqTM5cQVa+GmwpJ8fWwhVG7lhZRCTWk+U3FYrp0SnEs6y+PLNM8schuSTGrajFMmkWNP+qPR28HjUT/5FSrHg+RZkiRws4xwSdkao4s0ymNk1CHQE8zQKRxySuI8AaMALyFBcxO2m91ViswO9G1MsCDoap0RlNGsmi8vgTCLWgtwGburJIbLe+SWSqdrWSHyKwrPWEDNW1PMGOGwatvF1fqPR+MEFU8QlINqwioGPdQrFp2gG8Ao0BOBHlqaDiaAbGIGAfoD/O0FZmip7WoBXnt6R/HqflkOrYli1lO/9TsMXDX6KfipGBpBlCxqj1UKf5j4n/pYpQGGyUkOLsjR6Ro1rIKOzbifoBc4wvFg3CA5WyhVxuiNbFCgY1TR/J5c4yVGVzmEEcVxg/Qyz5TzkUgdWFM3jjs5/+rl0yf/+svf/v3PP4KlLIvOEEslSoWXYblQdgbLOrYoTGt3tc+q5QQvSUR5c82yjDMXxu8qZjFhjnIYzOc33S+HbZdn+qcS95VKDXCzbK1TTYIp87I1Ong03HC8ARwVkRnKTKw7Wqx9vgjxuftSXklfXmvjYo11xUWLXQcqqJfGeLGEEDxJphQiyuyGQYTROWVL9ISiU7zCC/Sc0UrpE7aQMjvu9a614SgLvTBNeiV5T182W8FtQm9O4MS7t+uBJ2+lpvvQVXvloKAo7z2D09IMbLVvp4vs1UFhBrXdW3EqiWOvluansRTGqSCF4mfg6xQyCOKYzYnzxRcfdSCvmMKtZh4nOIopI8Lpvu2/Q74PSvIkuZXHdcxqHxEySnNpROxMOPrIZ8I73Q+wbDotZXP0FNxS/S2t4sHzoMVlCuIvzXu2ApX4oJcsbznUUyYhZ1Z+VOfwRsQ2dVLw2amHjzRo06iKa3u+tOju25BYkObRG3EySKoLXWIIGKWZ1rlwEYp+iwZNHjv4jOp4O1uQcPnBeGOV0u4Rb82nWeB0npE5m89z59Z475ZD2JPSVbeX9rvZvR7DVwUSJyojKi+rAurWRX0XPRxUYXWXbLtXa5xQqevH42H0HtVuKUiYskjUAtgeyJNg6VQz3V1HzOJcLGq30UhK524rx/6P9itB+MOTOUBG+xjZF+kPNI5xb+z1kQPGzG8foxMW8ZRGaOwN4GXwqD9CpzmNo975xZvDo+eP0eqmi06yLCavyfQFlb3x6MgbPULOi2dXF+cuiumSAJ4Ml2kXfUe4Kgi9Q+B/tgB0RnqHI6/vDUfjI28wHAC0giROwEdnmFPDyX4Pwlo4owFI4aOjwdHhcKzfF1gsYKYzGg7Hw0fRMByNxv3pIBqNR/2IjIZH4az/m7B/1LEyqGUkYHky1XioLIZvB+8APmn4DLNtPF3V1J590CR3USFK8VeJ0DUswKOh5IbKZU0FMPNUBDnoOcAaeNAfiCmWJXQt9wnCoiBMIxKYEuo0Dy5sCKDYr/bTOQsoc9riFRmt0yrsfZPW3qQ5R2dwAlT4jvaXAgXfAdwLtKDmt7JlP9H8hjNc54FjEy0tOSXm7bu4mmHXStaCxLN6J5TaIFFZDmonGEmSxLFDAMkqfDZwzSZmHyavSQw5BeB0qk0JDYztFgd4M8ASMmA4Ia7dRDuwR6UplbO+zhlWee0Vk/kSXRCWrwHln1+dIfAHyHSKnQobAFgV4rFaKMpwPUouS
JLHmJZMPP2ofFsYPCzAZAAXpnLtN65ezDj2784hL4QpZUGozg6m+uyKUDmTuou/c6O1WfHHff1ol9ubmj530tzDdO9pu3qlEWiShekUK03OsQZ4r16eb3KoU1wzjkyX65RabindNau+/Z+//vnv6DsqKHSZIIgomOssOdIYMFV9WOmgzlav7GSE8E3uMU2o9AduOpsJMGuEJfG/Ae8vJ2jk990E35oBmE8PcBQFxQ54UZnF73cNntRiVJ07QK9yDK7NIse+TDlfu0guCCcIwz+WIkZWCEPJwDcYivY0JnYXPfChUplC1XDenY3q82UM8XCZR1DLn+EpFZA5wBbneA4Vnoh0OWFlVfskxf/pHwAU4IiQlGwKvY/N289B9TqTaYlBkL1GaOnTzJVXqnFME6VVqR/lPN7JmpMsXgcJ5ss883i60pPTXMqU6SHQ1SfsQxv3jbMiAOwDYLpN2YIQugZvS0ujYplXHROwctEC2gWo+X6OXf1BB9j5gzGUzjhOV1DloMeDgin8K55XCkvzDJi0vtU4XNVWqb5yQSOWxACnOeTA8pYQH4rKhEJEb2w3jCFJB749fwgH4EyCl4D3gwsrX9CfVWqKDlB0oG5Gvq3knapqU26t8XDhm8Map/5M3PNjPBTdK5dAUUdJLiAhSrwucwaCmvIhQvM5S5Eh9S2ilW2MFknxhUBTgBc4iHect5PIe/frSXTQBTO0w6d4DPZXrQgwgIO7reUK1Dbm/gfrDD+vdWoDcSK2bTTYttEub9sT6LvCeZjYB/WJB3adptF2o6ihnWVZZrEVSvsCQ0HbdnAoxAHaUpijIg+UeQsWZTSqIIZeBVoOyCm08cmneBT0BQUBI51BOjDAD9Vkq4lHOpPwrY16dnNnuiQMb29V05tbax/TnNoWNIlN2a74FiSgl43SOdG4S/eynKwwj2xXnQCdlZLbPla/XVufZx8X0rz/pLRonmvlQeoLuBenUNshMaqPDu099/cTFekArhmJIPdfi7ed4g6ddwe2Rr7KrDeqNKiGF2uA9KD2prYr3d30fv6mHN3J/ANn7wY8Z0WhQE8IgGpJot0sNjrq7VCtU86+/0Nx6r0bkN6tE3VUj7VL3QciDGqIAENFVvHo3q2u3Rq5XNIMGbWoHsYwaGWU1v0b/6nSWBz9UjV/qZr/h1UTzAHWKJC8aYEiKurPQNZ/AfQMBE4=")))
| 649.8
| 3,207
| 0.966451
| 97
| 3,249
| 32.371134
| 0.958763
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144972
| 0.002155
| 3,249
| 4
| 3,208
| 812.25
| 0.823566
| 0
| 0
| 0
| 0
| 0.25
| 0.973838
| 0.973838
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 11
|
ae6b26f6b2a1cb0acd6cf4822de3b70f2346bb0e
| 30,552
|
py
|
Python
|
datahub/dnb_api/test/test_tasks.py
|
alixedi/data-hub-api-cd-poc
|
a5e5ea45bb496c0d2a06635864514af0c7d4291a
|
[
"MIT"
] | null | null | null |
datahub/dnb_api/test/test_tasks.py
|
alixedi/data-hub-api-cd-poc
|
a5e5ea45bb496c0d2a06635864514af0c7d4291a
|
[
"MIT"
] | null | null | null |
datahub/dnb_api/test/test_tasks.py
|
alixedi/data-hub-api-cd-poc
|
a5e5ea45bb496c0d2a06635864514af0c7d4291a
|
[
"MIT"
] | null | null | null |
from unittest import mock
from urllib.parse import urljoin
import pytest
from celery.exceptions import Retry
from django.conf import settings
from django.forms.models import model_to_dict
from django.test.utils import override_settings
from django.utils.timezone import now
from freezegun import freeze_time
from rest_framework import status
from reversion.models import Version
from datahub.company.models import Company
from datahub.company.test.factories import CompanyFactory
from datahub.dnb_api.tasks import (
get_company_updates,
sync_company_with_dnb,
update_company_from_dnb_data,
)
from datahub.dnb_api.test.utils import model_to_dict_company
from datahub.dnb_api.utils import (
DNBServiceConnectionError,
DNBServiceError,
DNBServiceTimeoutError,
)
from datahub.metadata.models import Country
pytestmark = pytest.mark.django_db
DNB_SEARCH_URL = urljoin(f'{settings.DNB_SERVICE_BASE_URL}/', 'companies/search/')
@pytest.mark.parametrize(
'update_descriptor',
(
None,
'command:foo:bar',
),
)
@freeze_time('2019-01-01 11:12:13')
def test_sync_company_with_dnb_all_fields(
requests_mock,
dnb_response_uk,
update_descriptor,
):
"""
Test the sync_company_with_dnb task when all fields should be synced.
"""
requests_mock.post(
DNB_SEARCH_URL,
json=dnb_response_uk,
)
company = CompanyFactory(duns_number='123456789')
original_company = Company.objects.get(id=company.id)
task_result = sync_company_with_dnb.apply_async(
args=[company.id],
kwargs={'update_descriptor': update_descriptor},
)
assert task_result.successful()
company.refresh_from_db()
uk_country = Country.objects.get(iso_alpha2_code='GB')
assert model_to_dict_company(company) == {
'address_1': 'Unit 10, Ockham Drive',
'address_2': '',
'address_country': uk_country.id,
'address_county': '',
'address_postcode': 'UB6 0F2',
'address_town': 'GREENFORD',
'archived': False,
'archived_by': None,
'archived_documents_url_path': original_company.archived_documents_url_path,
'archived_on': None,
'archived_reason': None,
'business_type': original_company.business_type_id,
'company_number': '01261539',
'created_by': original_company.created_by_id,
'description': None,
'dnb_investigation_data': None,
'duns_number': '123456789',
'employee_range': original_company.employee_range_id,
'export_experience_category': original_company.export_experience_category_id,
'export_potential': None,
'export_to_countries': [],
'future_interest_countries': [],
'global_headquarters': None,
'global_ultimate_duns_number': '291332174',
'great_profile_status': None,
'headquarter_type': None,
'id': original_company.id,
'is_number_of_employees_estimated': True,
'is_turnover_estimated': None,
'modified_by': original_company.modified_by_id,
'name': 'FOO BICYCLE LIMITED',
'number_of_employees': 260,
'one_list_account_owner': None,
'one_list_tier': None,
'pending_dnb_investigation': False,
'reference_code': '',
'sector': original_company.sector_id,
'trading_names': [],
'transfer_reason': '',
'transferred_by': None,
'transferred_on': None,
'transferred_to': None,
'turnover': 50651895,
'turnover_range': original_company.turnover_range_id,
'uk_region': original_company.uk_region_id,
'vat_number': '',
'dnb_modified_on': now(),
}
versions = list(Version.objects.get_for_object(company))
assert len(versions) == 1
version = versions[0]
expected_update_descriptor = f'celery:sync_company_with_dnb:{task_result.id}'
if update_descriptor:
expected_update_descriptor = update_descriptor
assert version.revision.comment == f'Updated from D&B [{expected_update_descriptor}]'
assert version.revision.user is None
@freeze_time('2019-01-01 11:12:13')
def test_sync_company_with_dnb_partial_fields(
requests_mock,
dnb_response_uk,
):
"""
Test the sync_company_with_dnb task when only a subset of fields should be synced.
"""
requests_mock.post(
DNB_SEARCH_URL,
json=dnb_response_uk,
)
company = CompanyFactory(duns_number='123456789')
original_company = Company.objects.get(id=company.id)
task_result = sync_company_with_dnb.apply_async(
args=[company.id],
kwargs={'fields_to_update': ['global_ultimate_duns_number']},
)
assert task_result.successful()
company.refresh_from_db()
assert model_to_dict(company) == {
'address_1': original_company.address_1,
'address_2': original_company.address_2,
'address_country': original_company.address_country_id,
'address_county': original_company.address_county,
'address_postcode': original_company.address_postcode,
'address_town': original_company.address_town,
'archived': original_company.archived,
'archived_by': original_company.archived_by,
'archived_documents_url_path': original_company.archived_documents_url_path,
'archived_on': original_company.archived_on,
'archived_reason': original_company.archived_reason,
'business_type': original_company.business_type_id,
'company_number': original_company.company_number,
'created_by': original_company.created_by_id,
'description': original_company.description,
'dnb_investigation_data': original_company.dnb_investigation_data,
'duns_number': original_company.duns_number,
'employee_range': original_company.employee_range_id,
'export_experience_category': original_company.export_experience_category_id,
'export_potential': original_company.export_potential,
'export_to_countries': [],
'future_interest_countries': [],
'global_headquarters': original_company.global_headquarters,
'global_ultimate_duns_number': '291332174',
'great_profile_status': original_company.great_profile_status,
'headquarter_type': original_company.headquarter_type,
'id': original_company.id,
'is_number_of_employees_estimated': original_company.is_number_of_employees_estimated,
'is_turnover_estimated': original_company.is_turnover_estimated,
'modified_by': original_company.modified_by_id,
'name': original_company.name,
'number_of_employees': original_company.number_of_employees,
'one_list_account_owner': original_company.one_list_account_owner,
'one_list_tier': original_company.one_list_tier,
'pending_dnb_investigation': original_company.pending_dnb_investigation,
'reference_code': original_company.reference_code,
'registered_address_1': original_company.registered_address_1,
'registered_address_2': original_company.registered_address_2,
'registered_address_country': original_company.registered_address_country_id,
'registered_address_county': original_company.registered_address_county,
'registered_address_postcode': original_company.registered_address_postcode,
'registered_address_town': original_company.registered_address_town,
'sector': original_company.sector_id,
'trading_names': original_company.trading_names,
'transfer_reason': original_company.transfer_reason,
'transferred_by': None,
'transferred_on': None,
'transferred_to': None,
'turnover': original_company.turnover,
'turnover_range': original_company.turnover_range_id,
'uk_region': original_company.uk_region_id,
'vat_number': original_company.vat_number,
'website': original_company.website,
'dnb_modified_on': now(),
}
@pytest.mark.parametrize(
'error,expect_retry',
(
(DNBServiceError('An error occurred', status_code=504), True),
(DNBServiceError('An error occurred', status_code=503), True),
(DNBServiceError('An error occurred', status_code=502), True),
(DNBServiceError('An error occurred', status_code=500), True),
(DNBServiceError('An error occurred', status_code=403), False),
(DNBServiceError('An error occurred', status_code=400), False),
(DNBServiceConnectionError('An error occurred'), True),
(DNBServiceTimeoutError('An error occurred'), True),
),
)
def test_sync_company_with_dnb_retries_errors(monkeypatch, error, expect_retry):
"""
Test the sync_company_with_dnb task retries server errors.
"""
company = CompanyFactory(duns_number='123456789')
# Set up a DNBServiceError with the parametrized status code
mocked_get_company = mock.Mock()
mocked_get_company.side_effect = error
monkeypatch.setattr('datahub.dnb_api.tasks.get_company', mocked_get_company)
# Mock the task's retry method
retry_mock = mock.Mock(side_effect=Retry(exc=error))
monkeypatch.setattr('datahub.dnb_api.tasks.sync_company_with_dnb.retry', retry_mock)
if expect_retry:
expected_exception_class = Retry
else:
expected_exception_class = DNBServiceError
with pytest.raises(expected_exception_class):
sync_company_with_dnb(company.id)
@pytest.mark.usefixtures('dnb_company_updates_feature_flag')
class TestGetCompanyUpdates:
"""
Tests for the get_company_updates task and the associated _get_company_updates function.
"""
@pytest.mark.parametrize(
'error, expect_retry',
(
(
DNBServiceError(
'An error occurred',
status_code=status.HTTP_504_GATEWAY_TIMEOUT,
),
True,
),
(
DNBServiceError(
'An error occurred',
status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
),
True,
),
(
DNBServiceError(
'An error occurred',
status_code=status.HTTP_502_BAD_GATEWAY,
),
True,
),
(
DNBServiceError(
'An error occurred',
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
),
True,
),
(
DNBServiceError(
'An error occurred',
status_code=403,
),
False,
),
(
DNBServiceError(
'An error occurred',
status_code=400,
),
False,
),
(
DNBServiceConnectionError(
'An error occurred',
),
True,
),
(
DNBServiceTimeoutError(
'An error occurred',
),
True,
),
),
)
def test_errors(self, monkeypatch, error, expect_retry):
"""
Test the get_company_updates task retries server errors.
"""
mocked_get_company_update_page = mock.Mock(side_effect=error)
monkeypatch.setattr(
'datahub.dnb_api.tasks.get_company_update_page',
mocked_get_company_update_page,
)
mock_retry = mock.Mock(side_effect=Retry(exc=error))
monkeypatch.setattr(
'datahub.dnb_api.tasks.get_company_updates.retry',
mock_retry,
)
expected_exception_class = Retry if expect_retry else DNBServiceError
with pytest.raises(expected_exception_class):
get_company_updates()
@pytest.mark.parametrize(
'data',
(
{
None: {
'next': 'http://foo.bar/companies?cursor=page2',
'results': [
{'foo': 1},
{'bar': 2},
],
},
'http://foo.bar/companies?cursor=page2': {
'next': None,
'results': [
{'baz': 3},
],
},
},
),
)
@pytest.mark.parametrize(
'fields_to_update',
(
None,
['foo', 'bar'],
),
)
@freeze_time('2019-01-02T2:00:00')
def test_updates(self, monkeypatch, data, fields_to_update):
"""
Test if the update_company task is called with the
right parameters for all the records spread across
pages.
"""
mock_get_company_update_page = mock.Mock(
side_effect=lambda _, next_page: data[next_page],
)
monkeypatch.setattr(
'datahub.dnb_api.tasks.get_company_update_page',
mock_get_company_update_page,
)
mock_update_company = mock.Mock()
monkeypatch.setattr(
'datahub.dnb_api.tasks.update_company_from_dnb_data',
mock_update_company,
)
task_result = get_company_updates.apply(kwargs={'fields_to_update': fields_to_update})
assert mock_get_company_update_page.call_count == 2
mock_get_company_update_page.assert_any_call(
'2019-01-01T00:00:00',
None,
)
mock_get_company_update_page.assert_any_call(
'2019-01-01T00:00:00',
'http://foo.bar/companies?cursor=page2',
)
assert mock_update_company.apply_async.call_count == 3
expected_kwargs = {
'fields_to_update': fields_to_update,
'update_descriptor': f'celery:get_company_updates:{task_result.id}',
}
mock_update_company.apply_async.assert_any_call(
args=({'foo': 1},),
kwargs=expected_kwargs,
)
mock_update_company.apply_async.assert_any_call(
args=({'bar': 2},),
kwargs=expected_kwargs,
)
mock_update_company.apply_async.assert_any_call(
args=({'baz': 3},),
kwargs=expected_kwargs,
)
@pytest.mark.parametrize(
'lock_acquired, call_count',
(
(False, 0),
(True, 1),
),
)
def test_lock(self, monkeypatch, lock_acquired, call_count):
"""
Test that the task doesn't run if it cannot acquire
the advisory_lock.
"""
mock_advisory_lock = mock.MagicMock()
mock_advisory_lock.return_value.__enter__.return_value = lock_acquired
monkeypatch.setattr(
'datahub.dnb_api.tasks.advisory_lock',
mock_advisory_lock,
)
mock_get_company_updates = mock.Mock()
monkeypatch.setattr(
'datahub.dnb_api.tasks._get_company_updates',
mock_get_company_updates,
)
get_company_updates()
assert mock_get_company_updates.call_count == call_count
@pytest.mark.parametrize(
'data',
(
# Test limit works correctly on the first page
{
None: {
'next': None,
'results': [
{'foo': 1},
{'bar': 2},
{'baz': 3},
],
},
},
# Test limit works correctly on the second page
{
None: {
'next': 'http://foo.bar/companies?cursor=page2',
'results': [
{'foo': 1},
],
},
'http://foo.bar/companies?cursor=page2': {
'next': None,
'results': [
{'bar': 2},
{'baz': 3},
],
},
},
),
)
@freeze_time('2019-01-02T2:00:00')
@override_settings(DNB_AUTOMATIC_UPDATE_LIMIT=2)
def test_updates_max_update_limit(self, monkeypatch, data):
"""
Test if the update_company task is called with the
right parameters for all the records spread across
pages.
"""
mock_get_company_update_page = mock.Mock(
side_effect=lambda _, next_page: data[next_page],
)
monkeypatch.setattr(
'datahub.dnb_api.tasks.get_company_update_page',
mock_get_company_update_page,
)
mock_update_company = mock.Mock()
monkeypatch.setattr(
'datahub.dnb_api.tasks.update_company_from_dnb_data',
mock_update_company,
)
task_result = get_company_updates.apply()
assert mock_update_company.apply_async.call_count == 2
expected_kwargs = {
'fields_to_update': None,
'update_descriptor': f'celery:get_company_updates:{task_result.id}',
}
mock_update_company.apply_async.assert_any_call(
args=({'foo': 1},),
kwargs=expected_kwargs,
)
mock_update_company.apply_async.assert_any_call(
args=({'bar': 2},),
kwargs=expected_kwargs,
)
@mock.patch('datahub.dnb_api.tasks.log_to_sentry')
@freeze_time('2019-01-02T2:00:00')
def test_updates_with_update_company_from_dnb_data(
self,
mocked_log_to_sentry,
monkeypatch,
dnb_company_updates_response_uk,
):
"""
Test full integration for the `get_company_updates` task with the
`update_company_from_dnb_data` task when all fields are updated.
"""
company = CompanyFactory(duns_number='123456789')
mock_get_company_update_page = mock.Mock(
return_value=dnb_company_updates_response_uk,
)
monkeypatch.setattr(
'datahub.dnb_api.tasks.get_company_update_page',
mock_get_company_update_page,
)
task_result = get_company_updates.apply_async()
company.refresh_from_db()
dnb_company = dnb_company_updates_response_uk['results'][0]
assert company.name == dnb_company['primary_name']
expected_gu_number = dnb_company['global_ultimate_duns_number']
assert company.global_ultimate_duns_number == expected_gu_number
mocked_log_to_sentry.assert_called_with(
'get_company_updates task completed.',
extra={
'success_count': 1,
'failure_count': 0,
'updated_company_ids': [str(company.pk)],
'producer_task_id': task_result.id,
'start_time': '2019-01-02T02:00:00+00:00',
'end_time': '2019-01-02T02:00:00+00:00',
},
)
@mock.patch('datahub.dnb_api.tasks.log_to_sentry')
@freeze_time('2019-01-02T2:00:00')
def test_updates_with_update_company_from_dnb_data_partial_fields(
self,
mocked_log_to_sentry,
monkeypatch,
dnb_company_updates_response_uk,
):
"""
Test full integration for the `get_company_updates` task with the
`update_company_from_dnb_data` task when the fields are only partially updated.
"""
company = CompanyFactory(duns_number='123456789')
mock_get_company_update_page = mock.Mock(
return_value=dnb_company_updates_response_uk,
)
monkeypatch.setattr(
'datahub.dnb_api.tasks.get_company_update_page',
mock_get_company_update_page,
)
task_result = get_company_updates.apply(kwargs={'fields_to_update': ['name']})
company.refresh_from_db()
dnb_company = dnb_company_updates_response_uk['results'][0]
assert company.name == dnb_company['primary_name']
assert company.global_ultimate_duns_number == ''
mocked_log_to_sentry.assert_called_with(
'get_company_updates task completed.',
extra={
'success_count': 1,
'failure_count': 0,
'updated_company_ids': [str(company.pk)],
'producer_task_id': task_result.id,
'start_time': '2019-01-02T02:00:00+00:00',
'end_time': '2019-01-02T02:00:00+00:00',
},
)
@mock.patch('datahub.dnb_api.tasks.log_to_sentry')
@freeze_time('2019-01-02T2:00:00')
def test_updates_with_update_company_from_dnb_data_with_failure(
self,
mocked_log_to_sentry,
monkeypatch,
dnb_company_updates_response_uk,
):
"""
Test full integration for the `get_company_updates` task with the
`update_company_from_dnb_data` task when all fields are updated and one company in the
dnb-service result does not exist in Data Hub.
"""
company = CompanyFactory(duns_number='123456789')
missing_dnb_company = {
**dnb_company_updates_response_uk['results'][0],
'duns_number': '999999999',
}
dnb_company_updates_response_uk['results'].append(missing_dnb_company)
mock_get_company_update_page = mock.Mock(
return_value=dnb_company_updates_response_uk,
)
monkeypatch.setattr(
'datahub.dnb_api.tasks.get_company_update_page',
mock_get_company_update_page,
)
task_result = get_company_updates.apply()
company.refresh_from_db()
dnb_company = dnb_company_updates_response_uk['results'][0]
assert company.name == dnb_company['primary_name']
expected_gu_number = dnb_company['global_ultimate_duns_number']
assert company.global_ultimate_duns_number == expected_gu_number
mocked_log_to_sentry.assert_called_with(
'get_company_updates task completed.',
extra={
'success_count': 1,
'failure_count': 1,
'updated_company_ids': [str(company.pk)],
'producer_task_id': task_result.id,
'start_time': '2019-01-02T02:00:00+00:00',
'end_time': '2019-01-02T02:00:00+00:00',
},
)
def test_get_company_updates_feature_flag_inactive_no_updates(
monkeypatch,
):
"""
Test that when the DNB company updates feature flag is inactive, the task does not proceed.
"""
mocked_get_company_update_page = mock.Mock()
monkeypatch.setattr(
'datahub.dnb_api.tasks.get_company_update_page',
mocked_get_company_update_page,
)
get_company_updates()
assert mocked_get_company_update_page.call_count == 0
@freeze_time('2019-01-01 11:12:13')
def test_update_company_from_dnb_data(dnb_response_uk):
"""
Test the update_company_from_dnb_data command when all DNB fields are updated.
"""
company = CompanyFactory(duns_number='123456789')
original_company = Company.objects.get(id=company.id)
update_descriptor = 'foobar'
task_result = update_company_from_dnb_data.apply_async(
args=[dnb_response_uk['results'][0]],
kwargs={'update_descriptor': update_descriptor},
)
assert task_result.successful()
company.refresh_from_db()
uk_country = Country.objects.get(iso_alpha2_code='GB')
assert model_to_dict_company(company) == {
'address_1': 'Unit 10, Ockham Drive',
'address_2': '',
'address_country': uk_country.id,
'address_county': '',
'address_postcode': 'UB6 0F2',
'address_town': 'GREENFORD',
'archived': False,
'archived_by': None,
'archived_documents_url_path': original_company.archived_documents_url_path,
'archived_on': None,
'archived_reason': None,
'business_type': original_company.business_type_id,
'company_number': '01261539',
'created_by': original_company.created_by_id,
'description': None,
'dnb_investigation_data': None,
'duns_number': '123456789',
'employee_range': original_company.employee_range_id,
'export_experience_category': original_company.export_experience_category_id,
'export_potential': None,
'export_to_countries': [],
'future_interest_countries': [],
'global_headquarters': None,
'global_ultimate_duns_number': '291332174',
'great_profile_status': None,
'headquarter_type': None,
'id': original_company.id,
'is_number_of_employees_estimated': True,
'is_turnover_estimated': None,
'modified_by': original_company.modified_by_id,
'name': 'FOO BICYCLE LIMITED',
'number_of_employees': 260,
'one_list_account_owner': None,
'one_list_tier': None,
'pending_dnb_investigation': False,
'reference_code': '',
'sector': original_company.sector_id,
'trading_names': [],
'transfer_reason': '',
'transferred_by': None,
'transferred_on': None,
'transferred_to': None,
'turnover': 50651895,
'turnover_range': original_company.turnover_range_id,
'uk_region': original_company.uk_region_id,
'vat_number': '',
'dnb_modified_on': now(),
}
versions = list(Version.objects.get_for_object(company))
assert len(versions) == 1
version = versions[0]
assert version.revision.comment == f'Updated from D&B [{update_descriptor}]'
@freeze_time('2019-01-01 11:12:13')
def test_update_company_from_dnb_data_partial_fields(dnb_response_uk):
"""
Test the update_company_from_dnb_data command when a subset of DNB fields are updated.
"""
company = CompanyFactory(duns_number='123456789')
original_company = Company.objects.get(id=company.id)
task_result = update_company_from_dnb_data.apply_async(
args=[dnb_response_uk['results'][0]],
kwargs={'fields_to_update': ['global_ultimate_duns_number']},
)
assert task_result.successful()
company.refresh_from_db()
assert model_to_dict(company) == {
'address_1': original_company.address_1,
'address_2': original_company.address_2,
'address_country': original_company.address_country_id,
'address_county': original_company.address_county,
'address_postcode': original_company.address_postcode,
'address_town': original_company.address_town,
'archived': original_company.archived,
'archived_by': original_company.archived_by,
'archived_documents_url_path': original_company.archived_documents_url_path,
'archived_on': original_company.archived_on,
'archived_reason': original_company.archived_reason,
'business_type': original_company.business_type_id,
'company_number': original_company.company_number,
'created_by': original_company.created_by_id,
'description': original_company.description,
'dnb_investigation_data': original_company.dnb_investigation_data,
'duns_number': original_company.duns_number,
'employee_range': original_company.employee_range_id,
'export_experience_category': original_company.export_experience_category_id,
'export_potential': original_company.export_potential,
'export_to_countries': [],
'future_interest_countries': [],
'global_headquarters': original_company.global_headquarters,
'global_ultimate_duns_number': '291332174',
'great_profile_status': original_company.great_profile_status,
'headquarter_type': original_company.headquarter_type,
'id': original_company.id,
'is_number_of_employees_estimated': original_company.is_number_of_employees_estimated,
'is_turnover_estimated': original_company.is_turnover_estimated,
'modified_by': original_company.modified_by_id,
'name': original_company.name,
'number_of_employees': original_company.number_of_employees,
'one_list_account_owner': original_company.one_list_account_owner,
'one_list_tier': original_company.one_list_tier,
'pending_dnb_investigation': original_company.pending_dnb_investigation,
'reference_code': original_company.reference_code,
'registered_address_1': original_company.registered_address_1,
'registered_address_2': original_company.registered_address_2,
'registered_address_country': original_company.registered_address_country_id,
'registered_address_county': original_company.registered_address_county,
'registered_address_postcode': original_company.registered_address_postcode,
'registered_address_town': original_company.registered_address_town,
'sector': original_company.sector_id,
'trading_names': original_company.trading_names,
'transfer_reason': original_company.transfer_reason,
'transferred_by': None,
'transferred_on': None,
'transferred_to': None,
'turnover': original_company.turnover,
'turnover_range': original_company.turnover_range_id,
'uk_region': original_company.uk_region_id,
'vat_number': original_company.vat_number,
'website': original_company.website,
'dnb_modified_on': now(),
}
@freeze_time('2019-01-01 11:12:13')
def test_update_company_from_dnb_data_does_not_exist(dnb_response_uk, caplog):
"""
Test the update_company_from_dnb_data command when the company does not exist in Data Hub.
"""
task_result = update_company_from_dnb_data.apply_async(args=[dnb_response_uk['results'][0]])
assert not task_result.successful()
assert 'Company matching duns_number was not found' in caplog.text
@freeze_time('2019-01-01 11:12:13')
def test_update_company_from_dnb_data_fails_validation(dnb_response_uk, caplog):
    """
    Test the update_company_from_dnb_data command when the company data does not pass validation
    checks.
    """
    CompanyFactory(duns_number='123456789')
    # an absurdly long name should fail the serializer's max-length validation
    dnb_response_uk['results'][0]['primary_name'] = 'a' * 9999
    task_result = update_company_from_dnb_data.apply_async(args=[dnb_response_uk['results'][0]])
    assert not task_result.successful()
    assert 'Data from D&B did not pass the Data Hub validation checks.' in caplog.text
| 38.285714
| 96
| 0.643166
| 3,365
| 30,552
| 5.433878
| 0.094205
| 0.096801
| 0.024173
| 0.027345
| 0.859995
| 0.836916
| 0.807657
| 0.785945
| 0.775663
| 0.753514
| 0
| 0.025868
| 0.261063
| 30,552
| 797
| 97
| 38.333752
| 0.784063
| 0.057378
| 0
| 0.713663
| 0
| 0
| 0.223024
| 0.082328
| 0
| 0
| 0
| 0
| 0.055233
| 1
| 0.021802
| false
| 0.001453
| 0.024709
| 0
| 0.047965
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ae8062c1477660b716dc6d25873267d3dccc340c
| 46,521
|
py
|
Python
|
application/src/pytest/python/modules/app_keys/routes_admin_test.py
|
okebinda/base.api.python
|
fdf6dc02ab73d588919f38d6017788f7822cfd04
|
[
"Apache-2.0"
] | null | null | null |
application/src/pytest/python/modules/app_keys/routes_admin_test.py
|
okebinda/base.api.python
|
fdf6dc02ab73d588919f38d6017788f7822cfd04
|
[
"Apache-2.0"
] | 2
|
2021-06-02T03:26:04.000Z
|
2021-09-30T03:04:00.000Z
|
application/src/pytest/python/modules/app_keys/routes_admin_test.py
|
okebinda/base.api.python
|
fdf6dc02ab73d588919f38d6017788f7822cfd04
|
[
"Apache-2.0"
] | null | null | null |
from copy import copy
import re
import base64
import pytest
from werkzeug.exceptions import NotFound, Unauthorized
from sqlalchemy.orm.exc import NoResultFound
from fixtures import Fixtures
from app import create_app
from config import Config
from modules.app_keys.routes_admin import get_app_keys, post_app_keys, \
get_app_key, put_app_key, delete_app_key
from modules.app_keys.model import AppKey
from modules.administrators.model import Administrator
from modules.roles.model import Role
@pytest.fixture
def app(request):
    """Yield a Flask application configured for testing.

    Tests marked ``admin_api`` get an admin-type app, all others a public
    one. Tests marked ``unit`` receive a bare app; the rest also get
    database fixtures set up first and torn down afterwards.
    """
    test_config = copy(Config)
    test_config.TESTING = True
    is_admin = 'admin_api' in request.keywords
    test_config.APP_TYPE = 'admin' if is_admin else 'public'
    flask_app = create_app(test_config)
    if 'unit' not in request.keywords:
        db_fixtures = Fixtures(flask_app)
        db_fixtures.setup()
        yield flask_app
        db_fixtures.teardown()
    else:
        yield flask_app
# UNIT TESTS
@pytest.mark.unit
@pytest.mark.admin_api
def test_get_app_keys(app, mocker):
    """get_app_keys() returns the first page of app keys plus pagination."""
    expected_status = 200
    expected_length = 2
    expected_properties = ['application', 'created_at', 'id', 'key', 'status',
                           'status_changed_at', 'updated_at']
    expected_limit = 10
    expected_page = 1
    expected_total = 2
    # stub the SQLAlchemy query chain used by the route; the list query and
    # the count query share the same .filter().order_by() child mock
    query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    ordered = query_mock.return_value.filter.return_value.order_by.return_value
    ordered.limit.return_value.offset.return_value.__iter__.return_value = (
        [AppKey()] * expected_length)
    ordered.count.return_value = expected_total
    result = get_app_keys()
    body, status = result[0], result[1]
    assert status == expected_status
    assert len(body.json['app_keys']) == expected_length
    # an unpopulated AppKey serializes with every property set to None
    assert body.json['app_keys'][0] == dict.fromkeys(expected_properties)
    assert body.json['limit'] == expected_limit
    assert body.json['page'] == expected_page
    assert body.json['total'] == expected_total
@pytest.mark.unit
@pytest.mark.admin_api
def test_get_app_keys_limit_10_page_2_of_3(app, mocker):
    """A middle page of results carries both previous_uri and next_uri."""
    expected_status = 200
    expected_length = 10
    expected_properties = ['application', 'created_at', 'id', 'key', 'status',
                           'status_changed_at', 'updated_at']
    expected_limit = 10
    expected_page = 2
    expected_total = 25
    expected_previous_uri = 'http://localhost/app_keys/1/10'
    expected_next_uri = 'http://localhost/app_keys/3/10'
    # stub the SQLAlchemy query chain: a full page of 10 results out of 25
    query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    query_mock.return_value \
        .filter.return_value \
        .order_by.return_value \
        .limit.return_value \
        .offset.return_value \
        .__iter__.return_value = [AppKey()] * expected_length
    query_mock.return_value \
        .filter.return_value \
        .order_by.return_value \
        .count.return_value = expected_total
    result = get_app_keys(expected_page, expected_limit)
    assert result[1] == expected_status
    assert len(result[0].json['app_keys']) == expected_length
    # an unpopulated AppKey serializes with every property set to None
    assert result[0].json['app_keys'][0] == {
        x: None for x in expected_properties}
    assert result[0].json['previous_uri'] == expected_previous_uri
    assert result[0].json['next_uri'] == expected_next_uri
    assert result[0].json['limit'] == expected_limit
    assert result[0].json['page'] == expected_page
    assert result[0].json['total'] == expected_total
@pytest.mark.unit
@pytest.mark.admin_api
def test_get_app_keys_empty(app, mocker):
    """Requesting a page past the end of the result set yields 204."""
    # 15 total records at limit 10 means page 5 has no rows
    query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    ordered = query_mock.return_value.filter.return_value.order_by.return_value
    ordered.limit.return_value.offset.return_value.__iter__.return_value = []
    ordered.count.return_value = 15
    result = get_app_keys(5, 10)
    assert result[1] == 204
    # a 204 response has an empty body rather than JSON
    assert result[0] == ''
@pytest.mark.unit
@pytest.mark.admin_api
def test_get_app_keys_route(app, mocker, client):
    """GET /app_keys succeeds end-to-end with app key + basic auth mocked."""
    expected_status = 200
    expected_length = 10
    expected_limit = 10
    expected_page = 1
    expected_total = 15
    expected_next_uri = 'http://localhost/app_keys/2/10'
    query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    # mock app key authorization db query
    query_mock.return_value \
        .filter.return_value \
        .one.return_value = AppKey()
    # mock app key authorization db query
    query_mock.return_value \
        .filter.return_value \
        .one.return_value = AppKey()
    # mock user login db query
    role2 = Role()
    role2.id = 2
    role2.name = 'SUPER_ADMIN'
    role2.password_reset_days = 365
    admin1 = Administrator()
    admin1.id = 1
    admin1.password = 'admin1pass'
    admin1.roles = [role2]
    query_mock.return_value \
        .filter.return_value \
        .first.return_value = admin1
    auth_db_mock = mocker.patch('modules.administrators.authentication.db')
    auth_db_mock.add.return_value = None
    auth_db_mock.commit.return_value = None
    # mock the paginated list query and its matching count query
    query_mock.return_value \
        .filter.return_value \
        .order_by.return_value \
        .limit.return_value \
        .offset.return_value \
        .__iter__.return_value = [AppKey()] * expected_length
    query_mock.return_value \
        .filter.return_value \
        .order_by.return_value \
        .count.return_value = expected_total
    # mock user login
    auth_mock = mocker.patch(
        'modules.administrators.Authentication.is_account_locked')
    auth_mock.return_value = False
    credentials = base64.b64encode(
        'admin1:admin1pass'.encode('ascii')).decode('utf-8')
    response = client.get("/app_keys?app_key=123",
                          headers={"Authorization": f"Basic {credentials}"})
    assert response.status_code == expected_status
    assert len(response.json['app_keys']) == expected_length
    assert response.json['limit'] == expected_limit
    assert response.json['page'] == expected_page
    assert response.json['total'] == expected_total
    assert response.json['next_uri'] == expected_next_uri
@pytest.mark.unit
@pytest.mark.admin_api
def test_get_app_keys_limit_5_page_2_of_3_route(app, mocker, client):
    """GET /app_keys/<page>/<limit> returns a middle page with both links."""
    expected_status = 200
    expected_length = 5
    expected_limit = 5
    expected_page = 2
    expected_total = 12
    expected_next_uri = 'http://localhost/app_keys/3/5'
    expected_previous_uri = 'http://localhost/app_keys/1/5'
    query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    # mock app key authorization db query
    query_mock.return_value \
        .filter.return_value \
        .one.return_value = AppKey()
    # mock app key authorization db query
    query_mock.return_value \
        .filter.return_value \
        .one.return_value = AppKey()
    # mock user login db query
    role2 = Role()
    role2.id = 2
    role2.name = 'SUPER_ADMIN'
    role2.password_reset_days = 365
    admin1 = Administrator()
    admin1.id = 1
    admin1.password = 'admin1pass'
    admin1.roles = [role2]
    query_mock.return_value \
        .filter.return_value \
        .first.return_value = admin1
    auth_db_mock = mocker.patch('modules.administrators.authentication.db')
    auth_db_mock.add.return_value = None
    auth_db_mock.commit.return_value = None
    # mock the paginated list query and its matching count query
    query_mock.return_value \
        .filter.return_value \
        .order_by.return_value \
        .limit.return_value \
        .offset.return_value \
        .__iter__.return_value = [AppKey()] * expected_length
    query_mock.return_value \
        .filter.return_value \
        .order_by.return_value \
        .count.return_value = expected_total
    # mock user login
    auth_mock = mocker.patch(
        'modules.administrators.Authentication.is_account_locked')
    auth_mock.return_value = False
    credentials = base64.b64encode(
        'admin1:admin1pass'.encode('ascii')).decode('utf-8')
    response = client.get(
        "/app_keys/{}/{}?app_key=123".format(expected_page,
                                             expected_limit),
        headers={"Authorization": f"Basic {credentials}"})
    assert response.status_code == expected_status
    assert len(response.json['app_keys']) == expected_length
    assert response.json['limit'] == expected_limit
    assert response.json['page'] == expected_page
    assert response.json['total'] == expected_total
    assert response.json['next_uri'] == expected_next_uri
    assert response.json['previous_uri'] == expected_previous_uri
@pytest.mark.unit
@pytest.mark.admin_api
def test_get_app_keys_empty_route(app, mocker, client):
    """GET /app_keys/<page> past the last page returns 204 with no body."""
    expected_status = 204
    expected_json = None
    query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    # mock app key authorization db query
    query_mock.return_value \
        .filter.return_value \
        .one.return_value = AppKey()
    # mock app key authorization db query
    query_mock.return_value \
        .filter.return_value \
        .one.return_value = AppKey()
    # mock user login db query
    role2 = Role()
    role2.id = 2
    role2.name = 'SUPER_ADMIN'
    role2.password_reset_days = 365
    admin1 = Administrator()
    admin1.id = 1
    admin1.password = 'admin1pass'
    admin1.roles = [role2]
    query_mock.return_value \
        .filter.return_value \
        .first.return_value = admin1
    auth_db_mock = mocker.patch('modules.administrators.authentication.db')
    auth_db_mock.add.return_value = None
    auth_db_mock.commit.return_value = None
    # mock the list query: page 3 of 15 records at default limit 10 is empty
    query_mock.return_value \
        .filter.return_value \
        .order_by.return_value \
        .limit.return_value \
        .offset.return_value \
        .__iter__.return_value = []
    query_mock.return_value \
        .filter.return_value \
        .order_by.return_value \
        .count.return_value = 15
    # mock user login
    auth_mock = mocker.patch(
        'modules.administrators.Authentication.is_account_locked')
    auth_mock.return_value = False
    credentials = base64.b64encode(
        'admin1:admin1pass'.encode('ascii')).decode('utf-8')
    response = client.get("/app_keys/3?app_key=123",
                          headers={"Authorization": f"Basic {credentials}"})
    assert response.status_code == expected_status
    assert response.json == expected_json
@pytest.mark.unit
@pytest.mark.admin_api
def test_get_app_keys_route_no_app_key(app, client):
    """GET /app_keys without an app key query param is rejected with 401."""
    response = client.get("/app_keys")
    assert response.status_code == 401
    assert 'error' in response.json
@pytest.mark.unit
@pytest.mark.admin_api
def test_get_app_keys_route_bad_app_key(app, mocker, client):
    """GET /app_keys with an unrecognized app key is rejected with 401."""
    # the app key lookup raises NoResultFound for unknown keys
    db_query = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    db_query.return_value.filter.return_value.one.side_effect = NoResultFound()
    response = client.get("/app_keys?app_key=BAD_KEY")
    assert response.status_code == 401
    assert 'error' in response.json
@pytest.mark.unit
@pytest.mark.admin_api
def test_get_app_keys_route_unauthorized(app, mocker, client):
    """GET /app_keys with bad user credentials is rejected with 401."""
    # the app key check passes...
    db_query = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    db_query.return_value.filter.return_value.one.return_value = AppKey()
    # ...but password verification fails
    authentication = mocker.patch('modules.administrators.Authentication')
    authentication.verify_password.side_effect = Unauthorized()
    response = client.get("/app_keys?app_key=123")
    assert response.status_code == 401
    assert 'error' in response.json
@pytest.mark.unit
@pytest.mark.admin_api
def test_get_app_key_ok(app, mocker):
    """get_app_key() returns a single app key looked up by its ID."""
    property_names = ['application', 'created_at', 'id', 'key', 'status',
                      'status_changed_at', 'updated_at']
    db_query = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    db_query.return_value.get.return_value = AppKey()
    result = get_app_key(1)
    assert result[1] == 200
    # an unpopulated AppKey serializes with every property set to None
    assert result[0].json['app_key'] == dict.fromkeys(property_names)
@pytest.mark.unit
@pytest.mark.admin_api
def test_get_app_key_not_found(app, mocker):
    """get_app_key() raises NotFound when no app key has the given ID."""
    query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    query_mock.return_value \
        .get.return_value = None
    # pytest.raises replaces the try/except + assert True/False pattern:
    # it fails the test automatically if NotFound is NOT raised, and cannot
    # accidentally swallow an unrelated exception type
    with pytest.raises(NotFound):
        get_app_key(250)
@pytest.mark.unit
@pytest.mark.admin_api
def test_get_app_key_route_ok(app, mocker, client):
    """GET /app_key/<id> succeeds with app key + basic auth mocked."""
    expected_status = 200
    # mock db query
    query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    # mock app key authorization db query
    query_mock.return_value \
        .filter.return_value \
        .one.return_value = AppKey()
    # mock app key authorization db query
    query_mock.return_value \
        .filter.return_value \
        .one.return_value = AppKey()
    # mock user login db query
    role2 = Role()
    role2.id = 2
    role2.name = 'SUPER_ADMIN'
    role2.password_reset_days = 365
    admin1 = Administrator()
    admin1.id = 1
    admin1.password = 'admin1pass'
    admin1.roles = [role2]
    query_mock.return_value \
        .filter.return_value \
        .first.return_value = admin1
    auth_db_mock = mocker.patch('modules.administrators.authentication.db')
    auth_db_mock.add.return_value = None
    auth_db_mock.commit.return_value = None
    # mock resource query
    query_mock.return_value \
        .get.return_value = AppKey()
    # mock user login
    auth_mock = mocker.patch(
        'modules.administrators.Authentication.is_account_locked')
    auth_mock.return_value = False
    credentials = base64.b64encode(
        'admin1:admin1pass'.encode('ascii')).decode('utf-8')
    response = client.get("/app_key/1?app_key=123",
                          headers={"Authorization": f"Basic {credentials}"})
    assert response.status_code == expected_status
    assert 'app_key' in response.json
@pytest.mark.unit
@pytest.mark.admin_api
def test_get_app_key_route_no_app_key(app, client):
    """GET /app_key/<id> without an app key query param is rejected with 401."""
    response = client.get("/app_key/1")
    assert response.status_code == 401
    assert 'error' in response.json
@pytest.mark.unit
@pytest.mark.admin_api
def test_get_app_key_route_bad_app_key(app, mocker, client):
    """GET /app_key/<id> with an unrecognized app key is rejected with 401."""
    # the app key lookup raises NoResultFound for unknown keys
    db_query = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    db_query.return_value.filter.return_value.one.side_effect = NoResultFound()
    response = client.get("/app_key/1?app_key=BAD_KEY")
    assert response.status_code == 401
    assert 'error' in response.json
@pytest.mark.unit
@pytest.mark.admin_api
def test_get_app_key_route_unauthorized(app, mocker, client):
    """GET /app_key/<id> with bad user credentials is rejected with 401."""
    # the app key check passes...
    db_query = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    db_query.return_value.filter.return_value.one.return_value = AppKey()
    # ...but password verification fails
    authentication = mocker.patch('modules.administrators.Authentication')
    authentication.verify_password.side_effect = Unauthorized()
    response = client.get("/app_key/1?app_key=123")
    assert response.status_code == 401
    assert 'error' in response.json
@pytest.mark.unit
@pytest.mark.admin_api
def test_post_app_key_ok(app, mocker):
    """post_app_keys() creates an app key and returns it with a 201."""
    expected_status = 201
    expected_m_length = 7
    expected_m_application = 'Test Application'
    expected_m_id = None
    expected_m_key = 'B8CzqaJWs9TmffSJjxDCFrepzhvYzrKz'
    expected_m_status = AppKey.STATUS_ENABLED
    expected_m_created_at = None
    expected_m_updated_at = None
    # @todo: timezone
    re_datetime = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}$")
    request_mock = mocker.patch('modules.app_keys.routes_admin.request')
    request_mock.json = {
        'application': expected_m_application,
        'key': expected_m_key,
        "status": expected_m_status
    }
    # uniqueness check: no existing record uses this key
    query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    query_mock.return_value \
        .filter.return_value \
        .first.return_value = None
    db_mock = mocker.patch('modules.app_keys.routes_admin.db')
    db_mock.add.return_value = None
    db_mock.commit.return_value = None
    result = post_app_keys()
    assert result[1] == expected_status
    assert 'app_key' in result[0].json
    assert len(result[0].json['app_key']) == expected_m_length
    assert result[0].json['app_key']['application'] == expected_m_application
    # id/created_at/updated_at stay None because db.commit() is mocked out
    assert result[0].json['app_key']['id'] == expected_m_id
    assert result[0].json['app_key']['key'] == expected_m_key
    assert result[0].json['app_key']['status'] == expected_m_status
    assert bool(re_datetime.match(
        result[0].json['app_key']['status_changed_at']))
    assert result[0].json['app_key']['created_at'] == expected_m_created_at
    assert result[0].json['app_key']['updated_at'] == expected_m_updated_at
@pytest.mark.unit
@pytest.mark.admin_api
def test_post_app_key_unique_fail(app, mocker):
    """Posting a key value already in use returns a uniqueness error."""
    mock_request = mocker.patch('modules.app_keys.routes_admin.request')
    mock_request.json = {
        'application': "Test Application",
        'key': "B8CzqaJWs9TmffSJjxDCFrepzhvYzrKz",
        "status": 1
    }
    # first lookup finds a conflicting AppKey; the next finds nothing
    mock_query = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    mock_query.return_value.filter.return_value.first.side_effect = [
        AppKey(), None]
    result = post_app_keys()
    assert result[1] == 400
    assert result[0].json == {'error': {'key': ['Value must be unique.']}}
@pytest.mark.unit
@pytest.mark.admin_api
def test_post_app_key_required_fail(app, mocker):
    """Posting without required fields reports every missing/unknown field."""
    mock_request = mocker.patch('modules.app_keys.routes_admin.request')
    mock_request.json = {'foo': "bar"}
    result = post_app_keys()
    assert result[1] == 400
    assert result[0].json == {
        'error': {
            'application': ['Missing data for required field.'],
            'foo': ['Unknown field.'],
            'key': ['Missing data for required field.'],
            'status': ['Missing data for required field.'],
        }
    }
@pytest.mark.unit
@pytest.mark.admin_api
def test_post_app_key_min_fail(app, mocker):
    """Posting values below minimum length reports length errors."""
    mock_request = mocker.patch('modules.app_keys.routes_admin.request')
    mock_request.json = {
        'application': "T",
        'key': "B8CzqaJWs9TmffSJjxDCFrepzhvYzrK",
        "status": 1
    }
    # uniqueness check finds no conflicting key
    mock_query = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    mock_query.return_value.filter.return_value.first.return_value = None
    result = post_app_keys()
    assert result[1] == 400
    assert result[0].json == {
        'error': {
            'application': [
                'Value must be between 2 and 200 characters long.'],
            'key': ['Value must be 32 characters long.'],
        }
    }
@pytest.mark.unit
@pytest.mark.admin_api
def test_post_app_key_max_fail(app, mocker):
    """Posting values above maximum length reports length errors."""
    mock_request = mocker.patch('modules.app_keys.routes_admin.request')
    mock_request.json = {
        'application': "9xAqdEjnQ8uHmQjnSWUutERKfmgBFjWWsKkwKy4EBbpjeC8FuAXYH4bBqg5FVGapD47LTDsJmUU7dgUrxBVuSjhRUcQvxxukMvVs87ndpZ76DK9ZULFB77DjGDxmqJ5QHfEV6FjNXK2sbkFzdUBbbkPkcGpvgMqamdP33WpMFcDXpAftcRJyUJtMpVStZ3MMBS7LLVuBaDSBznGSfnpzTk6dS8zhnxpy8EayF6LSuKUjN3d2JkCrRDge5W8Rcmve",
        'key': "AFJdJ9JCUhASZ4cA2ptC7CA72bYKLZD28",
        "status": 1
    }
    # uniqueness check finds no conflicting key
    mock_query = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    mock_query.return_value.filter.return_value.first.return_value = None
    result = post_app_keys()
    assert result[1] == 400
    assert result[0].json == {
        'error': {
            'application': [
                'Value must be between 2 and 200 characters long.'],
            'key': ['Value must be 32 characters long.'],
        }
    }
@pytest.mark.unit
@pytest.mark.admin_api
def test_post_app_key_type_fail(app, mocker):
    """Posting wrongly-typed values reports a type error per field."""
    mock_request = mocker.patch('modules.app_keys.routes_admin.request')
    mock_request.json = {
        'application': 10,
        'key': 15,
        "status": 'enabled'
    }
    # uniqueness check finds no conflicting key
    mock_query = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    mock_query.return_value.filter.return_value.first.return_value = None
    result = post_app_keys()
    assert result[1] == 400
    assert result[0].json == {
        'error': {
            'application': ['Not a valid string.'],
            'key': ['Not a valid string.'],
            'status': ['Not a valid integer.'],
        }
    }
@pytest.mark.unit
@pytest.mark.admin_api
def test_post_app_key_route_ok(app, mocker, client):
    """POST /app_keys creates an app key end-to-end and returns 201."""
    expected_status = 201
    expected_m_length = 7
    expected_m_application = 'Test Application'
    expected_m_id = None
    expected_m_key = 'B8CzqaJWs9TmffSJjxDCFrepzhvYzrKz'
    expected_m_status = AppKey.STATUS_ENABLED
    expected_m_created_at = None
    expected_m_updated_at = None
    # @todo: timezone
    re_datetime = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}$")
    data = {
        'application': expected_m_application,
        'key': expected_m_key,
        "status": expected_m_status
    }
    query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    # mock app key authorization db query
    query_mock.return_value \
        .filter.return_value \
        .one.return_value = AppKey()
    # mock app key authorization db query
    query_mock.return_value \
        .filter.return_value \
        .one.return_value = AppKey()
    # mock user login db query
    role2 = Role()
    role2.id = 2
    role2.name = 'SUPER_ADMIN'
    role2.password_reset_days = 365
    admin1 = Administrator()
    admin1.id = 1
    admin1.password = 'admin1pass'
    admin1.roles = [role2]
    # .first() call order matters: admin login lookup, then uniqueness checks
    query_mock.return_value \
        .filter.return_value \
        .first.side_effect = [admin1, None, None]
    auth_db_mock = mocker.patch('modules.administrators.authentication.db')
    auth_db_mock.add.return_value = None
    auth_db_mock.commit.return_value = None
    db_mock = mocker.patch('modules.app_keys.routes_admin.db')
    db_mock.add.return_value = None
    db_mock.commit.return_value = None
    # mock user login
    auth_mock = mocker.patch(
        'modules.administrators.Authentication.is_account_locked')
    auth_mock.return_value = False
    credentials = base64.b64encode(
        'admin1:admin1pass'.encode('ascii')).decode('utf-8')
    response = client.post(
        "/app_keys?app_key=123",
        json=data,
        headers={"Authorization": f"Basic {credentials}"})
    assert response.status_code == expected_status
    assert 'app_key' in response.json
    assert len(response.json['app_key']) == expected_m_length
    assert response.json['app_key']['application'] == expected_m_application
    # id/created_at/updated_at stay None because db.commit() is mocked out
    assert response.json['app_key']['id'] == expected_m_id
    assert response.json['app_key']['key'] == expected_m_key
    assert response.json['app_key']['status'] == expected_m_status
    assert bool(re_datetime.match(
        response.json['app_key']['status_changed_at']))
    assert response.json['app_key']['created_at'] == expected_m_created_at
    assert response.json['app_key']['updated_at'] == expected_m_updated_at
@pytest.mark.unit
@pytest.mark.admin_api
def test_post_app_key_route_no_app_key(app, client):
    """POST /app_keys without an app key query param is rejected with 401."""
    response = client.post("/app_keys")
    assert response.status_code == 401
    assert 'error' in response.json
@pytest.mark.unit
@pytest.mark.admin_api
def test_post_app_key_route_bad_app_key(app, mocker, client):
    """POST /app_keys with an unrecognized app key is rejected with 401."""
    # the app key lookup raises NoResultFound for unknown keys
    db_query = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    db_query.return_value.filter.return_value.one.side_effect = NoResultFound()
    response = client.post("/app_keys?app_key=BAD_KEY")
    assert response.status_code == 401
    assert 'error' in response.json
@pytest.mark.unit
@pytest.mark.admin_api
def test_post_app_key_route_unauthorized(app, mocker, client):
    """POST /app_keys with bad user credentials is rejected with 401."""
    # the app key check passes...
    db_query = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    db_query.return_value.filter.return_value.one.return_value = AppKey()
    # ...but password verification fails
    authentication = mocker.patch('modules.administrators.Authentication')
    authentication.verify_password.side_effect = Unauthorized()
    response = client.post("/app_keys?app_key=123")
    assert response.status_code == 401
    assert 'error' in response.json
@pytest.mark.unit
@pytest.mark.admin_api
def test_put_app_key_ok(app, mocker):
    """put_app_key() updates an existing app key and returns it with 200."""
    expected_status = 200
    expected_m_length = 7
    expected_m_application = 'Test Application A'
    expected_m_id = 1
    expected_m_key = 'B8CzqaJWs9TmffSJjxDCFrepzhvYzrKA'
    expected_m_status = AppKey.STATUS_ENABLED
    expected_m_created_at = None
    expected_m_updated_at = None
    # @todo: timezone
    re_datetime = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}$")
    request_mock = mocker.patch('modules.app_keys.routes_admin.request')
    request_mock.json = {
        'application': expected_m_application,
        'key': expected_m_key,
        "status": expected_m_status
    }
    # the record the route will load and update
    app_key_1 = AppKey()
    app_key_1.id = expected_m_id
    query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    query_mock.return_value \
        .get.return_value = app_key_1
    # uniqueness check: no other record uses the new key
    query_mock.return_value \
        .filter.return_value \
        .first.return_value = None
    db_mock = mocker.patch('modules.app_keys.routes_admin.db')
    db_mock.commit.return_value = None
    result = put_app_key(expected_m_id)
    assert result[1] == expected_status
    assert 'app_key' in result[0].json
    assert len(result[0].json['app_key']) == expected_m_length
    assert result[0].json['app_key']['application'] == expected_m_application
    assert result[0].json['app_key']['id'] == expected_m_id
    assert result[0].json['app_key']['key'] == expected_m_key
    assert result[0].json['app_key']['status'] == expected_m_status
    assert bool(re_datetime.match(
        result[0].json['app_key']['status_changed_at']))
    # created_at/updated_at stay None because db.commit() is mocked out
    assert result[0].json['app_key']['created_at'] == expected_m_created_at
    assert result[0].json['app_key']['updated_at'] == expected_m_updated_at
@pytest.mark.unit
@pytest.mark.admin_api
def test_put_app_key_unique_fail(app, mocker):
    """Updating a key to a value held by another record fails uniqueness."""
    mock_request = mocker.patch('modules.app_keys.routes_admin.request')
    mock_request.json = {
        'application': "Test Application",
        'key': "B8CzqaJWs9TmffSJjxDCFrepzhvYzrKz",
        "status": 1
    }
    # the record being updated
    record = AppKey()
    record.id = 1
    mock_query = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    mock_query.return_value.get.return_value = record
    # uniqueness check finds a different record already using the key
    mock_query.return_value.filter.return_value.first.return_value = AppKey()
    result = put_app_key(1)
    assert result[1] == 400
    assert result[0].json == {'error': {'key': ['Value must be unique.']}}
@pytest.mark.unit
@pytest.mark.admin_api
def test_put_app_key_required_fail(app, mocker):
    """Updating without required fields reports every missing/unknown field."""
    mock_request = mocker.patch('modules.app_keys.routes_admin.request')
    mock_request.json = {'foo': "bar"}
    # the record being updated
    record = AppKey()
    record.id = 1
    mock_query = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    mock_query.return_value.get.return_value = record
    mock_query.return_value.filter.return_value.first.return_value = None
    result = put_app_key(1)
    assert result[1] == 400
    assert result[0].json == {
        'error': {
            'application': ['Missing data for required field.'],
            'foo': ['Unknown field.'],
            'key': ['Missing data for required field.'],
            'status': ['Missing data for required field.'],
        }
    }
@pytest.mark.unit
@pytest.mark.admin_api
def test_put_app_key_min_fail(app, mocker):
    """Updating with values below minimum length reports length errors."""
    mock_request = mocker.patch('modules.app_keys.routes_admin.request')
    mock_request.json = {
        'application': "T",
        'key': "B8CzqaJWs9TmffSJjxDCFrepzhvYzrK",
        "status": 1
    }
    # the record being updated
    record = AppKey()
    record.id = 1
    mock_query = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    mock_query.return_value.get.return_value = record
    # uniqueness check finds no conflicting key
    mock_query.return_value.filter.return_value.first.return_value = None
    result = put_app_key(1)
    assert result[1] == 400
    assert result[0].json == {
        'error': {
            'application': [
                'Value must be between 2 and 200 characters long.'],
            'key': ['Value must be 32 characters long.'],
        }
    }
@pytest.mark.unit
@pytest.mark.admin_api
def test_put_app_key_max_fail(app, mocker):
    """Updating with values above maximum length reports length errors."""
    mock_request = mocker.patch('modules.app_keys.routes_admin.request')
    mock_request.json = {
        'application': "9xAqdEjnQ8uHmQjnSWUutERKfmgBFjWWsKkwKy4EBbpjeC8FuAXYH4bBqg5FVGapD47LTDsJmUU7dgUrxBVuSjhRUcQvxxukMvVs87ndpZ76DK9ZULFB77DjGDxmqJ5QHfEV6FjNXK2sbkFzdUBbbkPkcGpvgMqamdP33WpMFcDXpAftcRJyUJtMpVStZ3MMBS7LLVuBaDSBznGSfnpzTk6dS8zhnxpy8EayF6LSuKUjN3d2JkCrRDge5W8Rcmve",
        'key': "AFJdJ9JCUhASZ4cA2ptC7CA72bYKLZD28",
        "status": 1
    }
    # the record being updated
    record = AppKey()
    record.id = 1
    mock_query = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    mock_query.return_value.get.return_value = record
    # uniqueness check finds no conflicting key
    mock_query.return_value.filter.return_value.first.return_value = None
    result = put_app_key(1)
    assert result[1] == 400
    assert result[0].json == {
        'error': {
            'application': [
                'Value must be between 2 and 200 characters long.'],
            'key': ['Value must be 32 characters long.'],
        }
    }
@pytest.mark.unit
@pytest.mark.admin_api
def test_put_app_key_type_fail(app, mocker):
    """Updating with wrongly-typed values reports a type error per field."""
    mock_request = mocker.patch('modules.app_keys.routes_admin.request')
    mock_request.json = {
        'application': 10,
        'key': 15,
        "status": 'enabled'
    }
    # the record being updated
    record = AppKey()
    record.id = 1
    mock_query = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    mock_query.return_value.get.return_value = record
    # uniqueness check finds no conflicting key
    mock_query.return_value.filter.return_value.first.return_value = None
    result = put_app_key(1)
    assert result[1] == 400
    assert result[0].json == {
        'error': {
            'application': ['Not a valid string.'],
            'key': ['Not a valid string.'],
            'status': ['Not a valid integer.'],
        }
    }
@pytest.mark.unit
@pytest.mark.admin_api
def test_put_app_key_route_ok(app, mocker, client):
    """PUT /app_key/<id> updates an app key end-to-end and returns 200.

    Bug fix: ``expected_m_id`` was previously set to
    ``AppKey.STATUS_ENABLED`` — a copy/paste slip where a status constant
    was used as a record ID; it only worked because that constant happens
    to equal 1. The ID under test is the literal 1. A duplicated,
    byte-identical app-key-authorization mock stanza was also removed.
    """
    expected_status = 200
    expected_m_length = 7
    expected_m_application = 'Test Application A'
    expected_m_id = 1  # was AppKey.STATUS_ENABLED; this is an ID, not a status
    expected_m_key = 'B8CzqaJWs9TmffSJjxDCFrepzhvYzrKA'
    expected_m_status = AppKey.STATUS_DISABLED
    expected_m_created_at = None
    expected_m_updated_at = None
    # @todo: timezone
    re_datetime = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}$")
    data = {
        'application': expected_m_application,
        'key': expected_m_key,
        "status": expected_m_status
    }
    # the record the route will load and update
    app_key_1 = AppKey()
    app_key_1.id = expected_m_id
    query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    # mock app key authorization db query
    query_mock.return_value \
        .filter.return_value \
        .one.return_value = AppKey()
    # mock user login db query
    role2 = Role()
    role2.id = 2
    role2.name = 'SUPER_ADMIN'
    role2.password_reset_days = 365
    admin1 = Administrator()
    admin1.id = 1
    admin1.password = 'admin1pass'
    admin1.roles = [role2]
    # .first() call order matters: admin login lookup, then uniqueness check
    query_mock.return_value \
        .filter.return_value \
        .first.side_effect = [admin1, None]
    auth_db_mock = mocker.patch('modules.administrators.authentication.db')
    auth_db_mock.add.return_value = None
    auth_db_mock.commit.return_value = None
    # resource lookup returns the record under test
    query_mock.return_value \
        .get.return_value = app_key_1
    db_mock = mocker.patch('modules.app_keys.routes_admin.db')
    db_mock.commit.return_value = None
    # mock user login
    auth_mock = mocker.patch(
        'modules.administrators.Authentication.is_account_locked')
    auth_mock.return_value = False
    credentials = base64.b64encode(
        'admin1:admin1pass'.encode('ascii')).decode('utf-8')
    response = client.put(
        "/app_key/{}?app_key=123".format(expected_m_id),
        json=data,
        headers={"Authorization": f"Basic {credentials}"})
    assert response.status_code == expected_status
    assert 'app_key' in response.json
    assert len(response.json['app_key']) == expected_m_length
    assert response.json['app_key']['application'] == expected_m_application
    assert response.json['app_key']['id'] == expected_m_id
    assert response.json['app_key']['key'] == expected_m_key
    assert response.json['app_key']['status'] == expected_m_status
    assert bool(re_datetime.match(
        response.json['app_key']['status_changed_at']))
    # created_at/updated_at stay None because db.commit() is mocked out
    assert response.json['app_key']['created_at'] == expected_m_created_at
    assert response.json['app_key']['updated_at'] == expected_m_updated_at
@pytest.mark.unit
@pytest.mark.admin_api
def test_put_app_key_route_no_app_key(app, client):
    """PUT /app_key/<id> without an app key query param is rejected with 401."""
    response = client.put("/app_key/1")
    assert response.status_code == 401
    assert 'error' in response.json
@pytest.mark.unit
@pytest.mark.admin_api
def test_put_app_key_route_bad_app_key(app, mocker, client):
    """PUT /app_key/<id> with an unrecognized app key is rejected (401)."""
    db_query = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    # the app key authorization lookup finds no matching record
    db_query.return_value.filter.return_value.one.side_effect = NoResultFound()
    response = client.put("/app_key/1?app_key=BAD_KEY")
    assert response.status_code == 401
    assert 'error' in response.json
@pytest.mark.unit
@pytest.mark.admin_api
def test_put_app_key_route_unauthorized(app, mocker, client):
    """PUT /app_key/<id> with failing admin credentials is rejected (401)."""
    db_query = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    # the app key authorization lookup succeeds...
    db_query.return_value.filter.return_value.one.return_value = AppKey()
    # ...but password verification rejects the login
    authentication = mocker.patch('modules.administrators.Authentication')
    authentication.verify_password.side_effect = Unauthorized()
    response = client.put("/app_key/1?app_key=123")
    assert response.status_code == 401
    assert 'error' in response.json
@pytest.mark.unit
@pytest.mark.admin_api
def test_delete_app_key_ok(app, mocker):
    """delete_app_key() returns an empty body and a 204 for an existing id."""
    db_query = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    # the resource lookup finds a record to delete
    db_query.return_value.get.return_value = AppKey()
    routes_db = mocker.patch('modules.app_keys.routes_admin.db')
    routes_db.commit.return_value = None
    result = delete_app_key(1)
    assert result[1] == 204
    assert result[0] == ''
@pytest.mark.unit
@pytest.mark.admin_api
def test_delete_app_key_fail(app, mocker):
    """delete_app_key() raises NotFound when no record matches the id.

    Uses ``pytest.raises`` instead of the original
    try / ``assert False`` / ``except NotFound`` pattern; a failure now
    reports "DID NOT RAISE NotFound" instead of a bare ``assert False``.
    """
    query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    # resource lookup returns no record
    query_mock.return_value \
        .get.return_value = None
    with pytest.raises(NotFound):
        delete_app_key(250)
@pytest.mark.unit
@pytest.mark.admin_api
def test_delete_app_key_route_ok(app, mocker, client):
    """DELETE /app_key/<id> as an authorized admin returns an empty 204.

    The original set up the app key authorization query mock twice with
    identical values; the redundant second assignment was removed.
    """
    expected_status = 204
    query_mock = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    # mock app key authorization db query
    query_mock.return_value \
        .filter.return_value \
        .one.return_value = AppKey()
    # mock resource query
    query_mock.return_value \
        .get.return_value = AppKey()
    # mock user login db query: admin1 holds the SUPER_ADMIN role
    role2 = Role()
    role2.id = 2
    role2.name = 'SUPER_ADMIN'
    role2.password_reset_days = 365
    admin1 = Administrator()
    admin1.id = 1
    admin1.password = 'admin1pass'
    admin1.roles = [role2]
    query_mock.return_value \
        .filter.return_value \
        .first.return_value = admin1
    auth_db_mock = mocker.patch('modules.administrators.authentication.db')
    auth_db_mock.add.return_value = None
    auth_db_mock.commit.return_value = None
    # mock user login: the account is not locked out
    auth_mock = mocker.patch(
        'modules.administrators.Authentication.is_account_locked')
    auth_mock.return_value = False
    credentials = base64.b64encode(
        'admin1:admin1pass'.encode('ascii')).decode('utf-8')
    response = client.delete("/app_key/5?app_key=123",
                             headers={"Authorization": f"Basic {credentials}"})
    assert response.status_code == expected_status
    assert response.json is None
@pytest.mark.unit
@pytest.mark.admin_api
def test_delete_app_key_route_no_app_key(app, client):
    """DELETE /app_key/<id> without an app key query param is rejected (401)."""
    response = client.delete("/app_key/5")
    assert response.status_code == 401
    assert 'error' in response.json
@pytest.mark.unit
@pytest.mark.admin_api
def test_delete_app_key_route_bad_app_key(app, mocker, client):
    """DELETE /app_key/<id> with an unrecognized app key is rejected (401)."""
    db_query = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    # the app key authorization lookup finds no matching record
    db_query.return_value.filter.return_value.one.side_effect = NoResultFound()
    response = client.delete("/app_key/5?app_key=BAD_KEY")
    assert response.status_code == 401
    assert 'error' in response.json
@pytest.mark.unit
@pytest.mark.admin_api
def test_delete_app_key_route_unauthorized(app, mocker, client):
    """DELETE /app_key/<id> with failing admin credentials is rejected (401)."""
    db_query = mocker.patch('flask_sqlalchemy._QueryProperty.__get__')
    # the app key authorization lookup succeeds...
    db_query.return_value.filter.return_value.one.return_value = AppKey()
    # ...but password verification rejects the login
    authentication = mocker.patch('modules.administrators.Authentication')
    authentication.verify_password.side_effect = Unauthorized()
    response = client.delete("/app_key/5?app_key=123")
    assert response.status_code == 401
    assert 'error' in response.json
# INTEGRATION TESTS
@pytest.mark.integration
@pytest.mark.admin_api
def test_get_app_keys_route_with_data(client):
    """GET /app_keys lists the four seeded app keys with paging metadata."""
    expected_json = {
        "app_keys": [
            {
                "application": "Application 1",
                "created_at": "2018-01-01T00:00:00+0000",
                "id": 1,
                "key": "7sv3aPS45Ck8URGRKUtBdMWgKFN4ahfW",
                "status": 1,
                "status_changed_at": "2018-01-03T00:00:00+0000",
                "updated_at": "2018-01-02T00:00:00+0000"
            },
            {
                "application": "Application 2",
                "created_at": "2018-01-05T00:00:00+0000",
                "id": 2,
                "key": "cvBtQGgL9gNnSZk4DmKnva4QMcpTV7Mx",
                "status": 1,
                "status_changed_at": "2018-01-07T00:00:00+0000",
                "updated_at": "2018-01-06T00:00:00+0000"
            },
            {
                "application": "Application 3",
                "created_at": "2018-01-10T00:00:00+0000",
                "id": 3,
                "key": "9CR45hFpTahbqDvmZFJdENAKz5VPqLG3",
                "status": 2,
                "status_changed_at": "2018-01-12T00:00:00+0000",
                "updated_at": "2018-01-11T00:00:00+0000"
            },
            {
                "application": "Application 6",
                "created_at": "2018-01-25T00:00:00+0000",
                "id": 6,
                "key": "kP4k7vun5RwTBbESwHrCuDdFUtRchbVf",
                "status": 5,
                "status_changed_at": "2018-01-27T00:00:00+0000",
                "updated_at": "2018-01-26T00:00:00+0000"
            }
        ],
        "limit": 10,
        "page": 1,
        "total": 4
    }
    credentials = base64.b64encode(b'admin1:admin1pass').decode('utf-8')
    response = client.get(
        "/app_keys?app_key=7sv3aPS45Ck8URGRKUtBdMWgKFN4ahfW",
        headers={"Authorization": "Basic " + credentials})
    assert response.status_code == 200
    assert response.json == expected_json
@pytest.mark.integration
@pytest.mark.admin_api
def test_get_app_key_1_route_with_data(client):
    """GET /app_key/1 returns the seeded record for an authorized admin."""
    expected_json = {
        "app_key": {
            "application": "Application 1",
            "created_at": "2018-01-01T00:00:00+0000",
            "id": 1,
            "key": "7sv3aPS45Ck8URGRKUtBdMWgKFN4ahfW",
            "status": 1,
            "status_changed_at": "2018-01-03T00:00:00+0000",
            "updated_at": "2018-01-02T00:00:00+0000"
        }
    }
    credentials = base64.b64encode(b'admin1:admin1pass').decode('utf-8')
    response = client.get(
        "/app_key/1?app_key=7sv3aPS45Ck8URGRKUtBdMWgKFN4ahfW",
        headers={"Authorization": "Basic " + credentials})
    assert response.status_code == 200
    assert response.json == expected_json
@pytest.mark.integration
@pytest.mark.admin_api
def test_post_app_keys_route_with_data(client, mocker):
    """POST /app_keys creates a new app key (id 7) from the request body."""
    application = 'Test Application'
    key = 'B8CzqaJWs9TmffSJjxDCFrepzhvYzrKz'
    status = AppKey.STATUS_ENABLED
    iso_format = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\+\d{4}$")
    # feed the payload through a mocked flask request object
    request_mock = mocker.patch('modules.app_keys.routes_admin.request')
    request_mock.json = {
        'application': application,
        'key': key,
        'status': status
    }
    credentials = base64.b64encode(b'admin1:admin1pass').decode('utf-8')
    response = client.post(
        "/app_keys?app_key=7sv3aPS45Ck8URGRKUtBdMWgKFN4ahfW",
        headers={"Authorization": "Basic " + credentials})
    assert response.status_code == 201
    assert 'app_key' in response.json
    record = response.json['app_key']
    assert len(record) == 7
    assert record['application'] == application
    assert record['id'] == 7
    assert record['key'] == key
    assert record['status'] == status
    assert iso_format.match(record['status_changed_at'])
    assert iso_format.match(record['created_at'])
    assert iso_format.match(record['updated_at'])
@pytest.mark.integration
@pytest.mark.admin_api
def test_put_app_keys_route_with_data(client, mocker):
    """PUT /app_key/2 updates the record from the mocked request body."""
    application = 'Application 2a'
    key = 'cvBtQGgL9gNnSZk4DmKnva4QMcpTV7MA'
    status = AppKey.STATUS_DISABLED
    record_id = 2
    iso_format = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\+\d{4}$")
    # feed the payload through a mocked flask request object
    request_mock = mocker.patch('modules.app_keys.routes_admin.request')
    request_mock.json = {
        'application': application,
        'key': key,
        'status': status
    }
    credentials = base64.b64encode(b'admin1:admin1pass').decode('utf-8')
    response = client.put(
        f"/app_key/{record_id}?app_key=7sv3aPS45Ck8URGRKUtBdMWgKFN4ahfW",
        headers={"Authorization": "Basic " + credentials})
    assert response.status_code == 200
    assert 'app_key' in response.json
    record = response.json['app_key']
    assert len(record) == 7
    assert record['application'] == application
    assert record['id'] == record_id
    assert record['key'] == key
    assert record['status'] == status
    assert iso_format.match(record['status_changed_at'])
    assert iso_format.match(record['created_at'])
    assert iso_format.match(record['updated_at'])
@pytest.mark.integration
@pytest.mark.admin_api
def test_delete_app_key_1_route_with_data(client):
    """DELETE /app_key/5 as an authorized admin returns an empty 204."""
    credentials = base64.b64encode(b'admin1:admin1pass').decode('utf-8')
    response = client.delete(
        "/app_key/5?app_key=7sv3aPS45Ck8URGRKUtBdMWgKFN4ahfW",
        headers={"Authorization": "Basic " + credentials})
    assert response.status_code == 204
    assert response.json is None
| 30.62607
| 282
| 0.677372
| 5,810
| 46,521
| 5.111015
| 0.038898
| 0.08594
| 0.03738
| 0.045125
| 0.959791
| 0.950699
| 0.946287
| 0.935646
| 0.927564
| 0.911433
| 0
| 0.028932
| 0.210228
| 46,521
| 1,518
| 283
| 30.646245
| 0.779293
| 0.0299
| 0
| 0.823165
| 0
| 0.005305
| 0.202902
| 0.115228
| 0
| 0
| 0
| 0.000659
| 0.140584
| 1
| 0.041556
| false
| 0.027409
| 0.011494
| 0
| 0.05305
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
88266dc98cbd79aacbafa6020a28ef00af998f4c
| 6,182
|
py
|
Python
|
test_Geodesy.py
|
DatHydroGuy/Geodesy
|
4d42b969e99066ca5427604d3cfe0590e909cdf2
|
[
"Apache-2.0"
] | null | null | null |
test_Geodesy.py
|
DatHydroGuy/Geodesy
|
4d42b969e99066ca5427604d3cfe0590e909cdf2
|
[
"Apache-2.0"
] | null | null | null |
test_Geodesy.py
|
DatHydroGuy/Geodesy
|
4d42b969e99066ca5427604d3cfe0590e909cdf2
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from hypothesis import given, strategies as st
import Geodesy
class TestAngle(object):
    """Property-based tests for Geodesy.Angle's DMS-to-decimal conversion."""

    def test_null_instance_has_zero_degrees(self):
        """With no arguments an Angle starts at zero degrees."""
        assert Geodesy.Angle().degrees == 0

    @given(decimal_degrees=st.floats(allow_nan=False, allow_infinity=False))
    def test_one_parameter_instance_has_non_zero_degrees(self, decimal_degrees):
        """A lone positional argument is interpreted as decimal degrees."""
        angle = Geodesy.Angle(decimal_degrees)
        assert angle.degrees == decimal_degrees

    @given(decimal_degrees=st.floats(min_value=0.0, allow_nan=False, allow_infinity=False),
           decimal_minutes=st.floats(min_value=0.0, allow_nan=False, allow_infinity=False))
    def test_two_parameter_instance_converts_positive_parameter_from_minutes_to_decimal_degrees(
            self, decimal_degrees, decimal_minutes):
        """For non-negative degrees, minutes are added (1 minute = 1/60 deg)."""
        angle = Geodesy.Angle(decimal_degrees, decimal_minutes)
        assert angle.degrees == decimal_degrees + decimal_minutes / 60.0

    @given(decimal_degrees=st.floats(max_value=-0.000000000001, allow_nan=False, allow_infinity=False),
           decimal_minutes=st.floats(min_value=0.0, allow_nan=False, allow_infinity=False))
    def test_two_parameter_instance_converts_negative_parameter_from_minutes_to_decimal_degrees(
            self, decimal_degrees, decimal_minutes):
        """For negative degrees, minutes pull the value further negative."""
        angle = Geodesy.Angle(decimal_degrees, decimal_minutes)
        assert angle.degrees == decimal_degrees - decimal_minutes / 60.0

    @given(decimal_degrees=st.floats(min_value=0.0, allow_nan=False, allow_infinity=False),
           decimal_minutes=st.floats(min_value=0.0, allow_nan=False, allow_infinity=False),
           decimal_seconds=st.floats(min_value=0.0, allow_nan=False, allow_infinity=False))
    def test_three_parameter_instance_converts_positive_parameter_from_minutes_to_decimal_degrees(
            self, decimal_degrees, decimal_minutes, decimal_seconds):
        """For non-negative degrees, minutes and seconds are both added."""
        angle = Geodesy.Angle(decimal_degrees, decimal_minutes, decimal_seconds)
        expected = decimal_degrees + decimal_minutes / 60.0 + decimal_seconds / 3600.0
        assert expected == pytest.approx(angle.degrees)

    @given(decimal_degrees=st.floats(max_value=-0.000000000001, allow_nan=False, allow_infinity=False),
           decimal_minutes=st.floats(min_value=0.0, allow_nan=False, allow_infinity=False),
           decimal_seconds=st.floats(min_value=0.0, allow_nan=False, allow_infinity=False))
    def test_three_parameter_instance_converts_negative_parameter_from_minutes_to_decimal_degrees(
            self, decimal_degrees, decimal_minutes, decimal_seconds):
        """For negative degrees, minutes and seconds are both subtracted."""
        angle = Geodesy.Angle(decimal_degrees, decimal_minutes, decimal_seconds)
        expected = decimal_degrees - decimal_minutes / 60.0 - decimal_seconds / 3600.0
        assert expected == pytest.approx(angle.degrees)

    @given(decimal_degrees=st.floats(allow_nan=False, allow_infinity=False))
    def test_one_named_parameter_instance_has_non_zero_degrees(self, decimal_degrees):
        """The `degrees` keyword behaves like the positional form."""
        angle = Geodesy.Angle(degrees=decimal_degrees)
        assert angle.degrees == decimal_degrees

    @given(decimal_degrees=st.floats(min_value=0.0, allow_nan=False, allow_infinity=False),
           decimal_minutes=st.floats(min_value=0.0, allow_nan=False, allow_infinity=False))
    def test_two_named_parameter_instance_converts_positive_parameter_from_minutes_to_decimal_degrees(
            self, decimal_degrees, decimal_minutes):
        """Keyword degrees/minutes add for non-negative degrees."""
        angle = Geodesy.Angle(degrees=decimal_degrees, minutes=decimal_minutes)
        assert angle.degrees == decimal_degrees + decimal_minutes / 60.0

    @given(decimal_degrees=st.floats(max_value=-0.000000000001, allow_nan=False, allow_infinity=False),
           decimal_minutes=st.floats(min_value=0.0, allow_nan=False, allow_infinity=False))
    def test_two_named_parameter_instance_converts_negative_parameter_from_minutes_to_decimal_degrees(
            self, decimal_degrees, decimal_minutes):
        """Keyword degrees/minutes subtract for negative degrees."""
        angle = Geodesy.Angle(degrees=decimal_degrees, minutes=decimal_minutes)
        assert angle.degrees == decimal_degrees - decimal_minutes / 60.0

    @given(decimal_degrees=st.floats(min_value=0.0, allow_nan=False, allow_infinity=False),
           decimal_minutes=st.floats(min_value=0.0, allow_nan=False, allow_infinity=False),
           decimal_seconds=st.floats(min_value=0.0, allow_nan=False, allow_infinity=False))
    def test_three_named_parameter_instance_converts_positive_parameter_from_minutes_to_decimal_degrees(
            self, decimal_degrees, decimal_minutes, decimal_seconds):
        """Keyword degrees/minutes/seconds add for non-negative degrees."""
        angle = Geodesy.Angle(degrees=decimal_degrees, minutes=decimal_minutes, seconds=decimal_seconds)
        expected = decimal_degrees + decimal_minutes / 60.0 + decimal_seconds / 3600.0
        assert expected == pytest.approx(angle.degrees)

    @given(decimal_degrees=st.floats(max_value=-0.000000000001, allow_nan=False, allow_infinity=False),
           decimal_minutes=st.floats(min_value=0.0, allow_nan=False, allow_infinity=False),
           decimal_seconds=st.floats(min_value=0.0, allow_nan=False, allow_infinity=False))
    def test_three_named_parameter_instance_converts_negative_parameter_from_minutes_to_decimal_degrees(
            self, decimal_degrees, decimal_minutes, decimal_seconds):
        """Keyword degrees/minutes/seconds subtract for negative degrees."""
        angle = Geodesy.Angle(degrees=decimal_degrees, minutes=decimal_minutes, seconds=decimal_seconds)
        expected = decimal_degrees - decimal_minutes / 60.0 - decimal_seconds / 3600.0
        assert expected == pytest.approx(angle.degrees)
| 71.883721
| 115
| 0.651731
| 711
| 6,182
| 5.295359
| 0.068917
| 0.178486
| 0.075963
| 0.105179
| 0.959097
| 0.959097
| 0.957238
| 0.957238
| 0.957238
| 0.957238
| 0
| 0.028692
| 0.272727
| 6,182
| 85
| 116
| 72.729412
| 0.808719
| 0
| 0
| 0.739726
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150685
| 1
| 0.150685
| false
| 0
| 0.041096
| 0
| 0.205479
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
885f3c5cb2fd98bb5b044ac6f2ba956ab490334f
| 12,698
|
py
|
Python
|
Untitled.py
|
LabSolarBeauchef/LabSolarBeauchef.github.io
|
102a1d70b9fc94896de82976edce0f8888274bab
|
[
"MIT"
] | null | null | null |
Untitled.py
|
LabSolarBeauchef/LabSolarBeauchef.github.io
|
102a1d70b9fc94896de82976edce0f8888274bab
|
[
"MIT"
] | null | null | null |
Untitled.py
|
LabSolarBeauchef/LabSolarBeauchef.github.io
|
102a1d70b9fc94896de82976edce0f8888274bab
|
[
"MIT"
] | null | null | null |
import scipy.io as sio
import csv
import time

# ---------------------------------------------------------------------------
# Export the Estaciones_UChile.mat weather-station archive to CSV files,
# one folder per sampling period: ./csv/minutos, ./csv/hora, ./csv/dias.
#
# Rewritten from a heavily copy-pasted Python 2 script:
#   * `print` statements became single-argument `print(...)` calls (valid
#     under both Python 2 and 3);
#   * the `/10` decimation became `//10`, preserving the original Python 2
#     integer division under Python 3;
#   * the repeated open/DictWriter/writerow boilerplate was factored into
#     helpers; output paths, headers and values are unchanged except for
#     the one fix noted in _export_period below.
# ---------------------------------------------------------------------------

tiempo1 = time.time()

a = 'Estaciones_UChile.mat'
archive = sio.loadmat(a)
sio.whosmat(a)


def _strip_spaces(raw):
    """Render a MATLAB cell label as a string with all spaces removed."""
    return str(raw).replace(" ", "")


def _clean_label(raw):
    """Like _strip_spaces, but also drops the u'...' quoting artifacts."""
    return str(raw).replace(" ", "").replace("u'", "").replace("'", "")


def _clean_value(raw):
    """Stringify a data cell, mapping the [u'NAN'] missing marker to '0'."""
    return str(raw).replace("[u'NAN']", "0")


def _write_csv(path, fieldnames, rows):
    """Write dict `rows` to `path`, preceded by a `fieldnames` header line."""
    with open(path, 'w') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for row in rows:
            writer.writerow(row)


# descend into the nested struct arrays produced by loadmat;
# lista  = per-minute station table  [dates, col1, col2, ...]
# lista3 = per-minute radiation table [dates, labels, values]
estructura1 = archive['Estaciones_UChile']
estructura2 = estructura1['Minutos']
lista = estructura2[0][0][0][0][0][0][0][0][0]
lista3 = estructura2[0][0][0][0][1][0][0][0][0]

print('-------- Imprimir por minutos-----------')
# column legend for `lista`; index 0 is TIMESTAMP, index 1 the record count
nombres = ['Fechas', 'RECORS', 'BattV_min', 'PTemp_C_Avg', 'Pres_mbar_Avg', 'Temp_C_Avg', 'Temp_C_Max', 'Temp_C_Min', 'RH', 'Slr_Wm2_Avg', 'Total_flu', 'Rain_mm_Tot', 'WS_ms_S_WVT', 'WindDir_d1_WVT', 'WindDir_SD1_WVT']

# one CSV per data column (TIMESTAMP and RECORS are skipped)
for j in range(2, len(nombres)):
    nombre = nombres[j]
    _write_csv('./csv/minutos/' + nombre + '_minutos.csv',
               ['Fechas', nombre],
               [{'Fechas': lista[0][i][0][0],
                 nombre: _clean_value(lista[j][i][0])}
                for i in range(len(lista[0]))])

# combined temperature file (avg/max/min), values written raw as before
_write_csv('./csv/minutos/Datos_temperatura_minutos.csv',
           ['Fechas', nombres[5], nombres[6], nombres[7]],
           [{'Fechas': lista[0][i][0][0],
             nombres[5]: lista[5][i][0],
             nombres[6]: lista[6][i][0],
             nombres[7]: lista[7][i][0]}
            for i in range(len(lista[0]))])

# radiation station: one CSV per labelled column
for j in range(2, len(lista3[1])):
    nombre = _clean_label(lista3[1][j])
    _write_csv('./csv/minutos/' + nombre + '_minutos.csv',
               ['Fechas', nombre],
               [{'Fechas': lista3[0][i][0][0],
                 nombre: _clean_value(lista3[2][i][j])}
                for i in range(len(lista3[0]))])

# combined radiation file (columns 2-4)
rad_labels = [_clean_label(lista3[1][k]) for k in (2, 3, 4)]
_write_csv('./csv/minutos/Datos_radiacion_minutos.csv',
           ['Fechas'] + rad_labels,
           [{'Fechas': lista3[0][i][0][0],
             rad_labels[0]: _clean_value(lista3[2][i][2]),
             rad_labels[1]: _clean_value(lista3[2][i][3]),
             rad_labels[2]: _clean_value(lista3[2][i][4])}
            for i in range(len(lista3[0]))])

# same columns decimated to every 10th minute (integer division, as in Py2)
_write_csv('./csv/minutos/Datos_radiacion_10min_minutos.csv',
           ['Fechas'] + rad_labels,
           [{'Fechas': lista3[0][i * 10][0][0],
             rad_labels[0]: _clean_value(lista3[2][i * 10][2]),
             rad_labels[1]: _clean_value(lista3[2][i * 10][3]),
             rad_labels[2]: _clean_value(lista3[2][i * 10][4])}
            for i in range(len(lista3[0]) // 10)])

tiempo2 = time.time()
print('Me demoro ' + str(tiempo2 - tiempo1) + 'segundos')


def _export_period(tabla, carpeta, sufijo):
    """Export an hourly/daily table ([dates, values, labels]) under
    ./csv/<carpeta>/: one file per data column, plus combined radiation,
    temperature and all-columns ("Todos") files.

    NOTE(review): the original wrote column 0's values under the column-2
    header in the Datos_radiacion file (while the parallel temperature
    block correctly uses columns 4/5/6) — assumed to be a copy/paste slip
    and fixed here to use column 2.
    """
    fechas = tabla[0]
    datos = tabla[1]
    etiquetas = [_strip_spaces(tabla[2][j]) for j in range(19)]
    n = len(fechas)

    def ruta(nombre):
        return './csv/' + carpeta + '/' + nombre + '_' + sufijo + '.csv'

    # one CSV per data column (first 18 columns)
    for j in range(18):
        _write_csv(ruta(etiquetas[j]), ['Fechas', etiquetas[j]],
                   [{'Fechas': fechas[i][0][0], etiquetas[j]: datos[i][j]}
                    for i in range(n)])
    # radiation: columns 0-2 (column 2 fixed, see docstring)
    _write_csv(ruta('Datos_radiacion'), ['Fechas'] + etiquetas[0:3],
               [{'Fechas': fechas[i][0][0],
                 etiquetas[0]: datos[i][0],
                 etiquetas[1]: datos[i][1],
                 etiquetas[2]: datos[i][2]}
                for i in range(n)])
    # temperature: columns 4-6
    _write_csv(ruta('Datos_temperatura'), ['Fechas'] + etiquetas[4:7],
               [{'Fechas': fechas[i][0][0],
                 etiquetas[4]: datos[i][4],
                 etiquetas[5]: datos[i][5],
                 etiquetas[6]: datos[i][6]}
                for i in range(n)])
    # everything: columns 0-18
    _write_csv(ruta('Todos'), ['Fechas'] + etiquetas,
               [dict([('Fechas', fechas[i][0][0])] +
                     [(etiquetas[j], datos[i][j]) for j in range(19)])
                for i in range(n)])


print('-------- Imprimir por hora-----------')
estructura3 = estructura1['Horas']
lista2 = estructura3[0][0][0][0][0][0]
_export_period(lista2, 'hora', 'hora')
tiempo3 = time.time()
print('Me demoro ' + str(tiempo3 - tiempo2) + 'segundos')

print('-------- Imprimir por dias-----------')
estructura3 = estructura1['Dias']
lista2 = estructura3[0][0][0][0][0][0]
_export_period(lista2, 'dias', 'dias')
tiempo4 = time.time()
print('Me demoro ' + str(tiempo4 - tiempo3) + 'segundos')
| 80.878981
| 1,100
| 0.56316
| 1,820
| 12,698
| 3.887912
| 0.073077
| 0.132278
| 0.146976
| 0.105992
| 0.851328
| 0.841577
| 0.819389
| 0.807094
| 0.805823
| 0.804975
| 0
| 0.070797
| 0.135691
| 12,698
| 156
| 1,101
| 81.397436
| 0.573941
| 0.087179
| 0
| 0.708333
| 0
| 0
| 0.104196
| 0.003702
| 0
| 0
| 0
| 0.00641
| 0
| 0
| null | null | 0
| 0.025
| null | null | 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
88737dbeb9ef2dfa25bfceefcffc3d61ec4dcf9f
| 6,021
|
py
|
Python
|
tests/test_encoded_queries.py
|
keleog/pyserini
|
3cd6b7ee8e77d699726756938fac0714c10ad0a9
|
[
"Apache-2.0"
] | null | null | null |
tests/test_encoded_queries.py
|
keleog/pyserini
|
3cd6b7ee8e77d699726756938fac0714c10ad0a9
|
[
"Apache-2.0"
] | null | null | null |
tests/test_encoded_queries.py
|
keleog/pyserini
|
3cd6b7ee8e77d699726756938fac0714c10ad0a9
|
[
"Apache-2.0"
] | null | null | null |
#
# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import unittest
from pyserini.dsearch import QueryEncoder
from pyserini.search import get_topics
class TestEncodedQueries(unittest.TestCase):
    def setUp(self):
        # Redirect pyserini's download cache into a local scratch directory
        # so the encoded-query archives land in 'temp_dir'.
        os.environ['PYSERINI_CACHE'] = 'temp_dir'
def test_tct_colbert_msmarco_passage_dev_subset(self):
encoder = QueryEncoder.load_encoded_queries('tct_colbert-msmarco-passage-dev-subset')
topics = get_topics('msmarco-passage-dev-subset')
for t in topics:
self.assertTrue(topics[t]['title'] in encoder.embedding)
def test_ance_msmarco_passage_dev_subset(self):
encoder = QueryEncoder.load_encoded_queries('ance-msmarco-passage-dev-subset')
topics = get_topics('msmarco-passage-dev-subset')
for t in topics:
self.assertTrue(topics[t]['title'] in encoder.embedding)
def test_tct_colbert_msmarco_doc_dev(self):
encoder = QueryEncoder.load_encoded_queries('tct_colbert-msmarco-doc-dev')
topics = get_topics('msmarco-doc-dev')
for t in topics:
self.assertTrue(topics[t]['title'] in encoder.embedding)
def test_ance_maxp_msmarco_doc_dev(self):
encoder = QueryEncoder.load_encoded_queries('ance_maxp-msmarco-doc-dev')
topics = get_topics('msmarco-doc-dev')
for t in topics:
self.assertTrue(topics[t]['title'] in encoder.embedding)
def test_sbert_msmarco_passage_dev_subset(self):
encoder = QueryEncoder.load_encoded_queries('sbert-msmarco-passage-dev-subset')
topics = get_topics('msmarco-passage-dev-subset')
for t in topics:
self.assertTrue(topics[t]['title'] in encoder.embedding)
def test_distilbert_kd_msmarco_passage_dev_subset(self):
encoder = QueryEncoder.load_encoded_queries('distilbert_kd-msmarco-passage-dev-subset')
topics = get_topics('msmarco-passage-dev-subset')
for t in topics:
self.assertTrue(topics[t]['title'] in encoder.embedding)
def test_dpr_multi_nq_dev(self):
encoder = QueryEncoder.load_encoded_queries('dpr_multi-nq-dev')
topics = get_topics('dpr-nq-dev')
for t in topics:
self.assertTrue(topics[t]['title'] in encoder.embedding)
def test_dpr_multi_nq_test(self):
encoder = QueryEncoder.load_encoded_queries('dpr_multi-nq-test')
topics = get_topics('dpr-nq-test')
for t in topics:
self.assertTrue(topics[t]['title'] in encoder.embedding)
def test_ance_multi_nq_dev(self):
encoder = QueryEncoder.load_encoded_queries('ance_multi-nq-dev')
topics = get_topics('dpr-nq-dev')
for t in topics:
self.assertTrue(topics[t]['title'] in encoder.embedding)
def test_ance_multi_nq_test(self):
encoder = QueryEncoder.load_encoded_queries('ance_multi-nq-test')
topics = get_topics('dpr-nq-test')
for t in topics:
self.assertTrue(topics[t]['title'] in encoder.embedding)
def test_dpr_multi_trivia_dev(self):
encoder = QueryEncoder.load_encoded_queries('dpr_multi-trivia-dev')
topics = get_topics('dpr-trivia-dev')
for t in topics:
self.assertTrue(topics[t]['title'] in encoder.embedding)
def test_dpr_multi_trivia_test(self):
encoder = QueryEncoder.load_encoded_queries('dpr_multi-trivia-test')
topics = get_topics('dpr-trivia-test')
for t in topics:
self.assertTrue(topics[t]['title'] in encoder.embedding)
def test_ance_multi_trivia_dev(self):
encoder = QueryEncoder.load_encoded_queries('ance_multi-trivia-dev')
topics = get_topics('dpr-trivia-dev')
for t in topics:
self.assertTrue(topics[t]['title'] in encoder.embedding)
def test_ance_multi_trivia_test(self):
encoder = QueryEncoder.load_encoded_queries('ance_multi-trivia-test')
topics = get_topics('dpr-trivia-test')
for t in topics:
self.assertTrue(topics[t]['title'] in encoder.embedding)
def test_dpr_multi_wq_test(self):
encoder = QueryEncoder.load_encoded_queries('dpr_multi-wq-test')
topics = get_topics('dpr-wq-test')
for t in topics:
self.assertTrue(topics[t]['title'] in encoder.embedding)
def test_dpr_multi_squad_test(self):
encoder = QueryEncoder.load_encoded_queries('dpr_multi-squad-test')
topics = get_topics('dpr-squad-test')
for t in topics:
self.assertTrue(topics[t]['title'] in encoder.embedding)
def test_dpr_multi_curated_test(self):
encoder = QueryEncoder.load_encoded_queries('dpr_multi-curated-test')
topics = get_topics('dpr-curated-test')
for t in topics:
self.assertTrue(topics[t]['title'] in encoder.embedding)
def test_dpr_single_nq_dev(self):
encoder = QueryEncoder.load_encoded_queries('dpr_single_nq-nq-dev')
topics = get_topics('dpr-nq-dev')
for t in topics:
self.assertTrue(topics[t]['title'] in encoder.embedding)
def test_dpr_single_nq_test(self):
encoder = QueryEncoder.load_encoded_queries('dpr_single_nq-nq-test')
topics = get_topics('dpr-nq-test')
for t in topics:
self.assertTrue(topics[t]['title'] in encoder.embedding)
def tearDown(self):
if os.path.exists('temp_dir'):
shutil.rmtree('temp_dir')
| 40.959184
| 95
| 0.689254
| 806
| 6,021
| 4.944169
| 0.148883
| 0.045169
| 0.109661
| 0.128733
| 0.810038
| 0.784693
| 0.776161
| 0.775157
| 0.774153
| 0.604015
| 0
| 0.000836
| 0.204949
| 6,021
| 146
| 96
| 41.239726
| 0.831627
| 0.09799
| 0
| 0.509434
| 0
| 0
| 0.161404
| 0.074608
| 0
| 0
| 0
| 0
| 0.179245
| 1
| 0.198113
| false
| 0.113208
| 0.04717
| 0
| 0.254717
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
888f20a8d87d9dbd9bf342fda0d10b044a79a078
| 758
|
py
|
Python
|
envi/tests/msp430/ipush.py
|
rnui2k/vivisect
|
b7b00f2d03defef28b4b8c912e3a8016e956c5f7
|
[
"ECL-2.0",
"Apache-2.0"
] | 716
|
2015-01-01T14:41:11.000Z
|
2022-03-28T06:51:50.000Z
|
envi/tests/msp430/ipush.py
|
rnui2k/vivisect
|
b7b00f2d03defef28b4b8c912e3a8016e956c5f7
|
[
"ECL-2.0",
"Apache-2.0"
] | 266
|
2015-01-01T15:07:27.000Z
|
2022-03-30T15:19:26.000Z
|
envi/tests/msp430/ipush.py
|
rnui2k/vivisect
|
b7b00f2d03defef28b4b8c912e3a8016e956c5f7
|
[
"ECL-2.0",
"Apache-2.0"
] | 159
|
2015-01-01T16:19:44.000Z
|
2022-03-21T21:55:34.000Z
|
from envi.archs.msp430.regs import *
# Instruction test vectors for the MSP430 PUSH instruction.  Each entry is a
# (mnemonic, pre-state, expected post-state) triple, where a state dict holds:
#   'regs'  - (register, value) pairs before/after execution
#   'flags' - (status-flag, bit) pairs (N/Z/C/V); PUSH leaves them untouched
#   'code'  - instruction opcode bytes as a hex string
#   'data'  - memory contents as a hex string (SP points into this region)
checks = [
    # PUSH (word): SP drops from 0x1004 to 0x1002 and the full word of R15
    # (0xaabb) is written little-endian at the new top of stack.
    (
        'PUSH R15',
        { 'regs': [(REG_SP, 0x1004), (REG_R15, 0xaabb)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "0f12", 'data': "112233445566" },
        { 'regs': [(REG_SP, 0x1002), (REG_R15, 0xaabb)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "0f12", 'data': "1122bbaa5566" }
    ),
    # PUSH.b (byte): SP still drops by 2, but only the low byte of R15 (0xbb)
    # is stored; the adjacent byte of memory is left unchanged.
    (
        'PUSH.b R15',
        { 'regs': [(REG_SP, 0x1004), (REG_R15, 0xaabb)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f12", 'data': "112233445566" },
        { 'regs': [(REG_SP, 0x1002), (REG_R15, 0xaabb)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f12", 'data': "1122bb445566" }
    ),
]
| 42.111111
| 153
| 0.489446
| 111
| 758
| 3.126126
| 0.261261
| 0.103746
| 0.103746
| 0.195965
| 0.778098
| 0.778098
| 0.778098
| 0.778098
| 0.778098
| 0.778098
| 0
| 0.186644
| 0.229551
| 758
| 17
| 154
| 44.588235
| 0.407534
| 0.014512
| 0
| 0.153846
| 0
| 0
| 0.201613
| 0
| 0
| 0
| 0.064516
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ee109f5a06e53f1ef4e781b9342770d42cda64bf
| 10,206
|
py
|
Python
|
src/test_macrobatch.py
|
lzamparo/embedding
|
9d2aea3fc38698c3fdfcc7ee29269aeff62a5ee4
|
[
"MIT"
] | null | null | null |
src/test_macrobatch.py
|
lzamparo/embedding
|
9d2aea3fc38698c3fdfcc7ee29269aeff62a5ee4
|
[
"MIT"
] | null | null | null |
src/test_macrobatch.py
|
lzamparo/embedding
|
9d2aea3fc38698c3fdfcc7ee29269aeff62a5ee4
|
[
"MIT"
] | null | null | null |
import sys
import os
import numpy as np
import yaml
from probe2vec.noise_contrast import get_noise_contrastive_loss, get_noise_contrastive_nonsymbolic_values
from probe2vec.embedding_utils import SequenceParser
from probe2vec.w2v import assemble_model_components
from probe2vec import embedding_utils
# Only import theano and lasagne if environment permits it
from theano import function
import lasagne
#from lasagne.layers import (
#get_output, InputLayer, EmbeddingLayer, get_all_params,
#get_all_param_values
#)
#from lasagne.init import Normal
from lasagne.updates import nesterov_momentum, adam
# load the params from the yaml file given in sys.argv[1]
def test_macrobatch_for_nan(params_yaml):
    """Train over the dataset and report whether training stayed NaN-free.

    :param params_yaml: path to a YAML config file describing the data,
        model and training hyperparameters
    :return: True when the last macrobatch's average loss is numeric,
        False when it is NaN
    """
    with open(params_yaml) as f:
        # safe_load: the config is plain data; full yaml.load can execute
        # arbitrary constructors and is deprecated without a Loader.
        params = yaml.safe_load(f)

    # parse what this function actually uses from the yaml file
    data_dir = os.path.abspath(params['data_dir'])
    fasta_files = [os.path.join(data_dir, f) for f in os.listdir(data_dir)
                   if f.endswith(params['file_suffixes'])]
    batch_size = params.get('batch_size', 1000)
    read_data_async = params.get('read_data_async', True)
    verbose = True

    # create sequence parser from the yaml config and wire up the inputs
    params['parser'] = SequenceParser(**params)
    params['files'] = fasta_files

    # everything needed for the embedding test: reader, minibatcher, embedder
    reader, minibatcher, embedder = assemble_model_components(**params)

    # Loss function and the parameter updates responsible for learning
    loss = get_noise_contrastive_loss(embedder.get_output(), batch_size)
    updates = adam(loss, embedder.get_params())
    # Minibatcher updates slide the symbolic batch through the dataset
    updates.update(minibatcher.get_updates())
    # No inputs: the dataset is fully loaded into theano shared variables
    train = function([], loss, updates=updates)

    if read_data_async:
        macrobatches = reader.generate_dataset_parallel()
    else:
        macrobatches = reader.generate_dataset_serial()

    macrobatch_num = 0
    losses = []
    for signal_macrobatch, noise_macrobatch in macrobatches:
        macrobatch_num += 1
        if verbose:
            print('running macrobatch %d' % (macrobatch_num - 1))
        minibatcher.load_dataset(signal_macrobatch, noise_macrobatch)
        losses = []
        for batch_num in range(minibatcher.get_num_batches()):
            batch_loss = train()
            if batch_num == 49:
                # snapshot parameters just before the batch where NaNs tend
                # to appear; retained for debugger inspection
                values = embedder.get_param_values()
            if not np.isnan(batch_loss):
                losses.append(batch_loss)
            else:
                values = embedder.get_param_values()
                # fix: the original referenced an undefined name 'epoch'
                # here, raising NameError on the NaN path
                print("Warning: NaN loss reported for batch", batch_num,
                      " of macrobatch ", macrobatch_num)
        print('\tmacrobatch average loss: %f' % np.mean(losses))
    # NOTE(review): verdict is based on the last macrobatch's losses, which
    # matches the original control flow as far as it is visible here.
    return not np.isnan(np.mean(losses))
def test_macrobatch_at_fifty(params_yaml):
    ''' training falls into NaN problems at 50 iterations, and not because of
    anything to do with the parameters of the encoder / decoder (picks up fine
    in the next macrobatch. So what is different between the 49th, 50th and 51st
    mini-batch of each macrobatch? '''
    with open(params_yaml) as f:
        # safe_load: config is plain data; avoid the deprecated bare yaml.load
        params = yaml.safe_load(f)

    # parse what this function actually uses from the yaml file
    data_dir = os.path.abspath(params['data_dir'])
    fasta_files = [os.path.join(data_dir, f) for f in os.listdir(data_dir)
                   if f.endswith(params['file_suffixes'])]
    batch_size = params.get('batch_size', 1000)
    read_data_async = params.get('read_data_async', True)
    verbose = True

    # create sequence parser from the yaml config and wire up the inputs
    params['parser'] = SequenceParser(**params)
    params['files'] = fasta_files
    reader, minibatcher, embedder = assemble_model_components(**params)

    # Loss, parameter updates, and compiled training functions
    loss = get_noise_contrastive_loss(embedder.get_output(), batch_size)
    updates = adam(loss, embedder.get_params())
    updates.update(minibatcher.get_updates())
    train = function([], loss, updates=updates)
    # variant that also returns the activations, for the non-symbolic check
    train_with_outputs = function([], [loss, embedder.get_output()], updates=updates)

    if read_data_async:
        macrobatches = reader.generate_dataset_parallel()
    else:
        macrobatches = reader.generate_dataset_serial()

    macrobatch_num = 0
    losses = []
    for signal_macrobatch, noise_macrobatch in macrobatches:
        macrobatch_num += 1
        if verbose:
            print('running macrobatch %d' % (macrobatch_num - 1))
        minibatcher.load_dataset(signal_macrobatch, noise_macrobatch)
        losses = []
        for batch_num in range(minibatcher.get_num_batches()):
            if batch_num < 49:
                batch_loss = train()
            # NOTE(review): batch 49 itself hits neither branch, so it is
            # never trained and batch_loss is stale for it — confirm this
            # gap is intentional in the original debugging script.
            if batch_num == 50:
                # snapshot the still-healthy minibatch and recompute its
                # loss non-symbolically for comparison
                still_okay_batch = minibatcher.get_batch()
                okay_batch_signals = minibatcher.signal_examples.get_value()
                okay_batch_noise = minibatcher.noise_examples.get_value()
                batch_loss, batch_activation = train_with_outputs()
                nploss = get_noise_contrastive_nonsymbolic_values(batch_activation, batch_size)
                signal_tokens = [(reader.unigram_dictionary.get_token(q),
                                  reader.unigram_dictionary.get_token(c))
                                 for q, c in okay_batch_signals]
            if batch_num == 51:
                # snapshot the batch where the NaNs are suspected to appear
                bogus_batch = minibatcher.get_batch()
                bogus_signals = minibatcher.signal_examples.get_value()
                bogus_noise = minibatcher.noise_examples.get_value()
                batch_loss, batch_activation = train_with_outputs()
                nploss = get_noise_contrastive_nonsymbolic_values(batch_activation, batch_size)
            if batch_num > 51:
                batch_loss = train()
            if not np.isnan(batch_loss):
                print("Batch ", batch_num, " got numeric loss ", batch_loss)
                losses.append(batch_loss)
            else:
                # fix: the original referenced an undefined name 'epoch'
                # here, raising NameError on the NaN path
                print("Warning: NaN loss reported for batch", batch_num,
                      " of macrobatch ", macrobatch_num)
        print('\tmacrobatch average loss: %f' % np.mean(losses))
    return not np.isnan(np.mean(losses))
if __name__ == '__main__':
    # Usage: python test_macrobatch.py <params.yaml>
    params_yaml = sys.argv[1]
    # test_macrobatch_at_fifty(params_yaml)
    nan_status_test = test_macrobatch_for_nan(params_yaml)
    # raise explicitly instead of `assert`, which is stripped under -O
    if not nan_status_test:
        raise AssertionError('NaN loss encountered during training')
| 44.960352
| 160
| 0.571037
| 1,109
| 10,206
| 5.045987
| 0.197475
| 0.040207
| 0.020372
| 0.011437
| 0.807184
| 0.779664
| 0.719085
| 0.706934
| 0.706934
| 0.706934
| 0
| 0.014733
| 0.354889
| 10,206
| 227
| 161
| 44.960352
| 0.835207
| 0.179796
| 0
| 0.777778
| 0
| 0
| 0.069854
| 0.005781
| 0
| 0
| 0
| 0
| 0.007407
| 1
| 0.014815
| false
| 0
| 0.081481
| 0
| 0.125926
| 0.051852
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ee2e60842e000f442f9bb8fe7a19bd7637443d7b
| 9,802
|
py
|
Python
|
backend/api/migrations/0116_add_categories_to_approved_fuel.py
|
amichard/tfrs
|
ed3973016cc5c2ae48999d550a23b41a5ddad807
|
[
"Apache-2.0"
] | 18
|
2017-05-10T21:55:11.000Z
|
2021-03-01T16:41:32.000Z
|
backend/api/migrations/0116_add_categories_to_approved_fuel.py
|
amichard/tfrs
|
ed3973016cc5c2ae48999d550a23b41a5ddad807
|
[
"Apache-2.0"
] | 1,167
|
2017-03-04T00:18:43.000Z
|
2022-03-03T22:31:51.000Z
|
backend/api/migrations/0116_add_categories_to_approved_fuel.py
|
amichard/tfrs
|
ed3973016cc5c2ae48999d550a23b41a5ddad807
|
[
"Apache-2.0"
] | 48
|
2017-03-09T17:19:39.000Z
|
2022-02-24T16:38:17.000Z
|
from django.db import migrations
from django.db.migrations import RunPython
def add_categories(apps, schema_editor):
    """
    Adds additional fuel types for credit calculation.

    Assigns each approved fuel its default carbon intensity, energy
    density, and energy effectiveness ratio categories, looked up by
    category name.
    """
    db_alias = schema_editor.connection.alias
    approved_fuel = apps.get_model('api', 'ApprovedFuel')
    default_carbon_intensity_category = apps.get_model(
        'api', 'DefaultCarbonIntensityCategory')
    energy_density_category = apps.get_model(
        'api', 'EnergyDensityCategory')
    energy_effectiveness_ratio_category = apps.get_model(
        'api', 'EnergyEffectivenessRatioCategory')

    # Category names shared by several fuels, named once to keep the
    # mapping below readable. The concatenated strings are byte-identical
    # to the names stored in the category tables.
    diesel_eer = ("Petroleum-based diesel fuel or renewable fuel in relation "
                  "to diesel class fuel")
    gasoline_eer = ("Petroleum-based gasoline, natural gas-based gasoline or "
                    "renewable fuel in relation to gasoline class fuel")
    gasoline_biomass_ed = ("Petroleum-based gasoline, natural gas-based "
                           "gasoline or gasoline produced from biomass")
    diesel_biomass_ed = ("Petroleum-based diesel fuel or diesel fuel "
                         "produced from biomass")
    renewable_diesel_dci = "Renewable Fuel in relation to diesel class fuel"

    # fuel name -> (default carbon intensity, energy density,
    #               energy effectiveness ratio) category names
    categories_by_fuel = {
        "Biodiesel": (renewable_diesel_dci, "Biodiesel", diesel_eer),
        "CNG": ("CNG", "CNG", "CNG"),
        "Electricity": ("Electricity", "Electricity", "Electricity"),
        "Ethanol": ("Renewable Fuel in relation to gasoline class fuel",
                    "Ethanol", gasoline_eer),
        "HDRD": (renewable_diesel_dci,
                 "Hydrogenation-derived renewable diesel fuel", diesel_eer),
        "Hydrogen": ("Hydrogen", "Hydrogen", "Hydrogen"),
        "LNG": ("LNG", "LNG", "LNG"),
        "Natural gas-based gasoline": ("Natural gas-based gasoline",
                                       gasoline_biomass_ed, gasoline_eer),
        "Petroleum-based diesel": ("Petroleum-based diesel",
                                   diesel_biomass_ed, diesel_eer),
        "Petroleum-based gasoline": ("Petroleum-based gasoline",
                                     gasoline_biomass_ed, gasoline_eer),
        "Propane": ("Propane", "Propane", "Propane"),
        "Renewable diesel": ("Petroleum-based diesel",
                             diesel_biomass_ed, diesel_eer),
        "Renewable gasoline": ("Petroleum-based gasoline",
                               gasoline_biomass_ed, gasoline_eer),
    }

    # One update per fuel, exactly as the original's 12 hand-written stanzas.
    for fuel_name, (dci_name, ed_name, eer_name) in categories_by_fuel.items():
        approved_fuel.objects.using(db_alias).filter(
            name=fuel_name
        ).update(
            default_carbon_intensity_category=default_carbon_intensity_category
            .objects.using(db_alias).get(name=dci_name),
            energy_density_category=energy_density_category
            .objects.using(db_alias).get(name=ed_name),
            energy_effectiveness_ratio_category=energy_effectiveness_ratio_category
            .objects.using(db_alias).get(name=eer_name)
        )
def remove_categories(apps, schema_editor):
    """
    Removes the credit calculation fuel types.

    Reverse operation: clears all three category references on every
    approved fuel.
    """
    fuel_model = apps.get_model('api', 'ApprovedFuel')
    fuels = fuel_model.objects.using(schema_editor.connection.alias)
    fuels.update(
        default_carbon_intensity_category=None,
        energy_density_category=None,
        energy_effectiveness_ratio_category=None
    )
class Migration(migrations.Migration):
    """
    Attaches the functions for the migrations
    """
    # Must be applied after the previous api migration.
    dependencies = [
        ('api', '0115_auto_20190411_1615'),
    ]
    # RunPython pairs the forward operation with its reverse so the
    # migration can also be unapplied.
    operations = [
        RunPython(
            add_categories,
            remove_categories
        )
    ]
| 34.758865
| 79
| 0.655683
| 1,024
| 9,802
| 5.972656
| 0.077148
| 0.06295
| 0.121321
| 0.16465
| 0.889307
| 0.868869
| 0.8638
| 0.8638
| 0.823741
| 0.80363
| 0
| 0.002214
| 0.262804
| 9,802
| 281
| 80
| 34.882562
| 0.844174
| 0.013569
| 0
| 0.776
| 0
| 0
| 0.191209
| 0.013198
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008
| false
| 0
| 0.008
| 0
| 0.028
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ee620329f76a4e620742add8ecc6f1735df9583f
| 125
|
py
|
Python
|
bootleg/embeddings/__init__.py
|
mleszczy/bootleg
|
162d74001cdfbbe146753393641d549e0328acb1
|
[
"Apache-2.0"
] | 1
|
2021-01-11T18:40:09.000Z
|
2021-01-11T18:40:09.000Z
|
bootleg/embeddings/__init__.py
|
mleszczy/bootleg
|
162d74001cdfbbe146753393641d549e0328acb1
|
[
"Apache-2.0"
] | null | null | null |
bootleg/embeddings/__init__.py
|
mleszczy/bootleg
|
162d74001cdfbbe146753393641d549e0328acb1
|
[
"Apache-2.0"
] | null | null | null |
# Re-export all embedding implementations at the package level so callers
# can import them directly from bootleg.embeddings without knowing which
# submodule defines each class.
from .base_emb import *
from .entity_embs import *
from .type_embs import *
from .title_embs import *
from .kg_embs import *
| 20.833333
| 26
| 0.76
| 20
| 125
| 4.5
| 0.45
| 0.444444
| 0.466667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 125
| 5
| 27
| 25
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ee7623b4458f6c0475f6efc42ae285e926bfe235
| 4,380
|
py
|
Python
|
lib/axon/test/test_errors.py
|
intellimath/pyaxon
|
fcadf741bedd71fdb21d6e8b865da2a22f2bd1fb
|
[
"MIT"
] | 19
|
2015-03-02T19:38:51.000Z
|
2021-11-16T13:48:04.000Z
|
lib/axon/test/test_errors.py
|
intellimath/pyaxon
|
fcadf741bedd71fdb21d6e8b865da2a22f2bd1fb
|
[
"MIT"
] | null | null | null |
lib/axon/test/test_errors.py
|
intellimath/pyaxon
|
fcadf741bedd71fdb21d6e8b865da2a22f2bd1fb
|
[
"MIT"
] | 4
|
2015-02-07T13:29:43.000Z
|
2020-01-01T19:20:53.000Z
|
# coding: utf-8
from __future__ import unicode_literals
import unittest
from axon import loads, dumps
from axon.errors import AxonError
class AxonErrorTestCase(unittest.TestCase):
    """Error-handling tests: malformed AXON input must raise AxonError.

    NOTE(review): the original tests wrapped loads() in try/except AxonError
    with a bare pass, so they could never fail even if parsing silently
    succeeded; assertRaises now makes the expectation explicit.

    NOTE(review): the leading whitespace inside the multi-line fixtures for
    the invalid-indent tests may have been lost upstream of this view —
    verify those literals against the original file.
    """

    def setUp(self):
        pass

    def _assert_load_fails(self, text):
        """Assert that parsing *text* raises AxonError."""
        with self.assertRaises(AxonError):
            loads(text)

    def test_unexpected_end_1(self):
        self._assert_load_fails('a{')

    def test_unexpected_end_11(self):
        self._assert_load_fails('{')

    def test_unexpected_end_12(self):
        self._assert_load_fails('[')

    def test_unexpected_end_2(self):
        self._assert_load_fails('"abc')

    def test_unexpected_end_3(self):
        self._assert_load_fails('''"abc
qwertyuiop
''')

    def test_unexpected_end_4(self):
        self._assert_load_fails('a{name:')

    def test_unexpected_end_41(self):
        self._assert_load_fails('{name:')

    def test_unexpected_end_5(self):
        self._assert_load_fails('a{name:}')

    def test_unexpected_end_51(self):
        self._assert_load_fails('{name:}')

    def test_unexpected_end_52(self):
        self._assert_load_fails('{name:"anne" 17}')

    def test_unexpected_end_6(self):
        self._assert_load_fails('a{name:1 age:')

    def test_unexpected_end_61(self):
        self._assert_load_fails('{name:1 age:')

    def test_unexpected_end_7(self):
        self._assert_load_fails('[1 2 ')

    def test_unexpected_end_71(self):
        self._assert_load_fails('[1 2 a:')

    def test_unexpected_end_72(self):
        self._assert_load_fails('[1 2 a:7')

    def test_invalid_indent_1(self):
        self._assert_load_fails('''
aaa
a:1
b: 2
''')

    def test_invalid_indent_2(self):
        self._assert_load_fails('''
aaa
100
200
''')

    def test_invalid_indent_3(self):
        self._assert_load_fails('''
aaa
a:1
b: 2
''')

    def test_invalid_indent_4(self):
        self._assert_load_fails('''
aaa
100
200
''')

    def test_invalid_indent_11(self):
        self._assert_load_fails('''
aaa
bbb
a:1
b: 2
''')

    def test_invalid_indent_12(self):
        self._assert_load_fails('''
aaa
bbb:
100
200
''')

    def test_invalid_indent_13(self):
        self._assert_load_fails('''
aaa
bbb
a:1
b: 2
''')

    def test_invalid_indent_14(self):
        self._assert_load_fails('''
aaa
bbb
100
200
''')

    def test_invalid_indent_15(self):
        self._assert_load_fails('''
aaa
bbb
100
200
ccc
a:1
b:2
''')

    def test_invalid_key_1(self):
        # Tuples are not valid mapping keys for dumps().
        with self.assertRaises(TypeError):
            dumps([{('foo', 'bar'): True}])
def suite():
    """Build and return a TestSuite holding all AxonError test cases."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(AxonErrorTestCase))
    return tests
| 17.450199
| 56
| 0.470091
| 465
| 4,380
| 4.255914
| 0.152688
| 0.091966
| 0.138959
| 0.169783
| 0.788277
| 0.765538
| 0.755432
| 0.755432
| 0.755432
| 0.755432
| 0
| 0.036556
| 0.4379
| 4,380
| 250
| 57
| 17.52
| 0.767262
| 0.002968
| 0
| 0.666667
| 0
| 0
| 0.03375
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0.160494
| 0.024691
| 0
| 0.203704
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
a01a2860be01675481dceaa0a94c9e14017bb495
| 2,434
|
py
|
Python
|
python/lib/measure/compute_sliding_slope.py
|
timtyree/bgmc
|
891e003a9594be9e40c53822879421c2b8c44eed
|
[
"MIT"
] | null | null | null |
python/lib/measure/compute_sliding_slope.py
|
timtyree/bgmc
|
891e003a9594be9e40c53822879421c2b8c44eed
|
[
"MIT"
] | null | null | null |
python/lib/measure/compute_sliding_slope.py
|
timtyree/bgmc
|
891e003a9594be9e40c53822879421c2b8c44eed
|
[
"MIT"
] | null | null | null |
from .compute_slope import *
import numpy as np
def compute_sliding_slope_loglog(x_values, y_values, x_min=None, window_width=None, stepsize=None):
    '''Measure the local log-log slope of y(x) over a sliding window.

    x_values and y_values are numpy arrays of the values before computing
    the log-log values.  Defaults: x_min is min(x_values), stepsize is the
    mean spacing of x_values, and window_width is 30 * stepsize.

    Example Usage:
        xavg_values, slope_values, Rsquared_values = compute_sliding_slope_loglog(x_values, y_values)

    Returns (window centers, slopes, R-squared values) as numpy arrays.
    '''
    if x_min is None:
        x_min = np.min(x_values)
    if stepsize is None:
        stepsize = np.mean(np.diff(x_values))
    if window_width is None:
        window_width = 30 * stepsize
    # left edges of every window that fits inside the data range
    x_min_values = np.arange(x_min, np.max(x_values) - window_width, stepsize)
    # compute the slope over each sliding window
    slope_lst = []
    Rsquared_lst = []
    # fix: loop variable renamed so it no longer shadows the x_min parameter
    for window_start in x_min_values:
        # slice the window
        window_end = window_start + window_width
        boo = (x_values >= window_start) & (x_values <= window_end)
        # measure the slope via OLS on the log-log slice
        dict_output = compute_95CI_ols(np.log(x_values[boo]), np.log(y_values[boo]))
        slope_lst.append(dict_output['m'])
        Rsquared_lst.append(dict_output['Rsquared'])
    slope_values = np.array(slope_lst)
    Rsquared_values = np.array(Rsquared_lst)
    # window centers on the (linear) x axis
    xavg_values = x_min_values + window_width / 2
    return xavg_values, slope_values, Rsquared_values
def compute_sliding_slope_linlin(x_values, y_values, x_min=None, window_width=None, stepsize=None):
    '''Measure the local linear slope of y(x) over a sliding window.

    x_values and y_values are numpy arrays of the values.  Defaults: x_min
    is min(x_values), stepsize is the mean spacing of x_values, and
    window_width is 40 * stepsize.

    Example Usage:
        xavg_values, slope_values, Rsquared_values = compute_sliding_slope_linlin(x_values, y_values)

    Returns (window centers, slopes, R-squared values) as numpy arrays.
    '''
    # fix: the docstring example previously named the loglog function
    if x_min is None:
        x_min = np.min(x_values)
    if stepsize is None:
        stepsize = np.mean(np.diff(x_values))
    if window_width is None:
        window_width = 40 * stepsize
    # left edges of every window that fits inside the data range
    x_min_values = np.arange(x_min, np.max(x_values) - window_width, stepsize)
    # compute the slope over each sliding window
    slope_lst = []
    Rsquared_lst = []
    # fix: loop variable renamed so it no longer shadows the x_min parameter
    for window_start in x_min_values:
        # slice the window
        window_end = window_start + window_width
        boo = (x_values >= window_start) & (x_values <= window_end)
        # measure the slope via OLS on the raw (linear) slice
        dict_output = compute_95CI_ols(x_values[boo], y_values[boo])
        slope_lst.append(dict_output['m'])
        Rsquared_lst.append(dict_output['Rsquared'])
    slope_values = np.array(slope_lst)
    Rsquared_values = np.array(Rsquared_lst)
    # window centers on the x axis
    xavg_values = x_min_values + window_width / 2
    return xavg_values, slope_values, Rsquared_values
| 37.446154
| 96
| 0.714873
| 382
| 2,434
| 4.23822
| 0.149215
| 0.049413
| 0.03706
| 0.034589
| 0.922174
| 0.922174
| 0.922174
| 0.922174
| 0.906733
| 0.906733
| 0
| 0.005063
| 0.188578
| 2,434
| 64
| 97
| 38.03125
| 0.814684
| 0.202958
| 0
| 0.818182
| 0
| 0
| 0.009484
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.045455
| 0
| 0.136364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4e4a0c42f04041c7858787ac5ec146062513ebcd
| 35
|
py
|
Python
|
boa3_test/example/function_test/StringFunction.py
|
jplippi/neo3-boa
|
052be4adebb665113715bb80067d954f7ad85ad5
|
[
"Apache-2.0"
] | null | null | null |
boa3_test/example/function_test/StringFunction.py
|
jplippi/neo3-boa
|
052be4adebb665113715bb80067d954f7ad85ad5
|
[
"Apache-2.0"
] | null | null | null |
boa3_test/example/function_test/StringFunction.py
|
jplippi/neo3-boa
|
052be4adebb665113715bb80067d954f7ad85ad5
|
[
"Apache-2.0"
] | null | null | null |
def Main() -> str:
    """Return the constant string '42'."""
    answer = '42'
    return answer
| 11.666667
| 18
| 0.514286
| 5
| 35
| 3.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 0.285714
| 35
| 2
| 19
| 17.5
| 0.64
| 0
| 0
| 0
| 0
| 0
| 0.057143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
4e63c27ad3bddc04758eb4e9a7dfe837b1d69447
| 1,696
|
py
|
Python
|
plugins/dbnd-spark/benchmark/benchmark_histograms.py
|
ipattarapong/dbnd
|
7bd65621c46c73e078eb628f994127ad4c7dbd1a
|
[
"Apache-2.0"
] | 224
|
2020-01-02T10:46:37.000Z
|
2022-03-02T13:54:08.000Z
|
plugins/dbnd-spark/benchmark/benchmark_histograms.py
|
ipattarapong/dbnd
|
7bd65621c46c73e078eb628f994127ad4c7dbd1a
|
[
"Apache-2.0"
] | 16
|
2020-03-11T09:37:58.000Z
|
2022-01-26T10:22:08.000Z
|
plugins/dbnd-spark/benchmark/benchmark_histograms.py
|
ipattarapong/dbnd
|
7bd65621c46c73e078eb628f994127ad4c7dbd1a
|
[
"Apache-2.0"
] | 24
|
2020-03-24T13:53:50.000Z
|
2022-03-22T11:55:18.000Z
|
from .utils import generate_reports
def test_histograms_and_columns_variations():
    """Benchmark histogram computation while growing the column count.

    Runs a single 1M-row input with histograms enabled, sweeping the
    column-count multiplier; sampling is disabled for every run.
    """
    experiment = "histograms_and_columns_variations"
    parameter_grid = dict(
        input_file=[
            "s3://dbnd-dev-playground/data/benchmark_10_columns_1M_rows.csv"
        ],
        output_file=["out"],
        to_pandas=[False],
        with_histograms=[True],
        sampling_type=[None],
        sampling_fraction=[None],
        columns_number_multiplicator=[1.5, 2, 3, 5],
    )
    generate_reports(experiment, parameter_grid)
def test_histograms_default():
    """Benchmark the default configuration on 1M- and 10M-row inputs.

    Compares runs with histograms off vs. on; no sampling and no extra
    column multiplication.
    """
    experiment = "histograms_default"
    parameter_grid = dict(
        input_file=[
            "s3://dbnd-dev-playground/data/benchmark_10_columns_1M_rows.csv",
            "s3://dbnd-dev-playground/data/benchmark_10_columns_10M_rows.csv",
        ],
        output_file=["out"],
        to_pandas=[False],
        with_histograms=[False, True],
        sampling_type=[None],
        sampling_fraction=[None],
        columns_number_multiplicator=[1],
    )
    generate_reports(experiment, parameter_grid)
def test_histograms_and_sampling_variations():
    """Benchmark histogram computation under different sampling strategies.

    Sweeps sampling type ("random" / "first") and fraction over the 1M-
    and 10M-row inputs, with histograms always enabled.
    """
    experiment = "histograms_and_sampling_variations"
    parameter_grid = dict(
        input_file=[
            "s3://dbnd-dev-playground/data/benchmark_10_columns_1M_rows.csv",
            "s3://dbnd-dev-playground/data/benchmark_10_columns_10M_rows.csv",
        ],
        output_file=["out"],
        to_pandas=[False],
        with_histograms=[True],
        sampling_type=["random", "first"],
        sampling_fraction=[0.01, 0.05, 0.1, 0.2, 0.5],
        columns_number_multiplicator=[1],
    )
    generate_reports(experiment, parameter_grid)
| 30.836364
| 78
| 0.618514
| 194
| 1,696
| 5.010309
| 0.257732
| 0.04321
| 0.046296
| 0.097737
| 0.880658
| 0.771605
| 0.771605
| 0.771605
| 0.771605
| 0.704733
| 0
| 0.031758
| 0.238797
| 1,696
| 54
| 79
| 31.407407
| 0.721146
| 0
| 0
| 0.666667
| 1
| 0
| 0.428066
| 0.272995
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.022222
| 0
| 0.088889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4ea623be4bebe3790d52c8662931450840d57aa3
| 23,204
|
py
|
Python
|
py/libbolt_tests.py
|
jakinyele/libbolt
|
595cd87b9036416cc2c9992038a5a04f5f4e70fa
|
[
"MIT"
] | 45
|
2018-08-14T21:08:42.000Z
|
2018-08-27T21:42:06.000Z
|
py/libbolt_tests.py
|
jakinyele/libbolt
|
595cd87b9036416cc2c9992038a5a04f5f4e70fa
|
[
"MIT"
] | 14
|
2019-06-16T14:34:26.000Z
|
2021-04-29T05:38:40.000Z
|
py/libbolt_tests.py
|
jakinyele/libbolt
|
595cd87b9036416cc2c9992038a5a04f5f4e70fa
|
[
"MIT"
] | 7
|
2018-08-14T21:50:43.000Z
|
2018-08-18T20:40:34.000Z
|
import unittest
import libbolt
import ast, random, json
def rand_hex(stringLength=10):
    """Generate a random hex string of fixed length """
    digits = '0123456789abcdef'
    return ''.join(random.choice(digits) for _ in range(stringLength))
def malformed_token(token):
    """Re-serialize *token* as JSON with every string value corrupted.

    The token is parsed as a Python literal dict; each string value keeps
    its prefix but has its last 4 characters replaced by random hex
    digits.  Non-string values pass through unchanged.
    """
    parsed = ast.literal_eval(token)
    mangled = {
        key: (value[:-4] + rand_hex(4)) if type(value) == str else value
        for key, value in parsed.items()
    }
    return json.dumps(mangled)
def malformed_proof(proof):
    """Corrupt a serialized proof by rewriting digits.

    Equivalent to the cascaded replaces 0->1 then 1->2: because the
    second replace also hits the '1's produced by the first, every '0'
    and every '1' in the input ends up as '2'.
    """
    return proof.translate(str.maketrans("01", "22"))
class BoltEstablishTests(unittest.TestCase):
    """Tests for the channel-establish phase of the Bolt protocol.

    setUp creates a valid customer/merchant channel pair plus a
    deliberately mismatched "bad" customer state (larger initial balance
    than the merchant was initialized with) used by the failure tests.
    """
    def setUp(self):
        # Load the native bolt shared library and set up a channel with
        # customer balance 1000 and merchant balance 100.
        self.bolt = libbolt.Libbolt('target/{}/{}bolt.{}'.format(libbolt.mode, libbolt.prefix, libbolt.ext))
        self.channel_state = self.bolt.channel_setup("Test Channel")
        self.b0_cust = 1000
        self.b0_merch = 100
        (self.channel_token, self.merch_state, self.channel_state) = self.bolt.bidirectional_init_merchant(self.channel_state, "Bob")
        (channel_token, self.cust_state) = self.bolt.bidirectional_init_customer(self.channel_token, self.b0_cust, self.b0_merch, "Alice")
        # generate some bad stuff here
        # "bad" customer claims a larger balance than was agreed above
        larger_b0_cust = 2000
        (channel_token_bad, self.cust_state_bad) = self.bolt.bidirectional_init_customer(self.channel_token, larger_b0_cust, self.b0_merch, "Alice")
        # set them
        self.channel_token = channel_token
        self.channel_token_bad = channel_token_bad
    def test_establish_works_okay(self):
        """
        Establish protocol common case works
        """
        # customer proof -> merchant close token -> customer verify ->
        # merchant pay token -> customer finalizes the channel
        (channel_token, cust_state, com, com_proof) = self.bolt.bidirectional_establish_customer_generate_proof(self.channel_token, self.cust_state)
        cust_state_dict = json.loads(cust_state)
        close_token = self.bolt.bidirectional_establish_merchant_issue_close_token(self.channel_state, com, com_proof, cust_state_dict["wallet"]["channelId"], self.b0_cust, self.b0_merch, self.merch_state)
        self.assertTrue(close_token is not None)
        (is_token_valid, channel_state, cust_state) = self.bolt.bidirectional_establish_customer_verify_close_token(self.channel_state, cust_state, close_token)
        self.assertTrue(is_token_valid)
        pay_token = self.bolt.bidirectional_establish_merchant_issue_pay_token(channel_state, com, self.merch_state)
        self.assertTrue(pay_token is not None)
        (is_channel_established, channel_state, cust_state) = self.bolt.bidirectional_establish_customer_final(channel_state, cust_state, pay_token)
        self.assertTrue(is_channel_established)
        #print("Establish protocol works as expected.")
    def test_establish_merchant_issue_close_token_fail_as_expected(self):
        """
        Initial com proof fails as expected when commitment opening doesn't match expected initial customer and merchant balances
        """
        # use the mismatched state from setUp; merchant must refuse a close token
        (channel_token, cust_state, com, com_proof) = self.bolt.bidirectional_establish_customer_generate_proof(self.channel_token_bad, self.cust_state_bad)
        cust_state_dict = json.loads(cust_state)
        close_token = self.bolt.bidirectional_establish_merchant_issue_close_token(self.channel_state, com, com_proof, cust_state_dict["wallet"]["channelId"], self.b0_cust, self.b0_merch, self.merch_state)
        self.assertTrue(close_token is None)
        #print("Establish protocol fail works as expected.")
    def test_establish_customer_verify_close_token_fail_as_expected(self):
        """
        Not-signed close token fails to verify
        """
        # a syntactically valid but unsigned token must fail verification
        close_token = json.dumps({"h":"b896166d76a7bd02565b6431dca27da4c290e234edfbca8d9189f78311e18f66a138684c91efdf7fd1c4b192bf27f68e",
                                  "H":"add6c20994749185fb7d44f8f5f1f3dbbcd250e4922a9c6c9017c25dda670d94c4b279b7f0fccd56916bf737a29a1938"})
        (channel_token, cust_state, com, com_proof) = self.bolt.bidirectional_establish_customer_generate_proof(self.channel_token, self.cust_state)
        (is_token_valid, channel_state, cust_state) = self.bolt.bidirectional_establish_customer_verify_close_token(self.channel_state, cust_state, close_token)
        self.assertTrue(is_token_valid is False)
    def test_establish_merchant_issue_pay_token_fail_as_expected(self):
        """
        Specifying a different commitment leads to an invalid pay token as expected
        """
        # pay token is issued against a commitment that does not match the proof
        bad_com = json.dumps({"c":"852a57e24a2192e1cea19157e44f92d58369751f2012bc1f4a4312a89a63c74a92a4cb1d362b37ae0eda3b3bd1333502"})
        (channel_token, cust_state, com, com_proof) = self.bolt.bidirectional_establish_customer_generate_proof(self.channel_token, self.cust_state)
        cust_state_dict = json.loads(cust_state)
        close_token = self.bolt.bidirectional_establish_merchant_issue_close_token(self.channel_state, com, com_proof, cust_state_dict["wallet"]["channelId"], self.b0_cust, self.b0_merch, self.merch_state)
        self.assertTrue(close_token is not None)
        (is_token_valid, channel_state, cust_state) = self.bolt.bidirectional_establish_customer_verify_close_token(self.channel_state, cust_state, close_token)
        self.assertTrue(is_token_valid)
        pay_token = self.bolt.bidirectional_establish_merchant_issue_pay_token(channel_state, bad_com, self.merch_state)
        self.assertTrue(pay_token is not None)
        # final step must reject the pay token built from the wrong commitment
        (is_channel_established, channel_state, cust_state) = self.bolt.bidirectional_establish_customer_final(channel_state, cust_state, pay_token)
        self.assertFalse(is_channel_established)
    def test_establish_not_complete_without_close_token(self):
        """
        Test that missing close token prevents the customer from establishing
        """
        # skip the close-token exchange entirely; finalization must fail
        (channel_token, cust_state, com, com_proof) = self.bolt.bidirectional_establish_customer_generate_proof(self.channel_token, self.cust_state)
        pay_token = self.bolt.bidirectional_establish_merchant_issue_pay_token(self.channel_state, com, self.merch_state)
        self.assertTrue(pay_token is not None)
        (is_channel_established, channel_state, cust_state) = self.bolt.bidirectional_establish_customer_final(self.channel_state, cust_state, pay_token)
        self.assertFalse(is_channel_established)
    def test_error_handling_with_serialization(self):
        """
        Test that malformed close and/or pay token results in failure
        :return:
        """
        (channel_token, cust_state, com, com_proof) = self.bolt.bidirectional_establish_customer_generate_proof(self.channel_token, self.cust_state)
        cust_state_dict = json.loads(cust_state)
        close_token = self.bolt.bidirectional_establish_merchant_issue_close_token(self.channel_state, com, com_proof, cust_state_dict["wallet"]["channelId"], self.b0_cust, self.b0_merch, self.merch_state)
        self.assertTrue(close_token is not None)
        # corrupt the close token; verification reports None (error), not False
        malformed_close_token = malformed_token(close_token)
        (is_token_valid, bad_channel_state, bad_cust_state) = self.bolt.bidirectional_establish_customer_verify_close_token(self.channel_state, cust_state, malformed_close_token)
        self.assertTrue(is_token_valid is None)
        (is_token_valid, self.channel_state, cust_state) = self.bolt.bidirectional_establish_customer_verify_close_token(self.channel_state, cust_state, close_token)
        pay_token = self.bolt.bidirectional_establish_merchant_issue_pay_token(self.channel_state, com, self.merch_state)
        # corrupt the pay token as well; establishment must not complete
        malformed_pay_token = malformed_token(pay_token)
        (is_channel_established, channel_state, cust_state) = self.bolt.bidirectional_establish_customer_final(self.channel_state, cust_state, malformed_pay_token)
        self.assertFalse(is_channel_established)
class BoltPayTests(unittest.TestCase):
    """Tests for the payment phase of the Bolt protocol.

    setUp runs the full establish phase so every test starts from an
    established channel (customer balance 500, merchant balance 10).
    """
    def setUp(self):
        """
        Setup init customer/merchant state and establish phase of Bolt protocol
        :return:
        """
        self.bolt = libbolt.Libbolt('target/{}/{}bolt.{}'.format(libbolt.mode, libbolt.prefix, libbolt.ext))
        self.channel_state = self.bolt.channel_setup("Test Channel")
        self.b0_cust = 500
        self.b0_merch = 10
        (self.channel_token, self.merch_state, self.channel_state) = self.bolt.bidirectional_init_merchant(self.channel_state, "Bob")
        (self.channel_token, self.cust_state) = self.bolt.bidirectional_init_customer(self.channel_token, self.b0_cust, self.b0_merch, "Alice")
        # complete the whole establish handshake before any payment test runs
        (self.channel_token, self.cust_state, com, com_proof) = self.bolt.bidirectional_establish_customer_generate_proof(self.channel_token, self.cust_state)
        cust_state_dict = json.loads(self.cust_state)
        close_token = self.bolt.bidirectional_establish_merchant_issue_close_token(self.channel_state, com, com_proof, cust_state_dict["wallet"]["channelId"], self.b0_cust, self.b0_merch, self.merch_state)
        self.assertTrue(close_token is not None)
        (is_token_valid, self.channel_state, self.cust_state) = self.bolt.bidirectional_establish_customer_verify_close_token(self.channel_state, self.cust_state, close_token)
        self.assertTrue(is_token_valid)
        pay_token = self.bolt.bidirectional_establish_merchant_issue_pay_token(self.channel_state, com, self.merch_state)
        self.assertTrue(pay_token is not None)
        (is_channel_established, self.channel_state, self.cust_state) = self.bolt.bidirectional_establish_customer_final(self.channel_state, self.cust_state, pay_token)
        self.assertTrue(is_channel_established)
    def test_pay_protocol_works(self):
        """
        Payment protocol works
        :return:
        """
        # happy path: proof -> close token -> revoke token -> pay token -> verify
        amount = 10
        (payment_proof, new_cust_state) = self.bolt.bidirectional_pay_generate_payment_proof(self.channel_state, self.cust_state, amount)
        (new_close_token, self.merch_state) = self.bolt.bidirectional_pay_verify_payment_proof(self.channel_state, payment_proof, self.merch_state)
        (revoke_token, self.cust_state) = self.bolt.bidirectional_pay_generate_revoke_token(self.channel_state, self.cust_state, new_cust_state, new_close_token)
        (pay_token, self.merch_state) = self.bolt.bidirectional_pay_verify_revoke_token(revoke_token, self.merch_state)
        (self.cust_state, is_pay_valid) = self.bolt.bidirectional_pay_verify_payment_token(self.channel_state, self.cust_state, pay_token)
        self.assertTrue(is_pay_valid)
    def test_pay_protocol_bad_payment_proof_fail_handled(self):
        """
        Payment protocol fails as expected when customer sends a bad payment proof
        :return:
        """
        amount = 15
        (payment_proof, new_cust_state) = self.bolt.bidirectional_pay_generate_payment_proof(self.channel_state, self.cust_state, amount)
        # merchant must refuse a corrupted proof (no close token issued)
        bad_payment_proof = malformed_proof(payment_proof)
        (new_close_token, self.merch_state) = self.bolt.bidirectional_pay_verify_payment_proof(self.channel_state, bad_payment_proof, self.merch_state)
        self.assertTrue(new_close_token is None)
    def test_pay_protocol_bad_close_token_fail_handled(self):
        """
        Payment protocol fails as expected when merchant returns a malformed/bad close token
        :return:
        """
        amount = 10
        (payment_proof, new_cust_state) = self.bolt.bidirectional_pay_generate_payment_proof(self.channel_state, self.cust_state, amount)
        (new_close_token, self.merch_state) = self.bolt.bidirectional_pay_verify_payment_proof(self.channel_state, payment_proof, self.merch_state)
        # customer must refuse to revoke against a corrupted close token
        bad_close_token = malformed_token(new_close_token)
        (revoke_token, self.cust_state) = self.bolt.bidirectional_pay_generate_revoke_token(self.channel_state, self.cust_state, new_cust_state, bad_close_token)
        self.assertTrue(revoke_token is None)
    def test_pay_protocol_bad_revoke_token_fail_handled(self):
        """
        Payment protocol fails as expected when customer sends a bad revoke token
        :return:
        """
        amount = 20
        (payment_proof, new_cust_state) = self.bolt.bidirectional_pay_generate_payment_proof(self.channel_state, self.cust_state, amount)
        (new_close_token, self.merch_state) = self.bolt.bidirectional_pay_verify_payment_proof(self.channel_state, payment_proof, self.merch_state)
        (revoke_token, self.cust_state) = self.bolt.bidirectional_pay_generate_revoke_token(self.channel_state, self.cust_state, new_cust_state, new_close_token)
        bad_revoke_token = malformed_token(revoke_token)
        # NOTE(review): result deliberately bound to a local merch_state so the
        # failed exchange does not clobber self.merch_state
        (pay_token, merch_state) = self.bolt.bidirectional_pay_verify_revoke_token(bad_revoke_token, self.merch_state)
        self.assertTrue(pay_token is None)
    def test_pay_protocol_bad_payment_token_fail_handled(self):
        """
        Payment protocol fails as expected when merchant returns a malformed pay token
        :return:
        """
        amount = 25
        (payment_proof, new_cust_state) = self.bolt.bidirectional_pay_generate_payment_proof(self.channel_state, self.cust_state, amount)
        (new_close_token, self.merch_state) = self.bolt.bidirectional_pay_verify_payment_proof(self.channel_state, payment_proof, self.merch_state)
        (revoke_token, self.cust_state) = self.bolt.bidirectional_pay_generate_revoke_token(self.channel_state, self.cust_state, new_cust_state, new_close_token)
        (pay_token, self.merch_state) = self.bolt.bidirectional_pay_verify_revoke_token(revoke_token, self.merch_state)
        # customer-side verification reports None (error) for a corrupted pay token
        bad_pay_token = malformed_token(pay_token)
        (cust_state, is_pay_valid) = self.bolt.bidirectional_pay_verify_payment_token(self.channel_state, self.cust_state, bad_pay_token)
        self.assertTrue(is_pay_valid is None)
class BoltMultiChannelTests(unittest.TestCase):
    """Tests that one merchant can maintain several concurrent channels.

    Two customers (Alice, Charlie) each open a channel against the same
    merchant state; payments on one channel must not affect the other.
    """
    def setUp(self):
        """
        Setup init customer/merchant state and establish phase of Bolt protocol
        :return:
        """
        self.bolt = libbolt.Libbolt('target/{}/{}bolt.{}'.format(libbolt.mode, libbolt.prefix, libbolt.ext))
        self.channel_state = self.bolt.channel_setup("Test Channel")
        self.b0_alice = self.b0_charlie = 150
        self.b0_merch = 5
        (self.channel_token, self.merch_state, self.channel_state) = self.bolt.bidirectional_init_merchant(self.channel_state, "Bob")
        (self.channel_token_a, self.alice_state) = self.bolt.bidirectional_init_customer(self.channel_token, self.b0_alice, self.b0_merch, "Alice")
        (self.channel_token_c, self.charlie_state) = self.bolt.bidirectional_init_customer(self.channel_token, self.b0_charlie, self.b0_merch, "Charlie")
    def _establish_channel(self, channel_token, channel_state, cust_state, pkc, b0_cust, b0_merch):
        # Run the full establish handshake for one customer; asserts each
        # step succeeds and returns the updated (token, channel, customer) triple.
        (channel_token, cust_state, com, com_proof) = self.bolt.bidirectional_establish_customer_generate_proof(channel_token, cust_state)
        close_token = self.bolt.bidirectional_establish_merchant_issue_close_token(channel_state, com, com_proof, pkc, b0_cust, b0_merch, self.merch_state)
        self.assertTrue(close_token is not None)
        (is_token_valid, channel_state, cust_state) = self.bolt.bidirectional_establish_customer_verify_close_token(channel_state, cust_state, close_token)
        self.assertTrue(is_token_valid)
        pay_token = self.bolt.bidirectional_establish_merchant_issue_pay_token(channel_state, com, self.merch_state)
        self.assertTrue(pay_token is not None)
        (is_channel_established, channel_state, cust_state) = self.bolt.bidirectional_establish_customer_final(channel_state, cust_state, pay_token)
        self.assertTrue(is_channel_established)
        return channel_token, channel_state, cust_state
    def _pay_on_channel(self, channel_state, cust_state, amount):
        # Run one complete payment of `amount` on the given channel; asserts
        # the pay token verifies and returns the updated channel/customer states.
        (payment_proof, new_cust_state) = self.bolt.bidirectional_pay_generate_payment_proof(channel_state, cust_state, amount)
        (new_close_token, self.merch_state) = self.bolt.bidirectional_pay_verify_payment_proof(channel_state, payment_proof, self.merch_state)
        (revoke_token, cust_state) = self.bolt.bidirectional_pay_generate_revoke_token(channel_state, cust_state, new_cust_state, new_close_token)
        (pay_token, self.merch_state) = self.bolt.bidirectional_pay_verify_revoke_token(revoke_token, self.merch_state)
        (cust_state, is_pay_valid) = self.bolt.bidirectional_pay_verify_payment_token(channel_state, cust_state, pay_token)
        self.assertTrue(is_pay_valid)
        return channel_state, cust_state
    def test_multiple_channels_work(self):
        """Establishing concurrent channels with a merchant works as expected
        """
        alice_cust_state_dict = json.loads(self.alice_state)
        self.channel_token_a, self.channel_state_a, alice_cust_state = self._establish_channel(self.channel_token_a, self.channel_state,
                                                                                              self.alice_state, alice_cust_state_dict["wallet"]["channelId"],
                                                                                              self.b0_alice, self.b0_merch)
        charlie_cust_state_dict = json.loads(self.charlie_state)
        self.channel_token_c, self.channel_state_c, charlie_cust_state = self._establish_channel(self.channel_token_c, self.channel_state,
                                                                                                self.charlie_state, charlie_cust_state_dict["wallet"]["channelId"],
                                                                                                self.b0_charlie, self.b0_merch)
        # Alice pays 15; Charlie pays 10 then 20 on an independent channel
        self.channel_state_a, alice_cust_state = self._pay_on_channel(self.channel_state_a, alice_cust_state, 15)
        #print("Alice cust state => ", alice_cust_state)
        self.channel_state_c, charlie_cust_state = self._pay_on_channel(self.channel_state_c, charlie_cust_state, 10)
        self.channel_state_c, charlie_cust_state = self._pay_on_channel(self.channel_state_c, charlie_cust_state, 20)
        #print("Charlie cust state => ", charlie_cust_state)
        # balances diverge (150-15 vs 150-30), proving the channels are independent
        alice_bal = json.loads(alice_cust_state)["cust_balance"]
        charlie_bal = json.loads(charlie_cust_state)["cust_balance"]
        self.assertTrue(alice_bal != charlie_bal)
class BoltIntermediaryTests(unittest.TestCase):
    """Tests payments routed through an intermediary ("Hub").

    Alice and Bob each hold a channel with the hub; a payment A -> I -> B
    is expressed as +10 on Alice's channel and -10 on Bob's.
    """
    def setUp(self):
        """
        Setup init alice/bob/intermediary state and establish phase of Bolt protocol
        :return:
        """
        self.bolt = libbolt.Libbolt('target/{}/{}bolt.{}'.format(libbolt.mode, libbolt.prefix, libbolt.ext))
        self.channel_state = self.bolt.channel_setup("Test Channel")
        self.b0_alice = self.b0_bob = 100
        self.b0_intermediary = 100
        (self.channel_token, self.merch_state, self.channel_state) = self.bolt.bidirectional_init_merchant(self.channel_state, "Hub")
        (self.channel_token_a, self.alice_state) = self.bolt.bidirectional_init_customer(self.channel_token, self.b0_alice, self.b0_intermediary, "Alice")
        (self.channel_token_c, self.bob_state) = self.bolt.bidirectional_init_customer(self.channel_token, self.b0_bob, self.b0_intermediary, "Bob")
    def _establish_channel(self, channel_token, channel_state, cust_state, pkc, b0_cust, b0_merch):
        # Run the full establish handshake for one customer; asserts each
        # step succeeds and returns the updated (token, channel, customer) triple.
        (channel_token, cust_state, com, com_proof) = self.bolt.bidirectional_establish_customer_generate_proof(channel_token, cust_state)
        close_token = self.bolt.bidirectional_establish_merchant_issue_close_token(channel_state, com, com_proof, pkc, b0_cust, b0_merch, self.merch_state)
        self.assertTrue(close_token is not None)
        (is_token_valid, channel_state, cust_state) = self.bolt.bidirectional_establish_customer_verify_close_token(channel_state, cust_state, close_token)
        self.assertTrue(is_token_valid)
        pay_token = self.bolt.bidirectional_establish_merchant_issue_pay_token(channel_state, com, self.merch_state)
        self.assertTrue(pay_token is not None)
        (is_channel_established, channel_state, cust_state) = self.bolt.bidirectional_establish_customer_final(channel_state, cust_state, pay_token)
        self.assertTrue(is_channel_established)
        return channel_token, channel_state, cust_state
    def test_payment_with_intermediary_works(self):
        """Making a payment using an intermediary works
        """
        alice_cust_state_dict = json.loads(self.alice_state)
        self.channel_token_a, self.channel_state_a, alice_cust_state = self._establish_channel(self.channel_token_a, self.channel_state,
                                                                                              self.alice_state, alice_cust_state_dict["wallet"]["channelId"],
                                                                                              self.b0_alice, self.b0_intermediary)
        bob_cust_state_dict = json.loads(self.bob_state)
        self.channel_token_b, self.channel_state_c, bob_cust_state = self._establish_channel(self.channel_token_c, self.channel_state,
                                                                                            self.bob_state, bob_cust_state_dict["wallet"]["channelId"],
                                                                                            self.b0_bob, self.b0_intermediary)
        #A prepares payment A -> I
        (payment_proof_a, new_alice_cust_state) = self.bolt.bidirectional_pay_generate_payment_proof(self.channel_state, alice_cust_state, 10)
        #B prepares payment I -> B
        (payment_proof_b, new_bob_cust_state) = self.bolt.bidirectional_pay_generate_payment_proof(self.channel_state, bob_cust_state, -10)
        #I verifies payment proofs
        (new_close_token_a, cond_close_token_b, self.merch_state) = self.bolt.bidirectional_pay_verify_multiple_payment_proofs(self.channel_state, payment_proof_a, payment_proof_b, self.merch_state)
        #A generates revoke token
        (revoke_token_a, alice_cust_state) = self.bolt.bidirectional_pay_generate_revoke_token(self.channel_state, alice_cust_state, new_alice_cust_state, new_close_token_a)
        #B generates revoke token
        (revoke_token_b, bob_cust_state) = self.bolt.bidirectional_pay_generate_revoke_token(self.channel_state, bob_cust_state, new_bob_cust_state, cond_close_token_b)
        #I verifies both revoke tokens
        (pay_token_a, pay_token_b, self.merch_state) = self.bolt.bidirectional_pay_verify_multiple_revoke_tokens(revoke_token_a, revoke_token_b, self.merch_state)
        #A verifies payment token
        (alice_cust_state, is_pay_valid_a) = self.bolt.bidirectional_pay_verify_payment_token(self.channel_state, alice_cust_state, pay_token_a)
        self.assertTrue(is_pay_valid_a)
        #B verifies payment token
        (bob_cust_state, is_pay_valid_b) = self.bolt.bidirectional_pay_verify_payment_token(self.channel_state, bob_cust_state, pay_token_b)
        self.assertTrue(is_pay_valid_b)
        # Alice paid 10 (100 -> 90); Bob received 10 (100 -> 110)
        alice_bal = json.loads(alice_cust_state)["cust_balance"]
        bob_bal = json.loads(bob_cust_state)["cust_balance"]
        self.assertTrue(alice_bal == 90)
        self.assertTrue(bob_bal == 110)
# Run the full bolt test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 58.301508
| 205
| 0.732374
| 3,009
| 23,204
| 5.239614
| 0.061482
| 0.082773
| 0.10789
| 0.085754
| 0.824813
| 0.797412
| 0.7624
| 0.738298
| 0.715337
| 0.692757
| 0
| 0.015943
| 0.183632
| 23,204
| 397
| 206
| 58.448363
| 0.816344
| 0.071238
| 0
| 0.451754
| 1
| 0
| 0.031853
| 0.013631
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.100877
| false
| 0
| 0.013158
| 0
| 0.157895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
14e186cc35c88999c8d0877cae8181a1cd50ba3c
| 42
|
py
|
Python
|
test/test_appapy.py
|
AppalachiaInteractive/com.appalachia.python.common_py
|
6d04a041dfb883846919866b49d956721e9e785f
|
[
"MIT"
] | 1
|
2021-05-07T11:42:30.000Z
|
2021-05-07T11:42:30.000Z
|
test/test_appapy.py
|
ChristopherSchubert/com.appalachia.python.appapy
|
a966fd63817dcc3f97aeb3aaca2a339d9527e2ed
|
[
"MIT"
] | null | null | null |
test/test_appapy.py
|
ChristopherSchubert/com.appalachia.python.appapy
|
a966fd63817dcc3f97aeb3aaca2a339d9527e2ed
|
[
"MIT"
] | 1
|
2021-04-25T15:26:31.000Z
|
2021-04-25T15:26:31.000Z
|
import appapy
def test_main():
    """Placeholder smoke test; passes as long as the module imports."""
| 7
| 16
| 0.666667
| 6
| 42
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.261905
| 42
| 5
| 17
| 8.4
| 0.870968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 8
|
117bf46dca70dc48fcc984a5381487850b5b1404
| 29,182
|
py
|
Python
|
components/core/qcg/pilotjob/tests/test_fileinterface.py
|
LourensVeen/QCG-PilotJob
|
e78c35a9b16b1042a2d5b54352a2ca2e3a58c6b9
|
[
"Apache-2.0"
] | null | null | null |
components/core/qcg/pilotjob/tests/test_fileinterface.py
|
LourensVeen/QCG-PilotJob
|
e78c35a9b16b1042a2d5b54352a2ca2e3a58c6b9
|
[
"Apache-2.0"
] | null | null | null |
components/core/qcg/pilotjob/tests/test_fileinterface.py
|
LourensVeen/QCG-PilotJob
|
e78c35a9b16b1042a2d5b54352a2ca2e3a58c6b9
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import sys
import json
from os.path import abspath, join, isdir, exists
from os import stat
from shutil import rmtree
from string import Template
from datetime import datetime, timedelta
from qcg.pilotjob.service import QCGPMService
from qcg.pilotjob.joblist import Job, JobExecution, JobResources, ResourceSize, JobDependencies
from qcg.pilotjob.errors import IllegalJobDescription
from qcg.pilotjob.utils.auxdir import find_single_aux_dir
from qcg.pilotjob.tests.utils import save_reqs_to_file, check_job_status_in_json
def test_local_simple_job(tmpdir):
    """Submit a single 'date' job via the file interface and verify it succeeds."""
    file_path = tmpdir.join('jobs.json')
    print('tmpdir: {}'.format(str(tmpdir)))
    jobName = 'mdate'
    # one job running 'date' in its own sandbox with redirected streams
    jobs = [job.to_dict() for job in [
        Job(jobName,
            JobExecution(
                'date',
                wd = abspath(tmpdir.join('date.sandbox')),
                stdout = 'date.out',
                stderr = 'date.err'
            ),
            JobResources( numCores=ResourceSize(1) )
        )
    ] ]
    # submit, then tell the service to exit once all tasks are done
    reqs = [ { 'request': 'submit', 'jobs': jobs },
             { 'request': 'control', 'command': 'finishAfterAllTasksDone' } ]
    save_reqs_to_file(reqs, file_path)
    print('jobs saved to file_path: {}'.format(str(file_path)))
    # run the service in file mode against the generated request file
    # (mutates sys.argv, which QCGPMService parses on start)
    sys.argv = [ 'QCG-PilotJob', '--log', 'debug', '--file', '--file-path', str(file_path), '--nodes', '2', '--wd', str(tmpdir),
                 '--report-format', 'json']
    QCGPMService().start()
    check_job_status_in_json([ jobName ], workdir=str(tmpdir), dest_state='SUCCEED')
    # the sandbox directory and both redirected stream files must exist
    assert all((isdir(abspath(tmpdir.join('date.sandbox'))),
                exists(join(abspath(tmpdir.join('date.sandbox')), 'date.out')),
                exists(join(abspath(tmpdir.join('date.sandbox')), 'date.err'))))
    # querying an unknown job name must raise
    with pytest.raises(ValueError):
        check_job_status_in_json([jobName + 'xxx'], workdir=str(tmpdir), dest_state='SUCCEED')
    # rmtree(str(tmpdir))
def test_local_simple_script_job(tmpdir):
    """Submit a job defined by an inline shell script and verify it succeeds."""
    file_path = tmpdir.join('jobs.json')
    print('tmpdir: {}'.format(str(tmpdir)))
    jobName = 'mdate_script'
    # job body is a multi-line script instead of a single executable
    jobs = [job.to_dict() for job in [
        Job(jobName,
            JobExecution(
                script = '/bin/date\n/bin/hostname\n',
                wd = abspath(tmpdir.join('date.sandbox')),
                stdout = 'date.out',
                stderr = 'date.err'
            ),
            JobResources( numCores=ResourceSize(1) )
        )
    ] ]
    reqs = [ { 'request': 'submit', 'jobs': jobs },
             { 'request': 'control', 'command': 'finishAfterAllTasksDone' } ]
    save_reqs_to_file(reqs, file_path)
    print('jobs saved to file_path: {}'.format(str(file_path)))
    # run the service in file mode (mutates sys.argv)
    sys.argv = [ 'QCG-PilotJob', '--log', 'debug', '--file', '--file-path', str(file_path), '--nodes', '2', '--wd', str(tmpdir),
                 '--report-format', 'json']
    QCGPMService().start()
    check_job_status_in_json([ jobName ], workdir=str(tmpdir), dest_state='SUCCEED')
    # sandbox and streams exist; the script wrote to stdout but not stderr
    assert all((isdir(abspath(tmpdir.join('date.sandbox'))),
                exists(join(abspath(tmpdir.join('date.sandbox')), 'date.out')),
                exists(join(abspath(tmpdir.join('date.sandbox')), 'date.err')),
                stat(join(abspath(tmpdir.join('date.sandbox')), 'date.out')).st_size > 0,
                stat(join(abspath(tmpdir.join('date.sandbox')), 'date.err')).st_size == 0))
    with pytest.raises(ValueError):
        check_job_status_in_json([jobName + 'xxx'], workdir=str(tmpdir), dest_state='SUCCEED')
    # rmtree(str(tmpdir))
def test_local_error_duplicate_name_job(tmpdir):
    """Two jobs sharing a name in ONE submit request: the whole request fails."""
    file_path = tmpdir.join('jobs.json')
    print('tmpdir: {}'.format(str(tmpdir)))
    jobName = 'mdate'
    # both jobs deliberately reuse jobName to trigger the duplicate-name error
    jobs = [job.to_dict() for job in [
        Job(jobName,
            JobExecution(
                'date',
                wd = abspath(tmpdir.join('date.sandbox')),
                stdout = 'date.out',
                stderr = 'date.err'
            ),
            JobResources( numCores=ResourceSize(1) )
        ),
        Job(jobName,
            JobExecution(
                'sleep',
                wd=abspath(tmpdir.join('sleep.sandbox') ),
                stdout='sleep.out',
                stderr='sleep.err'
            ),
            JobResources(numCores=ResourceSize(1))
        )
    ] ]
    reqs = [ { 'request': 'submit', 'jobs': jobs },
             { 'request': 'control', 'command': 'finishAfterAllTasksDone' } ]
    save_reqs_to_file(reqs, file_path)
    print('jobs saved to file_path: {}'.format(str(file_path)))
    # run the service in file mode (mutates sys.argv)
    sys.argv = [ 'QCG-PilotJob', '--file', '--file-path', str(file_path), '--nodes', '2', '--wd', str(tmpdir),
                 '--report-format', 'json']
    QCGPMService().start()
    # no job should be executed due to the failed submit request with non-unique jobs inside
    assert not isdir(abspath(tmpdir.join('date.sandbox')))
    assert not isdir(abspath(tmpdir.join('sleep.sandbox')))
    # rmtree(str(tmpdir))
def test_local_error_duplicate_name_job_separate_reqs(tmpdir):
    """Duplicate job name across TWO submit requests: first runs, second is rejected."""
    file_path = tmpdir.join('jobs.json')
    print('tmpdir: {}'.format(str(tmpdir)))
    jobName = 'mdate'
    # first request: valid 'date' job
    jobs1 = [job.to_dict() for job in [
        Job(jobName,
            JobExecution(
                'date',
                wd = abspath(tmpdir.join('date.sandbox')),
                stdout = 'date.out',
                stderr = 'date.err'
            ),
            JobResources( numCores=ResourceSize(1) )
        ) ] ]
    # second request reuses the same jobName, so it must be rejected
    jobs2 = [job.to_dict() for job in [
        Job(jobName,
            JobExecution(
                'sleep',
                wd=abspath(tmpdir.join('sleep.sandbox') ),
                stdout='sleep.out',
                stderr='sleep.err'
            ),
            JobResources(numCores=ResourceSize(1))
        )
    ] ]
    reqs = [ { 'request': 'submit', 'jobs': jobs1 },
             { 'request': 'submit', 'jobs': jobs2 },
             { 'request': 'control', 'command': 'finishAfterAllTasksDone' } ]
    save_reqs_to_file(reqs, file_path)
    print('jobs saved to file_path: {}'.format(str(file_path)))
    # run the service in file mode (mutates sys.argv)
    sys.argv = [ 'QCG-PilotJob', '--file', '--file-path', str(file_path), '--nodes', '2', '--wd', str(tmpdir),
                 '--report-format', 'json']
    QCGPMService().start()
    # the first job (date) should execute
    check_job_status_in_json([ jobName ], workdir=str(tmpdir), dest_state='SUCCEED')
    assert all((isdir(abspath(tmpdir.join('date.sandbox'))),
                exists(join(abspath(tmpdir.join('date.sandbox')), 'date.out')),
                exists(join(abspath(tmpdir.join('date.sandbox')), 'date.err'))))
    # the second job (sleep) due to the name clash should not execute
    assert not isdir(abspath(tmpdir.join('sleep.sandbox')))
    # rmtree(str(tmpdir))
def test_local_error_job_desc():
    """Invalid job descriptions must raise IllegalJobDescription at construction."""
    ok_resources = JobResources(numCores=ResourceSize(1))
    # missing job execution
    with pytest.raises(IllegalJobDescription):
        Job(
            'error_job',
            JobExecution(None, stdout='date.out', stderr='date.err'),
            ok_resources,
        )
    # wrong format of arguments
    with pytest.raises(IllegalJobDescription):
        Job(
            'error_job',
            JobExecution('date', args='this should be a list',
                         stdout='date.out', stderr='date.err'),
            ok_resources,
        )
    # wrong format of environment
    with pytest.raises(IllegalJobDescription):
        Job(
            'error_job',
            JobExecution('date', args=['arg1'], env=['this shuld be a dict'],
                         stdout='date.out', stderr='date.err'),
            ok_resources,
        )
    # missing execution definition
    with pytest.raises(IllegalJobDescription):
        Job('error_job', None, ok_resources)
    # missing resources definition
    with pytest.raises(IllegalJobDescription):
        Job(
            'error_job',
            JobExecution('date', args=['arg1'], env=['this shuld be a dict'],
                         stdout='date.out', stderr='date.err'),
            None,
        )
def test_local_simple_iter_job(tmpdir):
    """Submit an iterative job (10 iterations of /bin/echo) and verify each run."""
    file_path = tmpdir.join('jobs.json')
    print('tmpdir: {}'.format(str(tmpdir)))
    jobName = "echo-iter"
    nits = 10
    # one job description expanded into nits iterations; '${it}' is the
    # iteration placeholder ('$$' escapes '$' in the wd template)
    jobs = [
        {
            "name": jobName,
            "iteration": { "start": 0, "stop": nits },
            "execution": {
                "exec": "/bin/echo",
                "args": ["iteration ${it}"],
                "wd": abspath(tmpdir.join("{}_$${{it}}".format(jobName))),
                "stdout": "echo-iter.stdout",
                "stderr": "echo-iter.stderr"
            },
            "resources": {
                "numCores": {
                    "exact": 1,
                }
            }
        }
    ]
    reqs = [ { 'request': 'submit', 'jobs': jobs },
             { 'request': 'control', 'command': 'finishAfterAllTasksDone' } ]
    save_reqs_to_file(reqs, file_path)
    print('jobs saved to file_path: {}'.format(str(file_path)))
    # run the service in file mode (mutates sys.argv)
    sys.argv = [ 'QCG-PilotJob', '--log', 'debug', '--file', '--file-path', str(file_path), '--nodes', '2', '--wd',
                 str(tmpdir), '--report-format', 'json']
    QCGPMService().start()
    # the parent job plus every 'name:iteration' entry must have succeeded
    check_job_status_in_json([jobName] + ["{}:{}".format(jobName, i) for i in range(0, nits)], workdir=str(tmpdir),
                             dest_state='SUCCEED')
    # each iteration has its own sandbox whose stdout echoes its index
    for i in range(0, nits):
        wd_path = abspath(tmpdir.join("{}_{}".format(jobName, i)))
        stdout_path = join(wd_path, 'echo-iter.stdout')
        stderr_path = join(wd_path, 'echo-iter.stderr')
        assert all((isdir(wd_path),
                    exists(stdout_path),
                    exists(stderr_path))), "stdout({}) and/or stderr({}) doesn't exist".format(stdout_path, stderr_path)
        with open(stdout_path, 'r') as f:
            assert f.read().strip() == Template("iteration ${it}").substitute(it=i)
    rmtree(str(tmpdir))
def test_local_simple_uneven_resources_iter_job(tmpdir):
    """Submit a 10-iteration, 2-core echo job on 3 nodes (uneven fit) and verify all succeed."""
    file_path = tmpdir.join('jobs.json')
    print('tmpdir: {}'.format(str(tmpdir)))

    jobName = "echo-iter"
    nits = 10
    job_desc = {
        "name": jobName,
        "iteration": {"start": 0, "stop": nits},
        "execution": {
            "exec": "/bin/echo",
            "args": ["iteration ${it}"],
            # ${it} is substituted with the iteration index by the service
            "wd": abspath(tmpdir.join("{}_$${{it}}".format(jobName))),
            "stdout": "echo-iter.stdout",
            "stderr": "echo-iter.stderr"
        },
        # two cores per iteration on three nodes - resources don't divide evenly
        "resources": {"numCores": {"exact": 2}}
    }
    save_reqs_to_file([{'request': 'submit', 'jobs': [job_desc]},
                       {'request': 'control', 'command': 'finishAfterAllTasksDone'}], file_path)
    print('jobs saved to file_path: {}'.format(str(file_path)))

    sys.argv = ['QCG-PilotJob', '--log', 'debug', '--file', '--file-path', str(file_path), '--nodes', '3', '--wd',
                str(tmpdir), '--report-format', 'json']
    QCGPMService().start()

    expected = [jobName] + ['{}:{}'.format(jobName, it) for it in range(nits)]
    check_job_status_in_json(expected, workdir=str(tmpdir), dest_state='SUCCEED')

    for it in range(nits):
        wd_path = abspath(tmpdir.join('{}_{}'.format(jobName, it)))
        stdout_path = join(wd_path, 'echo-iter.stdout')
        stderr_path = join(wd_path, 'echo-iter.stderr')
        assert all((isdir(wd_path),
                    exists(stdout_path),
                    exists(stderr_path))), "stdout({}) and/or stderr({}) doesn't exist".format(stdout_path, stderr_path)
        with open(stdout_path, 'r') as f:
            assert f.read().strip() == Template("iteration ${it}").substitute(it=it)

    rmtree(str(tmpdir))
def test_local_simple_uneven_resources_many_iter_jobs(tmpdir):
    """Submit two iterative echo jobs with different core demands on 3 nodes; all must succeed."""
    file_path = tmpdir.join('jobs.json')
    print('tmpdir: {}'.format(str(tmpdir)))

    bigJobName = "big-echo-iter"
    smallJobName = "small-echo-iter"
    nits = 10

    def echo_job(name, cores):
        # Build one iterative echo job description; ${it} is substituted per iteration.
        return {
            "name": name,
            "iteration": {"start": 0, "stop": nits},
            "execution": {
                "exec": "/bin/echo",
                "args": ["iteration ${it}"],
                "wd": abspath(tmpdir.join("{}_$${{it}}".format(name))),
                "stdout": "echo-iter.stdout",
                "stderr": "echo-iter.stderr"
            },
            "resources": {"numCores": {"exact": cores}}
        }

    jobs = [echo_job(bigJobName, 2), echo_job(smallJobName, 1)]
    save_reqs_to_file([{'request': 'submit', 'jobs': jobs},
                       {'request': 'control', 'command': 'finishAfterAllTasksDone'}], file_path)
    print('jobs saved to file_path: {}'.format(str(file_path)))

    sys.argv = ['QCG-PilotJob', '--log', 'debug', '--file', '--file-path', str(file_path), '--nodes', '3', '--wd',
                str(tmpdir), '--report-format', 'json']
    QCGPMService().start()

    # both parent jobs and all their iterations must reach SUCCEED
    expected = ([bigJobName] + ['{}:{}'.format(bigJobName, it) for it in range(nits)] +
                [smallJobName] + ['{}:{}'.format(smallJobName, it) for it in range(nits)])
    check_job_status_in_json(expected, workdir=str(tmpdir), dest_state='SUCCEED')

    for jobName in (bigJobName, smallJobName):
        for it in range(nits):
            wd_path = abspath(tmpdir.join('{}_{}'.format(jobName, it)))
            stdout_path = join(wd_path, 'echo-iter.stdout')
            stderr_path = join(wd_path, 'echo-iter.stderr')
            assert all((isdir(wd_path),
                        exists(stdout_path),
                        exists(stderr_path))), "stdout({}) and/or stderr({}) doesn't exist".format(stdout_path, stderr_path)
            with open(stdout_path, 'r') as f:
                assert f.read().strip() == Template("iteration ${it}").substitute(it=it)

    rmtree(str(tmpdir))
def test_local_iter_scheduling_job_small(tmpdir):
    """Verify round-based scheduling of an iterative sleep job on a small core pool.

    4 iterations x 2 cores on 4 available cores should execute in 2 rounds,
    so the whole scenario should last about ``rounds * jobSleepTime`` seconds.
    """
    file_path = tmpdir.join('jobs.json')

    print('tmpdir: {}'.format(str(tmpdir)))

    jobName = "sleep-iter"
    nits = 4
    jobSleepTime = 2
    jobCores = 2
    availCores = 4
    # number of scheduling rounds needed to run all iterations
    rounds = nits * jobCores / availCores
    totalExecTime = rounds * jobSleepTime

    jobs = [
        {
            "name": jobName,
            "iteration": { "stop": nits },
            "execution": {
                "exec": "/bin/sleep",
                "args": ["{}s".format(str(jobSleepTime))],
                "wd": abspath(tmpdir.join("{}_$${{it}}".format(jobName))),
                "stdout": "sleep-iter.stdout",
                "stderr": "sleep-iter.stderr"
            },
            "resources": {
                "numCores": {
                    "exact": jobCores,
                }
            }
        }
    ]
    reqs = [ { 'request': 'submit', 'jobs': jobs },
             { 'request': 'control', 'command': 'finishAfterAllTasksDone' } ]
    save_reqs_to_file(reqs, file_path)
    print('jobs saved to file_path: {}'.format(str(file_path)))

    sys.argv = [ 'QCG-PilotJob', '--log', 'debug', '--file', '--file-path', str(file_path), '--nodes', str(availCores),
                 '--wd', str(tmpdir), '--report-format', 'json']
    QCGPMService().start()

    check_job_status_in_json([jobName] + ["{}:{}".format(jobName, i) for i in range(0, nits)], workdir=str(tmpdir),
                             dest_state='SUCCEED')
    for i in range(0, nits):
        wd_path = abspath(tmpdir.join("{}_{}".format(jobName, i)))
        stdout_path = join(wd_path, 'sleep-iter.stdout')
        stderr_path = join(wd_path, 'sleep-iter.stderr')
        assert all((isdir(wd_path),
                    exists(stdout_path),
                    exists(stderr_path))), "stdout({}) and/or stderr({}) doesn't exist".format(stdout_path, stderr_path)

    with open(join(find_single_aux_dir(str(tmpdir)), 'jobs.report'), 'r') as f:
        job_stats = [json.loads(line) for line in f.readlines() ]

    # one report entry per iteration plus one for the parent job
    assert len(job_stats) == nits + 1

    min_start, max_finish = None, None

    for i in range(0, nits):
        job = job_stats[i]
        print('readed job stats: {}'.format(str(job)))

        # per-iteration runtime must be close to the requested sleep time
        t = datetime.strptime(job['runtime']['rtime'], "%H:%M:%S.%f")
        rtime = timedelta(hours=t.hour, minutes=t.minute, seconds=t.second, microseconds=t.microsecond)
        assert all((rtime.total_seconds() > jobSleepTime, rtime.total_seconds() < jobSleepTime + 0.5)), \
            "job {} runtime exceeded assumed value {}s vs max {}s".format(i, rtime.total_seconds(), jobSleepTime + 0.5)

        # find start executing time
        exec_state = list(filter(lambda st_en: st_en['state'] == 'EXECUTING', job['history']))
        assert len(exec_state) == 1
        # find finish executing time
        finish_state = list(filter(lambda st_en: st_en['state'] == 'SUCCEED', job['history']))
        assert len(finish_state) == 1

        start_time = datetime.strptime(exec_state[0]['date'], '%Y-%m-%dT%H:%M:%S.%f')
        finish_time = datetime.strptime(finish_state[0]['date'], '%Y-%m-%dT%H:%M:%S.%f')

        if not min_start or start_time < min_start:
            min_start = start_time
        if not max_finish or finish_time > max_finish:
            max_finish = finish_time

    # both bounds must have been set by the loop above
    # (fix: original asserted `finish_time`, leaving `max_finish` unused)
    assert all((min_start, max_finish))

    # check if duration from executing first job till the end of last job is about 2 rounds, each with jobSleepTime
    # (fix: use the latest finish time, not the finish of the last iterated job)
    scenario_duration = max_finish - min_start
    assert all((scenario_duration.total_seconds() > totalExecTime,
                scenario_duration.total_seconds() < totalExecTime + 1)), \
        "scenario duration runtime exceeded assumed value {}s vs max {}s".format(scenario_duration.total_seconds(),
                                                                                 totalExecTime + 1)

    rmtree(str(tmpdir))
def test_local_iter_scheduling_job_large(tmpdir):
    """Verify round-based scheduling of an iterative sleep job on a larger core pool.

    20 iterations x 2 cores on 10 available cores should execute in 4 rounds,
    so the whole scenario should last about ``rounds * jobSleepTime`` seconds.
    """
    file_path = tmpdir.join('jobs.json')

    print('tmpdir: {}'.format(str(tmpdir)))

    jobName = "sleep-iter"
    nits = 20
    jobSleepTime = 2
    jobCores = 2
    availCores = 10
    # number of scheduling rounds needed to run all iterations
    rounds = nits * jobCores / availCores
    totalExecTime = rounds * jobSleepTime

    jobs = [
        {
            "name": jobName,
            "iteration": { "stop": nits },
            "execution": {
                "exec": "/bin/sleep",
                "args": ["{}s".format(str(jobSleepTime))],
                "wd": abspath(tmpdir.join("{}_$${{it}}".format(jobName))),
                "stdout": "sleep-iter.stdout",
                "stderr": "sleep-iter.stderr"
            },
            "resources": {
                "numCores": {
                    "exact": jobCores,
                }
            }
        }
    ]
    reqs = [ { 'request': 'submit', 'jobs': jobs },
             { 'request': 'control', 'command': 'finishAfterAllTasksDone' } ]
    save_reqs_to_file(reqs, file_path)
    print('jobs saved to file_path: {}'.format(str(file_path)))

    sys.argv = [ 'QCG-PilotJob', '--log', 'debug', '--file', '--file-path', str(file_path), '--nodes', str(availCores),
                 '--wd', str(tmpdir), '--report-format', 'json']
    QCGPMService().start()

    check_job_status_in_json([jobName] + ["{}:{}".format(jobName, i) for i in range(0, nits)], workdir=str(tmpdir),
                             dest_state='SUCCEED')
    for i in range(0, nits):
        wd_path = abspath(tmpdir.join("{}_{}".format(jobName, i)))
        stdout_path = join(wd_path, 'sleep-iter.stdout')
        stderr_path = join(wd_path, 'sleep-iter.stderr')
        assert all((isdir(wd_path),
                    exists(stdout_path),
                    exists(stderr_path))), "stdout({}) and/or stderr({}) doesn't exist".format(stdout_path, stderr_path)

    with open(join(find_single_aux_dir(str(tmpdir)), 'jobs.report'), 'r') as f:
        job_stats = [json.loads(line) for line in f.readlines() ]

    # one report entry per iteration plus one for the parent job
    assert len(job_stats) == nits + 1

    min_start, max_finish = None, None

    for i in range(0, nits):
        job = job_stats[i]
        print('readed job stats: {}'.format(str(job)))

        # per-iteration runtime must be close to the requested sleep time
        t = datetime.strptime(job['runtime']['rtime'], "%H:%M:%S.%f")
        rtime = timedelta(hours=t.hour, minutes=t.minute, seconds=t.second, microseconds=t.microsecond)
        assert all((rtime.total_seconds() > jobSleepTime, rtime.total_seconds() < jobSleepTime + 2)), \
            "job {} runtime exceeded assumed value {}s vs max {}s".format(i, rtime.total_seconds(), jobSleepTime + 2)

        # find start executing time
        exec_state = list(filter(lambda st_en: st_en['state'] == 'EXECUTING', job['history']))
        assert len(exec_state) == 1
        # find finish executing time
        finish_state = list(filter(lambda st_en: st_en['state'] == 'SUCCEED', job['history']))
        assert len(finish_state) == 1

        start_time = datetime.strptime(exec_state[0]['date'], '%Y-%m-%dT%H:%M:%S.%f')
        finish_time = datetime.strptime(finish_state[0]['date'], '%Y-%m-%dT%H:%M:%S.%f')

        if not min_start or start_time < min_start:
            min_start = start_time
        if not max_finish or finish_time > max_finish:
            max_finish = finish_time

    # both bounds must have been set by the loop above
    # (fix: original asserted `finish_time`, leaving `max_finish` unused)
    assert all((min_start, max_finish))

    # check if duration from executing first job till the end of last job is about 4 rounds, each with jobSleepTime
    # (fix: use the latest finish time, not the finish of the last iterated job)
    scenario_duration = max_finish - min_start
    assert all((scenario_duration.total_seconds() > totalExecTime,
                scenario_duration.total_seconds() < totalExecTime + 4)), \
        "scenario duration runtime exceeded assumed value {}s vs max {}s".format(scenario_duration.total_seconds(),
                                                                                 totalExecTime + 4)

    rmtree(str(tmpdir))
def test_profile_local_iter_scheduling_job_large(tmpdir):
    """Profiling-oriented run: 100 sleep iterations on 40 cores; only output files are checked."""
    file_path = tmpdir.join('jobs.json')
    print('tmpdir: {}'.format(str(tmpdir)))

    jobName = "sleep-iter"
    nits = 100
    jobSleepTime = 2
    jobCores = 2
    availCores = 40
    # informational only - no timing assertions in this profiling scenario
    rounds = nits * jobCores / availCores
    totalExecTime = rounds * jobSleepTime

    job_desc = {
        "name": jobName,
        "iteration": {"stop": nits},
        "execution": {
            "exec": "/bin/sleep",
            "args": ["{}s".format(str(jobSleepTime))],
            "wd": abspath(tmpdir.join("{}_$${{it}}".format(jobName))),
            "stdout": "sleep-iter.stdout",
            "stderr": "sleep-iter.stderr"
        },
        "resources": {"numCores": {"exact": jobCores}}
    }
    save_reqs_to_file([{'request': 'submit', 'jobs': [job_desc]},
                       {'request': 'control', 'command': 'finishAfterAllTasksDone'}], file_path)
    print('jobs saved to file_path: {}'.format(str(file_path)))

    sys.argv = ['QCG-PilotJob', '--file', '--file-path', str(file_path), '--nodes', str(availCores), '--wd', str(tmpdir),
                '--report-format', 'json']
    QCGPMService().start()

    expected = [jobName] + ['{}:{}'.format(jobName, it) for it in range(nits)]
    check_job_status_in_json(expected, workdir=str(tmpdir), dest_state='SUCCEED')

    for it in range(nits):
        wd_path = abspath(tmpdir.join('{}_{}'.format(jobName, it)))
        stdout_path = join(wd_path, 'sleep-iter.stdout')
        stderr_path = join(wd_path, 'sleep-iter.stderr')
        assert all((isdir(wd_path),
                    exists(stdout_path),
                    exists(stderr_path))), "stdout({}) and/or stderr({}) doesn't exist".format(stdout_path, stderr_path)

    rmtree(str(tmpdir))
def test_local_workflows(tmpdir):
    """Run a three-job dependency chain and verify ordering from the jobs report.

    ``second`` depends on ``first``; ``third`` depends on both. Although enough
    cores are available to run all three at once, the dependencies must force
    sequential start times.
    """
    file_path = tmpdir.join('jobs.json')
    print('tmpdir: {}'.format(str(tmpdir)))

    first = Job('first',
                JobExecution('sleep', args=['2s'], wd=abspath(tmpdir.join('first.sandbox')),
                             stdout='out', stderr='err'),
                JobResources(numCores=ResourceSize(1)))
    second = Job('second',
                 JobExecution('sleep', args=['1s'], wd=abspath(tmpdir.join('second.sandbox')),
                              stdout='out', stderr='err'),
                 JobResources(numCores=ResourceSize(1)),
                 dependencies=JobDependencies(after=['first']))
    third = Job('third',
                JobExecution('date', wd=abspath(tmpdir.join('third.sandbox')),
                             stdout='out', stderr='err'),
                JobResources(numCores=ResourceSize(1)),
                dependencies=JobDependencies(after=['first', 'second']))
    jobs = [j.to_dict() for j in (first, second, third)]

    save_reqs_to_file([{'request': 'submit', 'jobs': jobs},
                       {'request': 'control', 'command': 'finishAfterAllTasksDone'}], file_path)
    print('jobs saved to file_path: {}'.format(str(file_path)))

    # enough cores to theoretically start all three jobs at once
    sys.argv = ['QCG-PilotJob', '--file', '--file-path', str(file_path), '--nodes', '4', '--wd', str(tmpdir),
                '--report-format', 'json']
    QCGPMService().start()

    jnames = ['first', 'second', 'third']
    check_job_status_in_json(jnames, workdir=str(tmpdir), dest_state='SUCCEED')
    for jname in jnames:
        sandbox = abspath(tmpdir.join('{}.sandbox'.format(jname)))
        assert all((isdir(sandbox),
                    exists(join(sandbox, 'out')),
                    exists(join(sandbox, 'err'))))

    with open(join(find_single_aux_dir(str(tmpdir)), 'jobs.report'), 'r') as f:
        job_stats = [json.loads(line) for line in f.readlines()]

    assert len(job_stats) == len(jnames)

    jstats = {}
    for job in job_stats:
        print('readed job stats: {}'.format(str(job)))

        t = datetime.strptime(job['runtime']['rtime'], "%H:%M:%S.%f")
        rtime = timedelta(hours=t.hour, minutes=t.minute, seconds=t.second, microseconds=t.microsecond)

        # exactly one EXECUTING and one SUCCEED entry per job history
        executing = [entry for entry in job['history'] if entry['state'] == 'EXECUTING']
        assert len(executing) == 1
        succeeded = [entry for entry in job['history'] if entry['state'] == 'SUCCEED']
        assert len(succeeded) == 1

        jstats[job['name']] = {
            'r_time': rtime,
            's_time': datetime.strptime(executing[0]['date'], '%Y-%m-%dT%H:%M:%S.%f'),
            'f_time': datetime.strptime(succeeded[0]['date'], '%Y-%m-%dT%H:%M:%S.%f'),
        }

    # second must start only after first finished
    assert jstats['second']['s_time'] > jstats['first']['f_time']
    # third must start only after both first and second finished
    assert all((jstats['third']['s_time'] > jstats['first']['f_time'],
                jstats['third']['s_time'] > jstats['second']['f_time']))

    rmtree(str(tmpdir))
def test_local_workflows_error(tmpdir):
    """A job that depends on a non-existing job must never be executed."""
    file_path = tmpdir.join('jobs.json')
    print('tmpdir: {}'.format(str(tmpdir)))

    broken = Job('first',
                 JobExecution('sleep', args=['2s'], wd=abspath(tmpdir.join('first.sandbox')),
                              stdout='out', stderr='err'),
                 JobResources(numCores=ResourceSize(1)),
                 dependencies=JobDependencies(after=['not-existing']))

    save_reqs_to_file([{'request': 'submit', 'jobs': [broken.to_dict()]},
                       {'request': 'control', 'command': 'finishAfterAllTasksDone'}], file_path)
    print('jobs saved to file_path: {}'.format(str(file_path)))

    sys.argv = ['QCG-PilotJob', '--file', '--file-path', str(file_path), '--nodes', '2', '--wd', str(tmpdir),
                '--report-format', 'json']
    QCGPMService().start()

    # the job must not have run, so its sandbox directory must not exist
    assert not exists(abspath(tmpdir.join('first.sandbox')))

    rmtree(str(tmpdir))
#TODO: add tests for workflows at the iteration level
#TODO: add tests for invalid workflows, including: missing iteration of the referenced job
#TODO: add tests for invalid workflows: dependency cycles
| 37.317136
| 128
| 0.547015
| 3,200
| 29,182
| 4.8525
| 0.080625
| 0.037094
| 0.044887
| 0.020801
| 0.900502
| 0.876224
| 0.863086
| 0.854714
| 0.832303
| 0.828697
| 0
| 0.005166
| 0.290179
| 29,182
| 781
| 129
| 37.364917
| 0.744472
| 0.038448
| 0
| 0.775974
| 0
| 0
| 0.192887
| 0.010773
| 0
| 0
| 0
| 0.00128
| 0.055195
| 1
| 0.021104
| false
| 0
| 0.021104
| 0
| 0.042208
| 0.043831
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
11af663f5c27cc69ed4395a64f1480d8895c6983
| 152
|
py
|
Python
|
sherlog/__init__.py
|
sancau/union
|
f1554974016bb2a8a4df0576cda5695d41073a64
|
[
"MIT"
] | 4
|
2017-02-09T13:00:46.000Z
|
2017-12-20T08:13:53.000Z
|
sherlog/__init__.py
|
sancau/sherlog
|
f1554974016bb2a8a4df0576cda5695d41073a64
|
[
"MIT"
] | 6
|
2017-02-09T10:10:23.000Z
|
2017-02-21T09:39:23.000Z
|
sherlog/__init__.py
|
sancau/union
|
f1554974016bb2a8a4df0576cda5695d41073a64
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# Package entry point: re-export the logger accessors and the version string.
from sherlog.logger import get_logger
from sherlog.logger import set_logger
from sherlog.version import VERSION
# Conventional dunder alias for the package version defined in sherlog.version.
__version__ = VERSION
| 16.888889
| 37
| 0.822368
| 22
| 152
| 5.409091
| 0.454545
| 0.277311
| 0.285714
| 0.386555
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007576
| 0.131579
| 152
| 8
| 38
| 19
| 0.893939
| 0.078947
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
11afe6016b5e40df40f0423ebca6b9705240f780
| 4,627
|
py
|
Python
|
app/explore/views.py
|
valeriansaliou/waaave-web
|
8a0cde773563865a905af38f5a0b723a43b17341
|
[
"RSA-MD"
] | 1
|
2020-04-06T10:04:43.000Z
|
2020-04-06T10:04:43.000Z
|
app/explore/views.py
|
valeriansaliou/waaave-web
|
8a0cde773563865a905af38f5a0b723a43b17341
|
[
"RSA-MD"
] | null | null | null |
app/explore/views.py
|
valeriansaliou/waaave-web
|
8a0cde773563865a905af38f5a0b723a43b17341
|
[
"RSA-MD"
] | null | null | null |
from django.shortcuts import render
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, Http404
from .helpers import *
def root(request):
    """Explore > Root: redirect to the newest-tutorials listing."""
    target = reverse('explore.views.tutorials')
    return HttpResponseRedirect(target)
def tutorials(request, page=1):
    """Explore > Tutorials Newest: paginated tutorials ordered by most recent."""
    ok, context = TutorialsExploreHelper.build_response(
        request=request, page=page, order_by='-date',
    )
    if not ok:
        raise Http404
    context['explore_view'] = 'explore.views.tutorials'
    return render(request, 'explore/explore_tutorials.jade', context)
def tutorials_popular(request, page=1):
    """Explore > Tutorials Popular: paginated tutorials ordered by popularity."""
    ok, context = TutorialsExploreHelper.build_response(
        request=request, page=page, order_by='popular',
    )
    if not ok:
        raise Http404
    context['explore_view'] = 'explore.views.tutorials_popular'
    return render(request, 'explore/explore_tutorials.jade', context)
def tutorials_alphabetical(request, page=1):
    """Explore > Tutorials Alphabetical: paginated tutorials ordered by title."""
    ok, context = TutorialsExploreHelper.build_response(
        request=request, page=page, order_by='title',
    )
    if not ok:
        raise Http404
    context['explore_view'] = 'explore.views.tutorials_alphabetical'
    return render(request, 'explore/explore_tutorials.jade', context)
def tutorials_yours(request, page=1):
    """Explore > Tutorials Yours: the current user's tutorials, newest first."""
    ok, context = TutorialsExploreHelper.build_response(
        request=request, page=page, order_by='-date',
        author_id=request.user.id,
    )
    if not ok:
        raise Http404
    context['explore_view'] = 'explore.views.tutorials_yours'
    return render(request, 'explore/explore_tutorials.jade', context)
def books(request, page=1):
    """Explore > Books: paginated books ordered by most recent."""
    ok, context = BooksExploreHelper.build_response(
        request=request, page=page, order_by='-date',
    )
    if not ok:
        raise Http404
    context['explore_view'] = 'explore.views.books'
    return render(request, 'explore/explore_books.jade', context)
def books_popular(request, page=1):
    """Explore > Books Popular: paginated books ordered by popularity."""
    ok, context = BooksExploreHelper.build_response(
        request=request, page=page, order_by='popular',
    )
    if not ok:
        raise Http404
    context['explore_view'] = 'explore.views.books_popular'
    return render(request, 'explore/explore_books.jade', context)
def books_alphabetical(request, page=1):
    """Explore > Books Alphabetical: paginated books ordered by title."""
    ok, context = BooksExploreHelper.build_response(
        request=request, page=page, order_by='title',
    )
    if not ok:
        raise Http404
    context['explore_view'] = 'explore.views.books_alphabetical'
    return render(request, 'explore/explore_books.jade', context)
def spots(request, page=1):
    """Explore > Spots: paginated spots ordered by most recent."""
    ok, context = SpotsExploreHelper.build_response(
        request=request, page=page, order_by='-date',
    )
    if not ok:
        raise Http404
    context['explore_view'] = 'explore.views.spots'
    return render(request, 'explore/explore_spots.jade', context)
def spots_popular(request, page=1):
    """Explore > Spots Popular: paginated spots ordered by popularity."""
    ok, context = SpotsExploreHelper.build_response(
        request=request, page=page, order_by='popular',
    )
    if not ok:
        raise Http404
    context['explore_view'] = 'explore.views.spots_popular'
    return render(request, 'explore/explore_spots.jade', context)
def spots_alphabetical(request, page=1):
    """Explore > Spots Alphabetical: paginated spots ordered by name."""
    ok, context = SpotsExploreHelper.build_response(
        request=request, page=page, order_by='name',
    )
    if not ok:
        raise Http404
    context['explore_view'] = 'explore.views.spots_alphabetical'
    return render(request, 'explore/explore_spots.jade', context)
| 21.621495
| 75
| 0.638427
| 474
| 4,627
| 6.052743
| 0.101266
| 0.125479
| 0.041826
| 0.066225
| 0.898919
| 0.836528
| 0.816661
| 0.816661
| 0.816661
| 0.797839
| 0
| 0.012439
| 0.252864
| 4,627
| 213
| 76
| 21.723005
| 0.817472
| 0.048412
| 0
| 0.700787
| 0
| 0
| 0.176776
| 0.126505
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086614
| false
| 0
| 0.031496
| 0
| 0.204724
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
eeb966b6f818a128a12a3f2fcc9041c57d357619
| 70
|
py
|
Python
|
rate_your_mate/uuid.py
|
marcinxkaminski/rate-your-mate-api
|
b6b0b3e98087dc68b7819e0ffc8369b105735596
|
[
"MIT"
] | null | null | null |
rate_your_mate/uuid.py
|
marcinxkaminski/rate-your-mate-api
|
b6b0b3e98087dc68b7819e0ffc8369b105735596
|
[
"MIT"
] | null | null | null |
rate_your_mate/uuid.py
|
marcinxkaminski/rate-your-mate-api
|
b6b0b3e98087dc68b7819e0ffc8369b105735596
|
[
"MIT"
] | null | null | null |
from uuid import uuid4
def create() -> str:
    """Generate a new random UUID (version 4) and return its canonical string form."""
    return "{}".format(uuid4())
| 11.666667
| 23
| 0.642857
| 10
| 70
| 4.5
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 0.228571
| 70
| 5
| 24
| 14
| 0.796296
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.