hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f4670c16d2ce392501632342f8452852bbfec954
| 133
|
py
|
Python
|
test/__init__.py
|
ToyotaResearchInstitute/task_behavior_ros
|
0bc58f88556c9029c9a579511e1fdab9bd58248b
|
[
"Apache-2.0"
] | 2
|
2017-02-16T00:47:39.000Z
|
2018-05-12T13:34:17.000Z
|
test/__init__.py
|
ToyotaResearchInstitute/task_behavior_ros
|
0bc58f88556c9029c9a579511e1fdab9bd58248b
|
[
"Apache-2.0"
] | 4
|
2017-02-17T19:05:22.000Z
|
2017-05-04T17:41:26.000Z
|
test/__init__.py
|
ToyotaResearchInstitute/task_behavior_ros
|
0bc58f88556c9029c9a579511e1fdab9bd58248b
|
[
"Apache-2.0"
] | 2
|
2019-03-08T06:45:25.000Z
|
2022-03-08T10:08:00.000Z
|
import rospy
def setup_package():
rospy.init_node('test')
def teardown_package():
rospy.signal_shutdown('shutting down')
| 13.3
| 42
| 0.721805
| 17
| 133
| 5.411765
| 0.764706
| 0.26087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 133
| 9
| 43
| 14.777778
| 0.821429
| 0
| 0
| 0
| 0
| 0
| 0.12782
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| true
| 0
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
f46dfe554be8455a2dbb9d9203eafcdc2cedba39
| 50
|
py
|
Python
|
autoscalingsim/scaling/policiesbuilder/scaled/scaling_aggregation_rules/parallel_rules_impl/__init__.py
|
Remit/autoscaling-simulator
|
091943c0e9eedf9543e9305682a067ab60f56def
|
[
"MIT"
] | 6
|
2021-03-10T16:23:10.000Z
|
2022-01-14T04:57:46.000Z
|
autoscalingsim/scaling/policiesbuilder/scaled/scaling_aggregation_rules/parallel_rules_impl/__init__.py
|
Remit/autoscaling-simulator
|
091943c0e9eedf9543e9305682a067ab60f56def
|
[
"MIT"
] | null | null | null |
autoscalingsim/scaling/policiesbuilder/scaled/scaling_aggregation_rules/parallel_rules_impl/__init__.py
|
Remit/autoscaling-simulator
|
091943c0e9eedf9543e9305682a067ab60f56def
|
[
"MIT"
] | 1
|
2022-01-14T04:57:55.000Z
|
2022-01-14T04:57:55.000Z
|
from .max_scale import *
from .min_scale import *
| 16.666667
| 24
| 0.76
| 8
| 50
| 4.5
| 0.625
| 0.611111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 50
| 2
| 25
| 25
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
be5298bad8687277f340107b5f70d0c986d6fe98
| 5,015
|
py
|
Python
|
tests/conversation_manager/test_search_rule.py
|
Rogggger/WeChatterBot
|
377899e8cab4ca5eca9b0136207e2afb97d9acb2
|
[
"BSD-3-Clause"
] | 1
|
2020-04-12T16:30:45.000Z
|
2020-04-12T16:30:45.000Z
|
tests/conversation_manager/test_search_rule.py
|
Jack2313/WeChatterBot
|
377899e8cab4ca5eca9b0136207e2afb97d9acb2
|
[
"BSD-3-Clause"
] | 7
|
2020-04-11T13:22:50.000Z
|
2020-05-14T00:19:37.000Z
|
tests/conversation_manager/test_search_rule.py
|
Jack2313/WeChatterBot
|
377899e8cab4ca5eca9b0136207e2afb97d9acb2
|
[
"BSD-3-Clause"
] | 3
|
2020-04-11T12:09:56.000Z
|
2020-12-16T13:26:20.000Z
|
from unittest import TestCase
from app import create_app
from app.view.conversation_manager import generate_token
import json
class SearchRuleTestCase(TestCase):
"""
Unit tests for the Admin Search Rule.
LJF: all tests clear 2020-5-13
"""
def setUp(self):
self.app = create_app().test_client()
self.myheaders = {'Content-Type': 'application/json'}
self.token = generate_token(b'buaa', 3600)
def test_no_attribute(self):
r = self.app.get(
'admin/search_rule',
headers=self.myheaders
)
result = json.loads(r.data.decode('utf-8'))
self.assertEqual(result['code'], 10000001)
self.assertEqual(r.status_code, 400)
def test_no_username(self):
r = self.app.get(
'admin/search_rule?token=111&id=',
headers=self.myheaders
)
result = json.loads(r.data.decode('utf-8'))
self.assertEqual(result['code'], 10000001)
self.assertEqual(r.status_code, 400)
def test_no_token(self):
r = self.app.get(
'admin/search_rule?username=wechatterbot&id=1',
headers=self.myheaders
)
result = json.loads(r.data.decode('utf-8'))
self.assertEqual(result['code'], 10000001)
self.assertEqual(r.status_code, 400)
def test_wrong_username(self):
r = self.app.get(
'admin/search_rule?username=wechatterwhat' +
'&token='+self.token+'&id=1',
headers=self.myheaders
)
result = json.loads(r.data.decode('utf-8'))
self.assertEqual(result['code'], 10000044)
self.assertEqual(r.status_code, 401)
def test_wrong_token(self):
wrong_token = generate_token(b'what', 3600)
r = self.app.get(
'admin/search_rule?username=wechatterbot' +
'&token=' + wrong_token + '&id=1',
headers=self.myheaders
)
result = json.loads(r.data.decode('utf-8'))
self.assertEqual(result['code'], 10000044)
self.assertEqual(r.status_code, 401)
def test_empty_id_and_empty_text(self):
r = self.app.get(
'admin/search_rule?username=wechatterbot' +
'&token=' + self.token,
headers=self.myheaders
)
result = json.loads(r.data.decode('utf-8'))
self.assertEqual(result['code'], 10000001)
self.assertEqual(r.status_code, 400)
def test_empty_id_and_no_text(self):
r = self.app.get(
'admin/search_rule?username=wechatterbot' +
'&token=' + self.token + '&id=',
headers=self.myheaders
)
result = json.loads(r.data.decode('utf-8'))
self.assertEqual(result['code'], 10000001)
self.assertEqual(r.status_code, 400)
def test_no_id_and_empty_text(self):
r = self.app.get(
'admin/search_rule?username=wechatterbot' +
'&token=' + self.token + '&text=',
headers=self.myheaders
)
result = json.loads(r.data.decode('utf-8'))
self.assertEqual(result['code'], 10000001)
self.assertEqual(r.status_code, 400)
def test_no_id_and_no_text(self):
r = self.app.get(
'admin/search_rule?username=wechatterbot' +
'&token=' + self.token,
headers=self.myheaders
)
result = json.loads(r.data.decode('utf-8'))
self.assertEqual(result['code'], 10000001)
self.assertEqual(r.status_code, 400)
def test_id_not_a_number(self):
r = self.app.get(
'admin/search_rule?username=wechatterbot' +
'&token=' + self.token + '&id=string',
headers=self.myheaders
)
result = json.loads(r.data.decode('utf-8'))
self.assertEqual(result['code'], 10000001)
def test_successful_search_with_text(self):
data = {
'response': '临时回复规则',
'text': '临时规则内容',
'username': 'wechatterbot',
'token': self.token
}
self.app.post(
'http://localhost:5000/admin/create_rule',
data=json.dumps(data),
headers=self.myheaders
)
r = self.app.get(
'admin/search_rule?username=wechatterbot' +
'&token=' + self.token + '&text=临时规则内容',
headers=self.myheaders
)
result = json.loads(r.data.decode('utf-8'))
rules = result['rules']
self.assertEqual(rules[0]['text'], u"临时规则内容")
self.assertEqual(r.status_code, 200)
def test_successful_search_with_id(self):
r = self.app.get(
'admin/search_rule?username=wechatterbot' +
'&token=' + self.token + '&id=1',
headers=self.myheaders
)
result = json.loads(r.data.decode('utf-8'))
rules = result['rules']
self.assertEqual(rules[0]['id'], 1)
self.assertEqual(r.status_code, 200)
self.assertEqual(result['number'], 1)
| 33.885135
| 61
| 0.579063
| 589
| 5,015
| 4.803056
| 0.14601
| 0.127253
| 0.068929
| 0.04666
| 0.79392
| 0.759279
| 0.738777
| 0.738777
| 0.728173
| 0.69848
| 0
| 0.043103
| 0.282951
| 5,015
| 147
| 62
| 34.115646
| 0.743604
| 0.013559
| 0
| 0.5625
| 1
| 0
| 0.163588
| 0.086665
| 0
| 0
| 0
| 0
| 0.1875
| 1
| 0.101563
| false
| 0
| 0.03125
| 0
| 0.140625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
be5c41052fd6659614600f51772827f2e34ad7a2
| 181
|
py
|
Python
|
rx/operators/observable/create.py
|
yutiansut/RxPY
|
c3bbba77f9ebd7706c949141725e220096deabd4
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2018-11-16T09:07:13.000Z
|
2018-11-16T09:07:13.000Z
|
rx/operators/observable/create.py
|
yutiansut/RxPY
|
c3bbba77f9ebd7706c949141725e220096deabd4
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
rx/operators/observable/create.py
|
yutiansut/RxPY
|
c3bbba77f9ebd7706c949141725e220096deabd4
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-05-08T08:23:08.000Z
|
2020-05-08T08:23:08.000Z
|
from rx.core import AnonymousObservable
def create(subscribe):
def _subscribe(observer, _=None):
return subscribe(observer)
return AnonymousObservable(_subscribe)
| 22.625
| 42
| 0.756906
| 18
| 181
| 7.444444
| 0.611111
| 0.253731
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171271
| 181
| 7
| 43
| 25.857143
| 0.893333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.2
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
a3b35a69034a9a0c0be2e9ad16c1f5b49ce95c1b
| 4,968
|
py
|
Python
|
test/integration/022_bigquery_test/test_bigquery_changing_partitions.py
|
joellabes/dbt
|
1060035838650a30e86989cbf2693db7720ff002
|
[
"Apache-2.0"
] | null | null | null |
test/integration/022_bigquery_test/test_bigquery_changing_partitions.py
|
joellabes/dbt
|
1060035838650a30e86989cbf2693db7720ff002
|
[
"Apache-2.0"
] | null | null | null |
test/integration/022_bigquery_test/test_bigquery_changing_partitions.py
|
joellabes/dbt
|
1060035838650a30e86989cbf2693db7720ff002
|
[
"Apache-2.0"
] | null | null | null |
from test.integration.base import DBTIntegrationTest, FakeArgs, use_profile
import json
class TestChangingPartitions(DBTIntegrationTest):
@property
def schema(self):
return "bigquery_test_022"
@property
def models(self):
return "partition-models"
def run_changes(self, before, after):
results = self.run_dbt(['run', '--vars', json.dumps(before)])
self.assertEqual(len(results), 1)
results = self.run_dbt(['run', '--vars', json.dumps(after)])
self.assertEqual(len(results), 1)
def test_partitions(self, expected):
test_results = self.run_dbt(['test', '--vars', json.dumps(expected)])
for result in test_results:
self.assertEqual(result.status, 'pass')
self.assertFalse(result.skipped)
self.assertEqual(int(result.message), 0)
@use_profile('bigquery')
def test_bigquery_add_partition(self):
before = {"partition_by": None, "cluster_by": None}
after = {"partition_by": {'field': 'cur_time',
'data_type': 'timestamp'}, "cluster_by": None}
self.run_changes(before, after)
self.test_partitions({"expected": 1})
@use_profile('bigquery')
def test_bigquery_add_partition_year(self):
before = {"partition_by": None, "cluster_by": None}
after = {"partition_by": {'field': 'cur_time', 'data_type': 'timestamp', 'granularity': 'year'}, "cluster_by": None}
self.run_changes(before, after)
self.test_partitions({"expected": 1})
@use_profile('bigquery')
def test_bigquery_add_partition_month(self):
before = {"partition_by": None, "cluster_by": None}
after = {"partition_by": {'field': 'cur_time', 'data_type': 'timestamp', 'granularity': 'month'}, "cluster_by": None}
self.run_changes(before, after)
self.test_partitions({"expected": 1})
@use_profile('bigquery')
def test_bigquery_add_partition_hour(self):
before = {"partition_by": None, "cluster_by": None}
after = {"partition_by": {'field': 'cur_time', 'data_type': 'timestamp', 'granularity': 'hour'}, "cluster_by": None}
self.run_changes(before, after)
self.test_partitions({"expected": 1})
@use_profile('bigquery')
def test_bigquery_remove_partition(self):
before = {"partition_by": {'field': 'cur_time',
'data_type': 'timestamp'}, "cluster_by": None}
after = {"partition_by": None, "cluster_by": None}
self.run_changes(before, after)
@use_profile('bigquery')
def test_bigquery_change_partitions(self):
before = {"partition_by": {'field': 'cur_time',
'data_type': 'timestamp'}, "cluster_by": None}
after = {"partition_by": {'field': "cur_date"}, "cluster_by": None}
self.run_changes(before, after)
self.test_partitions({"expected": 1})
self.run_changes(after, before)
self.test_partitions({"expected": 1})
@use_profile('bigquery')
def test_bigquery_change_partitions_from_int(self):
before = {"partition_by": {"field": "id", "data_type": "int64", "range": {
"start": 0, "end": 10, "interval": 1}}, "cluster_by": None}
after = {"partition_by": {"field": "cur_date",
"data_type": "date"}, "cluster_by": None}
self.run_changes(before, after)
self.test_partitions({"expected": 1})
self.run_changes(after, before)
self.test_partitions({"expected": 2})
@use_profile('bigquery')
def test_bigquery_add_clustering(self):
before = {"partition_by": {'field': 'cur_time',
'data_type': 'timestamp'}, "cluster_by": None}
after = {"partition_by": {'field': "cur_date"}, "cluster_by": "id"}
self.run_changes(before, after)
@use_profile('bigquery')
def test_bigquery_remove_clustering(self):
before = {"partition_by": {'field': 'cur_time',
'data_type': 'timestamp'}, "cluster_by": "id"}
after = {"partition_by": {'field': "cur_date"}, "cluster_by": None}
self.run_changes(before, after)
@use_profile('bigquery')
def test_bigquery_change_clustering(self):
before = {"partition_by": {'field': 'cur_time',
'data_type': 'timestamp'}, "cluster_by": "id"}
after = {"partition_by": {'field': "cur_date"}, "cluster_by": "name"}
self.run_changes(before, after)
@use_profile('bigquery')
def test_bigquery_change_clustering_strict(self):
before = {'partition_by': {'field': 'cur_time',
'data_type': 'timestamp'}, 'cluster_by': 'id'}
after = {'partition_by': {'field': 'cur_date',
'data_type': 'date'}, 'cluster_by': 'name'}
self.run_changes(before, after)
| 43.2
| 125
| 0.598027
| 547
| 4,968
| 5.149909
| 0.131627
| 0.085907
| 0.096557
| 0.107916
| 0.817891
| 0.787007
| 0.785588
| 0.768548
| 0.719915
| 0.703585
| 0
| 0.005333
| 0.245169
| 4,968
| 114
| 126
| 43.578947
| 0.745867
| 0
| 0
| 0.547368
| 0
| 0
| 0.24537
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 1
| 0.157895
| false
| 0.010526
| 0.021053
| 0.021053
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a3ba0c91ed1aec4b38a95f7ae24936f7ecc4162a
| 624
|
py
|
Python
|
tests/test_operators.py
|
dvillacis/BilevelImagingToolbox
|
99b259499b68141283601ccddb5732bb38f44d24
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2020-11-13T07:44:26.000Z
|
2021-06-01T21:09:00.000Z
|
tests/test_operators.py
|
dvillacis/BilevelImagingToolbox
|
99b259499b68141283601ccddb5732bb38f44d24
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
tests/test_operators.py
|
dvillacis/BilevelImagingToolbox
|
99b259499b68141283601ccddb5732bb38f44d24
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2020-09-09T15:34:18.000Z
|
2020-09-09T15:34:18.000Z
|
import numpy as np
from bilevel_imaging_toolbox import operators
x = np.array([[1,2,3],[4,5,6],[7,8,9]])
print('Forward differences')
op = operators.make_finite_differences_operator((3,3),'fn',1)
print(op.val(x)[:,:,0])
print(op.val(x)[:,:,1])
y = op.val(x)
print(op.conj(y))
print('Backward differences')
op = operators.make_finite_differences_operator((3,3),'bn',1)
print(op.val(x)[:,:,0])
print(op.val(x)[:,:,1])
y = op.val(x)
print(op.conj(y))
print('Centered differences')
op = operators.make_finite_differences_operator((3,3),'cn',1)
print(op.val(x)[:,:,0])
print(op.val(x)[:,:,1])
y = op.val(x)
print(op.conj(y))
| 24
| 61
| 0.669872
| 115
| 624
| 3.53913
| 0.313043
| 0.154791
| 0.132678
| 0.162162
| 0.739558
| 0.739558
| 0.739558
| 0.739558
| 0.739558
| 0.348894
| 0
| 0.041667
| 0.076923
| 624
| 25
| 62
| 24.96
| 0.664931
| 0
| 0
| 0.571429
| 0
| 0
| 0.104167
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.095238
| 0
| 0.095238
| 0.571429
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
a3db6f635cb6cf36755c320fba4ecd16e4d8746d
| 18,411
|
py
|
Python
|
scripts/manage_bonus.py
|
lamproot/telegramh5
|
7e4e9c7b32956c70f269cb630bb68b604e0d13f5
|
[
"WTFPL"
] | 1
|
2018-10-10T04:03:07.000Z
|
2018-10-10T04:03:07.000Z
|
scripts/manage_bonus.py
|
lamproot/telegramh5
|
7e4e9c7b32956c70f269cb630bb68b604e0d13f5
|
[
"WTFPL"
] | null | null | null |
scripts/manage_bonus.py
|
lamproot/telegramh5
|
7e4e9c7b32956c70f269cb630bb68b604e0d13f5
|
[
"WTFPL"
] | 1
|
2019-10-12T11:16:22.000Z
|
2019-10-12T11:16:22.000Z
|
#encoding:utf-8
import mysql
import datetime
import sys
default_encoding = 'utf-8'
if sys.getdefaultencoding() != default_encoding:
reload(sys)
sys.setdefaultencoding(default_encoding)
conn = mysql.db()
now = datetime.datetime.now()
now_second = datetime.datetime.now().strftime('%s')
# 最大分红比例
def maxcash(userrank):
value = 0
sql = """
select value from zx_bonus_rule where category = 'maxcash' and `key` = %s
""" % (userrank)
result = conn.query(sql)
if result:
value = result[0]['value']
return value
def rate():
rate_sql = """
select category, value from zx_bonus_rule where category in ('rongzidun', 'jiangjinbi', 'lovemoney', 'platmoney', 'taxmoney')
"""
rates = conn.query(rate_sql)
conn.close()
if rates:
rates = rates
else:
rates = (
{'category': 'rongzidun', 'value': 25},
{'category': 'jiangjinbi', 'value': 55},
{'category': 'lovemoney', 'value': 1},
{'category': 'platmoney', 'value': 2},
{'category': 'taxmoney', 'value': 17}
)
return rates
# 插入管理补贴明细,流水
def insert_bonus_detail_2(uid, usernumber, realname, managercash):
# 比率配比
rates = rate()
jiangjinbi_award, rongzidun_award, lovemoney_award, platmoney_award, taxmoney_award = 0, 0, 0, 0, 0
for r in rates:
if r['category'] == 'jiangjinbi':
jiangjinbi_rate = r['value'] / 100
jiangjinbi_award = managercash * jiangjinbi_rate
elif r['category'] == 'rongzidun':
rongzidun_rate = r['value'] / 100
rongzidun_award = managercash * rongzidun_rate
elif r['category'] == 'lovemoney':
lovemoney_rate = r['value'] / 100
lovemoney_award = managercash * lovemoney_rate
elif r['category'] == 'platmoney':
platmoney_rate = r['value'] / 100
platmoney_award = managercash * platmoney_rate
elif r['category'] == 'taxmoney':
taxmoney_rate = r['value'] / 100
taxmoney_award = managercash * taxmoney_rate
real_total = managercash - lovemoney_award - platmoney_award - taxmoney_award
zx_member_sql = """
update zx_member set jiangjinbi = jiangjinbi + %s, rongzidun = rongzidun + %s where usernumber = %s
""" % (jiangjinbi_award, rongzidun_award, usernumber)
zx_member = conn.dml(zx_member_sql, 'update')
if zx_member:
max_bonus_sql = """
update zx_member set max_bonus = max_bonus + %s where uid = %s
""" % (managercash, uid)
conn.dml(max_bonus_sql, 'update')
zx_finance_sql = """
update zx_finance set expend = expend + %s, createtime = %s
""" % (managercash, now_second)
conn.dml(zx_finance_sql, 'update')
# 明细
zx_bonus_detail_sql = """
insert into zx_bonus_detail (touserid, tousernumber, torealname, moneytype, jiangjinbi, rongzidun, lovemoney, platmoney, taxmoney, total, real_total, createdate)
values (%s, %s, '%s', %s, %s, %s, %s, %s, %s, %s, %s, %s)
""" % (uid, usernumber, realname, 2, jiangjinbi_award, rongzidun_award, lovemoney_award, platmoney_award, taxmoney_award, managercash, real_total, now_second)
conn.dml(zx_bonus_detail_sql, 'insert')
# 奖金币流水
jiangjinbi_change_sql = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (1, 1, uid, usernumber, realname, 1, 1, '戎子', 4, 1, jiangjinbi_award, now_second)
conn.dml(jiangjinbi_change_sql, 'insert')
jiangjinbi_change_sql_1 = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (1, 1, 1, 1, '戎子', uid, usernumber, realname, 4, 0, jiangjinbi_award, now_second)
conn.dml(jiangjinbi_change_sql_1, 'insert')
# 戎子盾流水
rongzidun_change_sql = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (3, 3, uid, usernumber, realname, 1, 1, '戎子', 4, 1, rongzidun_award, now_second)
conn.dml(rongzidun_change_sql, 'insert')
rongzidun_change_sql_1 = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (3, 3, 1, 1, '戎子', uid, usernumber, realname, 4, 0, rongzidun_award, now_second)
conn.dml(rongzidun_change_sql_1, 'insert')
# 爱心基金流水
lovemoney_change_sql = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (6, 6, uid, usernumber, realname, 1, 1, '戎子', 4, 0, lovemoney_award, now_second)
conn.dml(lovemoney_change_sql, 'insert')
# 爱心基金流水
lovemoney_change_sql_1 = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (6, 6, 1, 1, '戎子', uid, usernumber, realname, 4, 1, lovemoney_award, now_second)
conn.dml(lovemoney_change_sql_1, 'insert')
# 平台管理费流水
platmoney_change_sql = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (7, 7, uid, usernumber, realname, 1, 1, '戎子', 4, 0, platmoney_award, now_second)
conn.dml(platmoney_change_sql, 'insert')
platmoney_change_sql_1 = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (7, 7, 1, 1, '戎子', uid, usernumber, realname, 4, 1, platmoney_award, now_second)
conn.dml(platmoney_change_sql_1, 'insert')
# 税费流水
taxmoney_change_sql = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (8, 8, uid, usernumber, realname, 1, 1, '戎子', 4, 0, taxmoney_award, now_second)
conn.dml(taxmoney_change_sql, 'insert')
taxmoney_change_sql_1 = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (8, 8, 1, 1, '戎子', uid, usernumber, realname, 4, 1, taxmoney_award, now_second)
conn.dml(taxmoney_change_sql_1, 'insert')
return True
# 插入互助补贴明细,流水
def insert_bonus_detail_3(uid, usernumber, realname, leadercash):
# 比率配比
rates = rate()
jiangjinbi_award, rongzidun_award, lovemoney_award, platmoney_award, taxmoney_award = 0, 0, 0, 0, 0
for r in rates:
if r['category'] == 'jiangjinbi':
jiangjinbi_rate = r['value'] / 100
jiangjinbi_award = leadercash * jiangjinbi_rate
elif r['category'] == 'rongzidun':
rongzidun_rate = r['value'] / 100
rongzidun_award = leadercash * rongzidun_rate
elif r['category'] == 'lovemoney':
lovemoney_rate = r['value'] / 100
lovemoney_award = leadercash * lovemoney_rate
elif r['category'] == 'platmoney':
platmoney_rate = r['value'] / 100
platmoney_award = leadercash * platmoney_rate
elif r['category'] == 'taxmoney':
taxmoney_rate = r['value'] / 100
taxmoney_award = leadercash * taxmoney_rate
real_total = leadercash - lovemoney_award - platmoney_award - taxmoney_award
zx_member_sql = """
update zx_member set jiangjinbi = jiangjinbi + %s, rongzidun = rongzidun + %s where usernumber = %s
""" % (jiangjinbi_award, rongzidun_award, usernumber)
zx_member = conn.dml(zx_member_sql, 'update')
if zx_member:
max_bonus_sql = """
update zx_member set max_bonus = max_bonus + %s where uid = %s
""" % (leadercash, uid)
conn.dml(max_bonus_sql, 'update')
zx_finance_sql = """
update zx_finance set expend = expend + %s, createtime = %s
""" % (leadercash, now_second)
conn.dml(zx_finance_sql, 'update')
# 明细
zx_bonus_detail_sql = """
insert into zx_bonus_detail (touserid, tousernumber, torealname, moneytype, jiangjinbi, rongzidun, lovemoney, platmoney, taxmoney, total, real_total, createdate)
values (%s, %s, '%s', %s, %s, %s, %s, %s, %s, %s, %s, %s)
""" % (uid, usernumber, realname, 3, jiangjinbi_award, rongzidun_award, lovemoney_award, platmoney_award, taxmoney_award, leadercash, real_total, now_second)
conn.dml(zx_bonus_detail_sql, 'insert')
# 奖金币流水
jiangjinbi_change_sql = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (1, 1, uid, usernumber, realname, 1, 1, '戎子', 5, 1, jiangjinbi_award, now_second)
conn.dml(jiangjinbi_change_sql, 'insert')
jiangjinbi_change_sql_1 = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (1, 1, 1, 1, '戎子', uid, usernumber, realname, 5, 0, jiangjinbi_award, now_second)
conn.dml(jiangjinbi_change_sql_1, 'insert')
# 戎子盾流水
rongzidun_change_sql = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (3, 3, uid, usernumber, realname, 1, 1, '戎子', 5, 1, rongzidun_award, now_second)
conn.dml(rongzidun_change_sql, 'insert')
rongzidun_change_sql_1 = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (3, 3, 1, 1, '戎子', uid, usernumber, realname, 5, 0, rongzidun_award, now_second)
conn.dml(rongzidun_change_sql_1, 'insert')
# 爱心基金流水
lovemoney_change_sql = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (6, 6, uid, usernumber, realname, 1, 1, '戎子', 5, 0, lovemoney_award, now_second)
conn.dml(lovemoney_change_sql, 'insert')
lovemoney_change_sql_1 = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (6, 6, 1, 1, '戎子', uid, usernumber, realname, 5, 1, lovemoney_award, now_second)
conn.dml(lovemoney_change_sql_1, 'insert')
# 平台管理费流水
platmoney_change_sql = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (7, 7, uid, usernumber, realname, 1, 1, '戎子', 5, 0, platmoney_award, now_second)
conn.dml(platmoney_change_sql, 'insert')
platmoney_change_sql_1 = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (7, 7, 1, 1, '戎子', uid, usernumber, realname, 5, 1, platmoney_award, now_second)
conn.dml(platmoney_change_sql_1, 'insert')
# 税费流水
taxmoney_change_sql = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (8, 8, uid, usernumber, realname, 1, 1, '戎子', 5, 0, taxmoney_award, now_second)
conn.dml(taxmoney_change_sql, 'insert')
taxmoney_change_sql_1 = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (8, 8, 1, 1, '戎子', uid, usernumber, realname, 5, 1, taxmoney_award, now_second)
conn.dml(taxmoney_change_sql_1, 'insert')
return True
def getmemberinfo(uid):
    """Fetch (usernumber, realname) rows for a member; False when not found."""
    # NOTE(review): string-formatted SQL is injection-prone if uid is
    # untrusted — confirm whether conn.query supports bound parameters.
    query = """
select usernumber, realname from zx_member where uid = %s
""" % (uid)
    rows = conn.query(query)
    return rows if rows else False
# Insert mutual-aid subsidy (互助补贴) details and money-change records.
def leaderbonus(uid, managercash):
    """Pay a leader bonus to up to three qualifying ancestors of `uid`.

    The 1st/2nd/3rd eligible ancestor receives rate1/rate2/rate3 percent of
    `managercash`; rates come from zx_bonus_rule (category 'leadercash') with
    a hard-coded fallback. Ancestors that are the root account (uid 1) or
    ordinary members (userrank = 1) are skipped.
    """
    sql = """
select `key`, value from zx_bonus_rule where category = 'leadercash'
"""
    rates = conn.query(sql)
    rate1 = 0
    rate2 = 0
    rate3 = 0
    if not rates:
        # Fallback when no rule rows exist. BUGFIX: the original literal used
        # string keys '1'/'3'/'5', which never matched the integer comparisons
        # below and silently left every rate at 0. Keys 1/2/3 are what the
        # rate-assignment loop expects (values 15/10/5 preserved).
        rates = (
            {'key': 1, 'value': 15},
            {'key': 2, 'value': 10},
            {'key': 3, 'value': 5},
        )
    for rate in rates:
        if rate['key'] == 1:
            rate1 = rate['value']
        elif rate['key'] == 2:
            rate2 = rate['value']
        elif rate['key'] == 3:
            rate3 = rate['value']
    _uids = gettuijiannumber_parent(uid)
    # Filter out the root account and ordinary-card (userrank = 1) members.
    # BUGFIX: the original did `del _uids[i]` while iterating with enumerate,
    # which skips the element following every removal; build a new list instead.
    eligible = []
    for v in _uids:
        if int(v) == 1:
            continue
        filter_member_sql = """
select uid from zx_member where uid = %s and userrank = 1
""" % (v)
        if conn.query(filter_member_sql):
            continue
        eligible.append(v)
    # At most the three nearest eligible ancestors are paid.
    uids = eligible[:3]
    i = 0
    leadercash = 0
    for _uid in uids:
        result = getmemberinfo(_uid)
        if result:
            usernumber = result[0]['usernumber']
            realname = result[0]['realname']
            i += 1
            if i == 1:
                leadercash = managercash * rate1 / 100
            elif i == 2:
                leadercash = managercash * rate2 / 100
            elif i == 3:
                leadercash = managercash * rate3 / 100
            insert_bonus_detail_3(_uid, usernumber, realname, leadercash)
def member_achievement_status(uid):
    """Return True when the member's achievement is still unsettled (status 0)."""
    sql = """
select active_time from zx_member where uid = %s and achievementstatus = 0
""" % (uid)
    return True if conn.query(sql) else False
# Get the parent (recommender) chain from a child uid.
def gettuijiannumber_parent(uid):
    """Return ancestor uids for `uid`, nearest ancestor first, self excluded."""
    sql = """
select recommenduserpath from zx_member where uid = %s
""" % (uid)
    rows = conn.query(sql)
    chain = rows[0]['recommenduserpath'].split(',') if rows else []
    # Drop the final path element (the member itself) and walk upwards.
    return chain[-2::-1]
def getuservalue(parents):
    """Collect [uid, usertitle, managercash-rate] for each titled ancestor.

    Walks `parents` in order and stops at the root account (uid 1); ordinary
    members (userrank = 1) and untitled members (usertitle = 0) are excluded
    by the query itself.
    """
    members = []
    for ancestor in parents:
        if int(ancestor) == 1:
            break
        sql = """
select m.uid, m.usertitle, r.value from zx_member as m left join zx_bonus_rule as r on m.usertitle = r.key
where m.uid = %s and category = 'managercash' and m.userrank != 1 and m.usertitle != 0
""" % (ancestor)
        rows = conn.query(sql)
        if rows:
            row = rows[0]
            members.append([row['uid'], row['usertitle'], row['value']])
    return members
# Get the management-bonus rate for a user title.
def getmaxmanagercash(usertitle):
    """Return the 'managercash' bonus rate for `usertitle`, or 0 when absent."""
    sql = """
select value from zx_bonus_rule where `key` = %s and category = 'managercash'
""" % (usertitle)
    rows = conn.query(sql)
    return rows[0]['value'] if rows else 0
# Get the cash amount corresponding to a member's rank.
def getmembervalue(uid):
    """Return the 'userrank' rule value for the member, or 0 when absent."""
    sql = """
select r.value from zx_member as m left join zx_bonus_rule as r on m.userrank = r.key
where r.category = 'userrank' and m.uid = %s
""" % (uid)
    rows = conn.query(sql)
    return rows[0]['value'] if rows else 0
# Rank-difference ("极差") payout algorithm: each ancestor earns the gap
# between its own rate and the rate already paid to a closer ancestor.
def jicha(value, memberlevels):
    """Distribute management bonuses over `memberlevels` and trigger leader bonuses.

    `memberlevels` is a list of [uid, usertitle, rate] rows, nearest ancestor
    first (as produced by getuservalue). The first ancestor gets its full
    rate; a later ancestor is paid only when its title strictly outranks
    every closer ancestor, and then only the marginal rate. Always returns True.
    """
    for index, val in enumerate(memberlevels):
        if index > 0:
            flag = False
            member_uid = int(memberlevels[index][0])
            member_title = int(memberlevels[index][1])
            member_value = int(memberlevels[index][2])
            i = 0
            for x in range(0, index):
                # Pay only if this ancestor's title is strictly higher than
                # every closer ancestor's; an equal or higher title closer to
                # the member absorbs the bonus and stops the scan.
                if member_title > int(memberlevels[x][1]):
                    flag = True
                elif member_title == int(memberlevels[x][1]):
                    flag = False
                    break
                elif member_title < int(memberlevels[x][1]):
                    flag = False
                    break
                # Rate of the last closer ancestor examined (the baseline
                # already paid out below this one).
                i = int(memberlevels[x][2])
            if flag:
                _member_value = member_value - i  # marginal ("rank-difference") rate
                managercash = value * _member_value / 100
                result = getmemberinfo(member_uid)
                if result:
                    status = insert_bonus_detail_2(member_uid, result[0]['usernumber'], result[0]['realname'], managercash)
                    if status:
                        leaderbonus(member_uid, managercash)
        elif index == 0:
            # Nearest ancestor always receives its full rate.
            member_uid = int(memberlevels[index][0])
            member_title = int(memberlevels[index][1])
            member_value = int(memberlevels[index][2])
            managercash = value * member_value / 100
            result = getmemberinfo(member_uid)
            if result:
                status = insert_bonus_detail_2(member_uid, result[0]['usernumber'], result[0]['realname'], managercash)
                if status:
                    leaderbonus(member_uid, managercash)
    return True
# Update the member's achievement status.
def update_achievement_status(uid):
    """Mark the member's achievement as settled (achievementstatus = 1)."""
    return conn.dml("""
update zx_member set achievementstatus = 1 where uid = %s
""" % (uid), 'update')
# Compute the management bonus triggered by a newly-developed member.
def managerbonus(uid):
    """Settle management bonuses for `uid`'s ancestor chain; True on success."""
    # Rank-based cash value of the triggering member.
    value = getmembervalue(uid)
    # Ancestor chain, nearest first.
    parents = gettuijiannumber_parent(uid)
    if parents:
        # Keep only ancestors holding a star title.
        memberlevels = getuservalue(parents)
        if memberlevels:
            return jicha(value, memberlevels)
    return False
# Management subsidy and mutual-aid subsidy settlement entry point.
def main():
    """CLI entry: settle bonuses for the uid given as argv[1] (Python 2 script)."""
    if len(sys.argv) >= 2:
        uid = sys.argv[1]
        status = managerbonus(uid)
        if status:
            update_achievement_status(uid)
    # NOTE(review): indentation reconstructed — conn.close() may originally
    # have been inside the argv check; confirm against the original file.
    conn.close()
    print "ok"

if __name__ == '__main__':
    main()
| 37.269231
| 171
| 0.663679
| 2,430
| 18,411
| 4.865021
| 0.069959
| 0.040941
| 0.055828
| 0.066994
| 0.77618
| 0.768144
| 0.768144
| 0.755118
| 0.739046
| 0.738454
| 0
| 0.019412
| 0.182988
| 18,411
| 493
| 172
| 37.344828
| 0.76652
| 0.013525
| 0
| 0.602978
| 0
| 0.124069
| 0.422757
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.007444
| null | null | 0.002481
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a3f2ac7f104016f3a45e8901add2779ecdab6422
| 64
|
py
|
Python
|
catch/__init__.py
|
Small-Bodies-Node/catch
|
880b8ea9bf4cea430cd54c2a319d0a05b0930239
|
[
"BSD-3-Clause"
] | 2
|
2019-07-17T14:34:51.000Z
|
2020-03-25T16:05:03.000Z
|
catch/__init__.py
|
Small-Bodies-Node/catch
|
880b8ea9bf4cea430cd54c2a319d0a05b0930239
|
[
"BSD-3-Clause"
] | null | null | null |
catch/__init__.py
|
Small-Bodies-Node/catch
|
880b8ea9bf4cea430cd54c2a319d0a05b0930239
|
[
"BSD-3-Clause"
] | null | null | null |
from .catch import *
from .config import *
from . import schema
| 16
| 21
| 0.734375
| 9
| 64
| 5.222222
| 0.555556
| 0.425532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 64
| 3
| 22
| 21.333333
| 0.903846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
43338eb2417ed4740dac2beed88a4649aadc4a5c
| 143
|
py
|
Python
|
02-URLs-and-Templates-Lab/djangoProject/djangoProject/main_app/views.py
|
M0673N/Python-Web-Basics
|
cecc27f7a12f990756edcc8885290eb3b2e487b7
|
[
"MIT"
] | null | null | null |
02-URLs-and-Templates-Lab/djangoProject/djangoProject/main_app/views.py
|
M0673N/Python-Web-Basics
|
cecc27f7a12f990756edcc8885290eb3b2e487b7
|
[
"MIT"
] | null | null | null |
02-URLs-and-Templates-Lab/djangoProject/djangoProject/main_app/views.py
|
M0673N/Python-Web-Basics
|
cecc27f7a12f990756edcc8885290eb3b2e487b7
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
def show_main_app(request):
    """Render the main-app landing page."""
    template_name = 'main_app/index.html'
    return render(request, template_name)
| 17.875
| 49
| 0.762238
| 21
| 143
| 5.047619
| 0.809524
| 0.132075
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146853
| 143
| 7
| 50
| 20.428571
| 0.868852
| 0.160839
| 0
| 0
| 0
| 0
| 0.161017
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
4334bf33213599d7b6fee77a854809fc600b8ed4
| 51,687
|
py
|
Python
|
src/nn_doc_retrieval/nn_doc_model.py
|
Derrors/Combine-FEVER-NSMN
|
e3458ee99f086e3d44c9da3ec3e2885511cd42c2
|
[
"MIT"
] | 4
|
2020-10-09T16:46:56.000Z
|
2022-01-03T18:42:24.000Z
|
src/nn_doc_retrieval/nn_doc_model.py
|
Frankey419/combine-FEVER-NSMN
|
8577ad47092c052d6c0456415cb2eebc2a392984
|
[
"MIT"
] | 4
|
2020-11-02T01:00:33.000Z
|
2020-11-02T01:07:45.000Z
|
src/nn_doc_retrieval/nn_doc_model.py
|
Frankey419/combine-FEVER-NSMN
|
8577ad47092c052d6c0456415cb2eebc2a392984
|
[
"MIT"
] | 2
|
2020-12-10T12:50:45.000Z
|
2021-03-06T11:26:53.000Z
|
import random
import torch
from allennlp.data.iterators import BasicIterator
from allennlp.data.token_indexers import SingleIdTokenIndexer, ELMoTokenCharactersIndexer
from allennlp.modules import Embedding, Elmo
from torch import nn
from utils import fever_db
import numpy as np
import os
import config
from data_util.data_readers.fever_sselection_reader import SSelectorReader
import nn_doc_retrieval.disabuigation_training as disamb
from sentence_retrieval.sampler_for_nmodel import get_full_list, post_filter, get_additional_list
from data_util.exvocab import load_vocab_embeddings
from log_util import save_tool
import utils
from flint import torch_util
import torch.optim as optim
import torch.nn.functional as F
from tqdm import tqdm
from neural_modules import biDafAttn
from sample_for_nli.tf_idf_sample_v1_0 import sample_v1_0, select_sent_for_eval, convert_evidence2scoring_format
from utils import c_scorer, common
class ESIM(nn.Module):
    """ESIM-style sequence matching model.

    Two stacked BiLSTMs with BiDAF co-attention in between; a dropout+MLP
    classifier consumes the max-pooled features of both sequences.
    """

    def __init__(self, rnn_size_in=(1024 + 300, 1024 + 300), rnn_size_out=(300, 300), max_l=100,
                 mlp_d=300, num_of_class=3, drop_r=0.5, activation_type='relu'):
        super(ESIM, self).__init__()
        self.dropout_layer = nn.Dropout(drop_r)
        # Layer-1 BiLSTM encodes the raw input embeddings.
        self.lstm_1 = nn.LSTM(input_size=rnn_size_in[0], hidden_size=rnn_size_out[0],
                              num_layers=1, bidirectional=True, batch_first=True)
        # Layer-2 BiLSTM composes the co-attended features.
        self.lstm_2 = nn.LSTM(input_size=rnn_size_in[1], hidden_size=rnn_size_out[1],
                              num_layers=1, bidirectional=True, batch_first=True)
        # Projects the 4-way co-attention concatenation (2*4 directions) back down.
        self.projection = nn.Linear(rnn_size_out[0] * 2 * 4, rnn_size_out[0])
        self.max_l = max_l
        self.bidaf = biDafAttn(300)
        self.mlp_1 = nn.Linear(rnn_size_out[1] * 2 * 4, mlp_d)
        self.sm = nn.Linear(mlp_d, num_of_class)
        if activation_type == 'relu':
            activation = nn.ReLU()
        elif activation_type == 'tanh':
            activation = nn.Tanh()
        else:
            raise ValueError("Not a valid activation!")
        self.classifier = nn.Sequential(*[nn.Dropout(drop_r), self.mlp_1, activation, nn.Dropout(drop_r), self.sm])

    def count_params(self):
        """Print the parameter count over 2-D weight matrices (biases excluded)."""
        total_c = 0
        for param in self.parameters():
            if len(param.size()) == 2:
                d1, d2 = param.size()[0], param.size()[1]
                total_c += d1 * d2
        print("Total count:", total_c)

    def display(self):
        """Print every named parameter and its size."""
        for name, param in self.named_parameters():
            print(name, param.data.size())

    def forward(self, layer1_s1, layer2_s1, l1, layer1_s2, layer2_s2, l2):  # [B, T]
        # Encode both sequences with the shared layer-1 LSTM.
        p_s1 = self.dropout_layer(layer1_s1)
        p_s2 = self.dropout_layer(layer1_s2)
        s1_layer1_out = torch_util.auto_rnn(self.lstm_1, p_s1, l1)
        s2_layer1_out = torch_util.auto_rnn(self.lstm_1, p_s2, l2)
        # BiDAF similarity matrix and mutual attention between the sequences.
        S = self.bidaf.similarity(s1_layer1_out, l1, s2_layer1_out, l2)
        s1_att, s2_att = self.bidaf.get_both_tile(S, s1_layer1_out, s2_layer1_out)
        # Classic ESIM enrichment: [a; b; a - b; a * b].
        s1_coattentioned = torch.cat([s1_layer1_out, s1_att, s1_layer1_out - s1_att,
                                      s1_layer1_out * s1_att], dim=2)
        s2_coattentioned = torch.cat([s2_layer1_out, s2_att, s2_layer1_out - s2_att,
                                      s2_layer1_out * s2_att], dim=2)
        p_s1_coattentioned = F.relu(self.projection(s1_coattentioned))
        p_s2_coattentioned = F.relu(self.projection(s2_coattentioned))
        # Re-attach the layer-2 inputs (ELMo embeddings in Model.forward)
        # before the composition LSTM.
        s1_coatt_features = torch.cat([p_s1_coattentioned, layer2_s1], dim=2)
        s2_coatt_features = torch.cat([p_s2_coattentioned, layer2_s2], dim=2)
        s1_coatt_features = self.dropout_layer(s1_coatt_features)
        s2_coatt_features = self.dropout_layer(s2_coatt_features)
        s1_layer2_out = torch_util.auto_rnn(self.lstm_2, s1_coatt_features, l1)
        s2_layer2_out = torch_util.auto_rnn(self.lstm_2, s2_coatt_features, l2)
        # Max-over-time pooling, then the same 4-way feature combination.
        s1_lay2_maxout = torch_util.max_along_time(s1_layer2_out, l1)
        s2_lay2_maxout = torch_util.max_along_time(s2_layer2_out, l2)
        features = torch.cat([s1_lay2_maxout, s2_lay2_maxout,
                              torch.abs(s1_lay2_maxout - s2_lay2_maxout),
                              s1_lay2_maxout * s2_lay2_maxout], dim=1)
        return self.classifier(features)
class Model(nn.Module):
    """Document-selection model: GloVe + ELMo embeddings feeding an ESIM matcher."""

    def __init__(self, weight, vocab_size, embedding_dim,
                 rnn_size_in=(1024 + 300, 1024 + 300),
                 rnn_size_out=(300, 300), max_l=150,
                 mlp_d=300, num_of_class=3, drop_r=0.5, activation_type='relu'):
        super(Model, self).__init__()
        self.glove_embd_layer = Embedding(vocab_size, embedding_dim,
                                          weight=weight, padding_index=0)
        # Pretrained ELMo (5.5B) weights fetched from the AllenNLP S3 bucket.
        options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_options.json"
        weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5"
        num_of_elmo = 1
        self.max_l = max_l
        self.elmo_embd_layer = Elmo(options_file, weight_file, num_of_elmo, dropout=0)
        self.esim_layer = ESIM(rnn_size_in, rnn_size_out, max_l, mlp_d, num_of_class, drop_r, activation_type)

    def display(self, exclude=None):
        """Print trainable parameters and their total element count.

        Parameters whose name contains any substring in `exclude`
        (default: {'glove'}) are printed but not counted.
        """
        total_p_size = 0
        if exclude is None:
            exclude = {'glove'}
        for name, param in self.named_parameters():
            if param.requires_grad:
                print(name, param.data.size())
                exclude_this = False
                for exclude_name in exclude:
                    if exclude_name in str(name):
                        exclude_this = True
                if exclude_this:
                    continue
                # NOTE: local `nn` shadows the torch.nn module alias inside
                # this method only.
                nn = 1
                for s in list(param.size()):
                    nn = nn * s
                total_p_size += nn
        print('Total Size:', total_p_size)

    def raw_input_to_esim_input(self, s_tokens, s_elmo_chars):
        """Truncate to max_l and embed one sequence.

        Returns (glove_embedding, elmo_embedding, lengths).
        """
        s_tokens = torch_util.length_truncate(s_tokens, self.max_l)
        s1_glove_embd = self.glove_embd_layer(s_tokens)
        s1_elmo_out = self.elmo_embd_layer(s_elmo_chars)
        s1_elmo_embd = torch_util.length_truncate(s1_elmo_out, self.max_l, is_elmo=True)
        # Token and ELMo truncation must agree on the padding mask.
        s1_mask, s1_len = torch_util.get_length_and_mask(s_tokens)
        assert torch.equal(s1_elmo_embd['mask'], s1_mask)
        return s1_glove_embd, s1_elmo_embd['elmo_representations'][0], s1_len

    def forward(self, batch):
        # Move all inputs onto the model's device.
        s1_tokens = batch['premise']['tokens'].to(next(self.parameters()).device)
        s1_elmo_chars = batch['premise']['elmo_chars'].to(next(self.parameters()).device)
        s2_tokens = batch['hypothesis']['tokens'].to(next(self.parameters()).device)
        s2_elmo_chars = batch['hypothesis']['elmo_chars'].to(next(self.parameters()).device)
        s1_glove_embd, s1_elmo_embd, s1_len = self.raw_input_to_esim_input(s1_tokens, s1_elmo_chars)
        s2_glove_embd, s2_elmo_embd, s2_len = self.raw_input_to_esim_input(s2_tokens, s2_elmo_chars)
        # Layer-1 input is [glove; elmo]; layer-2 input is elmo alone.
        s1_layer1_in = torch.cat((s1_glove_embd, s1_elmo_embd), dim=2)
        s1_layer2_in = s1_elmo_embd
        s2_layer1_in = torch.cat((s2_glove_embd, s2_elmo_embd), dim=2)
        s2_layer2_in = s2_elmo_embd
        # print(s1_layer1_in.size())
        # print(s1_layer2_in.size())
        # print(s2_layer1_in.size())
        # print(s2_layer2_in.size())
        esim_out = self.esim_layer(s1_layer1_in, s1_layer2_in, s1_len,
                                   s2_layer1_in, s2_layer2_in, s2_len)
        return esim_out
def eval_model(model, data_iter, criterion):
print("Evaluating ...")
model.eval()
n_correct = loss = 0
totoal_size = 0
y_pred_list = []
y_true_list = []
for batch_idx, batch in enumerate(data_iter):
out = model(batch)
y = batch['label']
n_correct += (torch.max(out, 1)[1].view(y.size()) == y).sum().item()
y_pred_list.extend(torch.max(out, 1)[1].view(y.size()).tolist())
y_true_list.extend(y.tolist())
loss += criterion(out, y).item() * y.size(0)
totoal_size += y.size(0)
print('n_correct:', n_correct)
print('total_size:', totoal_size)
avg_acc = 100. * n_correct / totoal_size
avg_loss = loss / totoal_size
return avg_acc, avg_loss
def full_eval_model(model, data_iter, criterion, dev_data_list):
    """Evaluate and score every item of `dev_data_list` in one pass.

    Batches carry 'selection_label' and 'pid'; predictions are matched back
    to dev_data_list by 'selection_id' and each dict gets 'score' (class-0
    logit) and 'prob' (class-0 softmax probability) written in place.
    Returns (accuracy_percent, average_loss, dev_data_list).
    Fix vs. original: removed the unused `id2label` dict (dead code).
    """
    # selection_label encoding: select -> 0, non-select -> 1, hidden -> -2
    with torch.no_grad():
        print("Evaluating ...")
        model.eval()
        n_correct = loss = 0
        total_size = 0
        y_pred_logits_list = []
        y_pred_prob_list = []
        y_id_list = []
        for batch_idx, batch in enumerate(tqdm(data_iter)):
            out = model(batch)
            prob = F.softmax(out, dim=1)
            y = batch['selection_label']
            y_id_list.extend(list(batch['pid']))
            n_correct += (torch.max(out, 1)[1].view(y.size()) == y).sum().item()
            y_pred_logits_list.extend(out[:, 0].tolist())
            y_pred_prob_list.extend(prob[:, 0].tolist())
            loss += criterion(out, y).item() * y.size(0)
            total_size += y.size(0)
        assert len(y_id_list) == len(dev_data_list)
        assert len(y_pred_logits_list) == len(dev_data_list)
        for i in range(len(dev_data_list)):
            # Predictions must line up 1:1 with the input items.
            assert str(y_id_list[i]) == str(dev_data_list[i]['selection_id'])
            dev_data_list[i]['score'] = y_pred_logits_list[i]
            dev_data_list[i]['prob'] = y_pred_prob_list[i]
        print('n_correct:', n_correct)
        print('total_size:', total_size)
        avg_acc = 100. * n_correct / total_size
        avg_loss = loss / total_size
    return avg_acc, avg_loss, dev_data_list
def hidden_eval(model, data_iter, dev_data_list):
    """Score every item of `dev_data_list` with the model (no loss/accuracy).

    Like full_eval_model but without a criterion: writes 'score' (class-0
    logit) and 'prob' (class-0 softmax probability) into each dict, matched
    by 'selection_id', and returns the list.
    Fix vs. original: removed the unused `id2label` dict (dead code).
    """
    # selection_label encoding: select -> 0, non-select -> 1, hidden -> -2
    with torch.no_grad():
        print("Evaluating ...")
        model.eval()
        total_size = 0
        y_pred_logits_list = []
        y_pred_prob_list = []
        y_id_list = []
        for batch_idx, batch in enumerate(tqdm(data_iter)):
            out = model(batch)
            prob = F.softmax(out, dim=1)
            y = batch['selection_label']
            y_id_list.extend(list(batch['pid']))
            y_pred_logits_list.extend(out[:, 0].tolist())
            y_pred_prob_list.extend(prob[:, 0].tolist())
            total_size += y.size(0)
        assert len(y_id_list) == len(dev_data_list)
        assert len(y_pred_logits_list) == len(dev_data_list)
        for i in range(len(dev_data_list)):
            # Predictions must line up 1:1 with the input items.
            assert str(y_id_list[i]) == str(dev_data_list[i]['selection_id'])
            dev_data_list[i]['score'] = y_pred_logits_list[i]
            dev_data_list[i]['prob'] = y_pred_prob_list[i]
        print('total_size:', total_size)
    return dev_data_list
def train_fever():
    """Train the sentence-selection ESIM model on FEVER.

    Resamples negatives each epoch with an annealed keep-probability,
    evaluates periodically on dev, and checkpoints whenever the sent-id
    tracking score improves. Checkpoints and a source snapshot are written
    under a freshly generated experiment directory.
    """
    num_epoch = 8
    seed = 12
    batch_size = 128
    experiment_name = "simple_nn"
    lazy = True
    torch.manual_seed(seed)
    # Negative-sampling keep-probability, decayed each epoch.
    keep_neg_sample_prob = 0.5
    sample_prob_decay = 0.1
    dev_upstream_file = config.RESULT_PATH / "sent_retri/2018_07_05_17:17:50_r/dev.jsonl"
    train_upstream_file = config.RESULT_PATH / "sent_retri/2018_07_05_17:17:50_r/train.jsonl"
    # Prepare Data
    token_indexers = {
        'tokens': SingleIdTokenIndexer(namespace='tokens'),  # raw tokens
        'elmo_chars': ELMoTokenCharactersIndexer(namespace='elmo_characters')  # elmo characters
    }
    train_fever_data_reader = SSelectorReader(token_indexers=token_indexers, lazy=lazy)
    # dev_fever_data_reader = SSelectorReader(token_indexers=token_indexers, lazy=False)
    dev_fever_data_reader = SSelectorReader(token_indexers=token_indexers, lazy=lazy)
    complete_upstream_dev_data = get_full_list(config.T_FEVER_DEV_JSONL, dev_upstream_file, pred=True)
    print("Dev size:", len(complete_upstream_dev_data))
    dev_instances = dev_fever_data_reader.read(complete_upstream_dev_data)
    # Load Vocabulary
    biterator = BasicIterator(batch_size=batch_size)
    dev_biterator = BasicIterator(batch_size=batch_size)
    vocab, weight_dict = load_vocab_embeddings(config.DATA_ROOT / "vocab_cache" / "nli_basic")
    # This is important: register label tokens, with "hidden" pinned to -2.
    vocab.add_token_to_namespace("true", namespace="labels")
    vocab.add_token_to_namespace("false", namespace="labels")
    vocab.add_token_to_namespace("hidden", namespace="labels")
    vocab.change_token_with_index_to_namespace("hidden", -2, namespace='labels')
    # Label value
    vocab.get_index_to_token_vocabulary('labels')
    print(vocab.get_token_to_index_vocabulary('labels'))
    print(vocab.get_vocab_size('tokens'))
    biterator.index_with(vocab)
    dev_biterator.index_with(vocab)
    # exit(0)
    # Build Model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", index=0)
    device_num = -1 if device.type == 'cpu' else 0
    model = Model(weight=weight_dict['glove.840B.300d'],
                  vocab_size=vocab.get_vocab_size('tokens'),
                  embedding_dim=300, max_l=300, num_of_class=2)
    model.display()
    model.to(device)
    # Create Log File
    file_path_prefix, date = save_tool.gen_file_prefix(f"{experiment_name}")
    # Save the source code for reproducibility.
    script_name = os.path.basename(__file__)
    with open(os.path.join(file_path_prefix, script_name), 'w') as out_f, open(__file__, 'r') as it:
        out_f.write(it.read())
        out_f.flush()
    # Save source code end.
    best_dev = -1
    iteration = 0
    start_lr = 0.0002
    optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=start_lr)
    criterion = nn.CrossEntropyLoss()
    for i_epoch in range(num_epoch):
        print("Resampling...")
        # Resampling: fresh negative subsample each epoch.
        complete_upstream_train_data = get_full_list(config.T_FEVER_TRAIN_JSONL, train_upstream_file, pred=False)
        filtered_train_data = post_filter(complete_upstream_train_data, keep_prob=keep_neg_sample_prob,
                                          seed=12 + i_epoch)
        # Change the seed to avoid duplicate samples across epochs.
        keep_neg_sample_prob -= sample_prob_decay
        print("Sampled_length:", len(filtered_train_data))
        sampled_train_instances = train_fever_data_reader.read(filtered_train_data)
        train_iter = biterator(sampled_train_instances, shuffle=True, num_epochs=1, cuda_device=device_num)
        for i, batch in tqdm(enumerate(train_iter)):
            model.train()
            out = model(batch)
            y = batch['selection_label']
            loss = criterion(out, y)
            # No decay
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            iteration += 1
            # Evaluate every 25k steps early on, then every 10k.
            if i_epoch <= 4:
                mod = 25000
            else:
                mod = 10000
            if iteration % mod == 0:
                eval_iter = dev_biterator(dev_instances, shuffle=False, num_epochs=1, cuda_device=device_num)
                dev_score, dev_loss, complete_upstream_dev_data = full_eval_model(model, eval_iter, criterion,
                                                                                  complete_upstream_dev_data)
                dev_results_list = score_converter_v0(config.T_FEVER_DEV_JSONL, complete_upstream_dev_data)
                eval_mode = {'check_sent_id_correct': True, 'standard': True}
                strict_score, acc_score, pr, rec, f1 = c_scorer.fever_score(dev_results_list, config.T_FEVER_DEV_JSONL,
                                                                            mode=eval_mode, verbose=False)
                total = len(dev_results_list)
                hit = eval_mode['check_sent_id_correct_hits']
                tracking_score = hit / total
                print(f"Dev(clf_acc/pr/rec/f1/loss):{dev_score}/{pr}/{rec}/{f1}/{dev_loss}")
                print(f"Tracking score:", f"{tracking_score}")
                # Checkpoint whenever the tracking score improves.
                need_save = False
                if tracking_score > best_dev:
                    best_dev = tracking_score
                    need_save = True
                if need_save:
                    save_path = os.path.join(
                        file_path_prefix,
                        f'i({iteration})_epoch({i_epoch})_'
                        f'(tra_score:{tracking_score}|clf_acc:{dev_score}|pr:{pr}|rec:{rec}|f1:{f1}|loss:{dev_loss})'
                    )
                    torch.save(model.state_dict(), save_path)
def train_fever_v1():
    """Train the document-selection (disambiguation) model on FEVER.

    Differs from train_fever: uses disamb sampling with a positive/negative
    ratio, evaluates with the doc-only FEVER scorer, and additionally runs a
    full evaluation + checkpoint at the end of every epoch.
    """
    num_epoch = 10
    seed = 12
    batch_size = 64
    dev_batch_size = 128
    experiment_name = "simple_nn_doc_first_sent"
    # experiment_name = "simple_nn_doc"
    lazy = True
    torch.manual_seed(seed)
    contain_first_sentence = True
    # Positive/negative sampling ratio for disamb training.
    pn_ratio = 1.0
    # keep_neg_sample_prob = 0.4
    # sample_prob_decay = 0.05
    dev_upstream_file = config.RESULT_PATH / "doc_retri_bls/docretri.basic.nopageview/dev.jsonl"
    train_upstream_file = config.RESULT_PATH / "doc_retri_bls/docretri.basic.nopageview/train.jsonl"
    dev_data_list = common.load_jsonl(dev_upstream_file)
    # Prepare Data
    token_indexers = {
        'tokens': SingleIdTokenIndexer(namespace='tokens'),  # raw tokens
        'elmo_chars': ELMoTokenCharactersIndexer(namespace='elmo_characters')  # elmo characters
    }
    train_fever_data_reader = SSelectorReader(token_indexers=token_indexers, lazy=lazy, max_l=180)
    # dev_fever_data_reader = SSelectorReader(token_indexers=token_indexers, lazy=False)
    dev_fever_data_reader = SSelectorReader(token_indexers=token_indexers, lazy=lazy, max_l=180)
    cursor = fever_db.get_cursor()
    complete_upstream_dev_data = disamb.sample_disamb_inference(common.load_jsonl(dev_upstream_file), cursor,
                                                                contain_first_sentence=contain_first_sentence)
    print("Dev size:", len(complete_upstream_dev_data))
    dev_instances = dev_fever_data_reader.read(complete_upstream_dev_data)
    # Load Vocabulary
    biterator = BasicIterator(batch_size=batch_size)
    dev_biterator = BasicIterator(batch_size=dev_batch_size)
    vocab, weight_dict = load_vocab_embeddings(config.DATA_ROOT / "vocab_cache" / "nli_basic")
    # This is important: register selection labels, with "hidden" pinned to -2.
    vocab.add_token_to_namespace("true", namespace="selection_labels")
    vocab.add_token_to_namespace("false", namespace="selection_labels")
    vocab.add_token_to_namespace("hidden", namespace="selection_labels")
    vocab.change_token_with_index_to_namespace("hidden", -2, namespace='selection_labels')
    # Label value
    vocab.get_index_to_token_vocabulary('selection_labels')
    print(vocab.get_token_to_index_vocabulary('selection_labels'))
    print(vocab.get_vocab_size('tokens'))
    biterator.index_with(vocab)
    dev_biterator.index_with(vocab)
    # exit(0)
    # Build Model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", index=0)
    device_num = -1 if device.type == 'cpu' else 0
    model = Model(weight=weight_dict['glove.840B.300d'],
                  vocab_size=vocab.get_vocab_size('tokens'),
                  embedding_dim=300, max_l=160, num_of_class=2)
    model.display()
    model.to(device)
    # Create Log File
    file_path_prefix, date = save_tool.gen_file_prefix(f"{experiment_name}")
    # Save the source code for reproducibility.
    script_name = os.path.basename(__file__)
    with open(os.path.join(file_path_prefix, script_name), 'w') as out_f, open(__file__, 'r') as it:
        out_f.write(it.read())
        out_f.flush()
    # Save source code end.
    best_dev = -1
    iteration = 0
    start_lr = 0.0002
    optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=start_lr)
    criterion = nn.CrossEntropyLoss()
    for i_epoch in range(num_epoch):
        print("Resampling...")
        # Resampling: fresh disamb training sample each epoch.
        complete_upstream_train_data = disamb.sample_disamb_training_v0(common.load_jsonl(train_upstream_file),
                                                                        cursor, pn_ratio, contain_first_sentence,
                                                                        only_found=False)
        random.shuffle(complete_upstream_train_data)
        print("Sample Prob.:", pn_ratio)
        print("Sampled_length:", len(complete_upstream_train_data))
        sampled_train_instances = train_fever_data_reader.read(complete_upstream_train_data)
        train_iter = biterator(sampled_train_instances, shuffle=True, num_epochs=1, cuda_device=device_num)
        for i, batch in tqdm(enumerate(train_iter)):
            model.train()
            out = model(batch)
            y = batch['selection_label']
            loss = criterion(out, y)
            # No decay
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            iteration += 1
            # Evaluate every 1000 steps early on, then every 500.
            if i_epoch <= 5:
                mod = 1000
            else:
                mod = 500
            if iteration % mod == 0:
                eval_iter = dev_biterator(dev_instances, shuffle=False, num_epochs=1, cuda_device=device_num)
                complete_upstream_dev_data = hidden_eval(model, eval_iter, complete_upstream_dev_data)
                disamb.enforce_disabuigation_into_retrieval_result_v0(complete_upstream_dev_data,
                                                                      dev_data_list)
                oracle_score, pr, rec, f1 = c_scorer.fever_doc_only(dev_data_list, dev_data_list, max_evidence=5)
                print(f"Dev(raw_acc/pr/rec/f1):{oracle_score}/{pr}/{rec}/{f1}")
                print("Strict score:", oracle_score)
                print(f"Eval Tracking score:", f"{oracle_score}")
                # Checkpoint whenever the oracle score improves.
                need_save = False
                if oracle_score > best_dev:
                    best_dev = oracle_score
                    need_save = True
                if need_save:
                    save_path = os.path.join(
                        file_path_prefix,
                        f'i({iteration})_epoch({i_epoch})_'
                        f'(tra_score:{oracle_score}|pr:{pr}|rec:{rec}|f1:{f1})'
                    )
                    torch.save(model.state_dict(), save_path)
        #
        # End-of-epoch evaluation and (possible) checkpoint.
        print("Epoch Evaluation...")
        eval_iter = dev_biterator(dev_instances, shuffle=False, num_epochs=1, cuda_device=device_num)
        complete_upstream_dev_data = hidden_eval(model, eval_iter, complete_upstream_dev_data)
        disamb.enforce_disabuigation_into_retrieval_result_v0(complete_upstream_dev_data,
                                                              dev_data_list)
        oracle_score, pr, rec, f1 = c_scorer.fever_doc_only(dev_data_list, dev_data_list, max_evidence=5)
        print(f"Dev(raw_acc/pr/rec/f1):{oracle_score}/{pr}/{rec}/{f1}")
        print("Strict score:", oracle_score)
        print(f"Eval Tracking score:", f"{oracle_score}")
        need_save = False
        if oracle_score > best_dev:
            best_dev = oracle_score
            need_save = True
        if need_save:
            save_path = os.path.join(
                file_path_prefix,
                f'i({iteration})_epoch({i_epoch})_e'
                f'(tra_score:{oracle_score}|pr:{pr}|rec:{rec}|f1:{f1})'
            )
            torch.save(model.state_dict(), save_path)
def doc_model_eval():
    """Evaluate a saved document-selection checkpoint on the FEVER dev set.

    Loads the hard-coded checkpoint, scores the dev data with hidden_eval,
    dumps the scored items to an ablation JSONL file, then applies the v1
    disambiguation rule and prints doc-only FEVER metrics.
    """
    seed = 12
    batch_size = 128
    dev_batch_size = 128
    lazy = True
    torch.manual_seed(seed)
    contain_first_sentence = True
    # keep_neg_sample_prob = 0.4
    # sample_prob_decay = 0.05
    # model_path = "/home/easonnie/projects/FunEver/saved_models/08-26-15:13:35_simple_nn_doc/i(7000)_epoch(1)_(tra_score:0.9164416441644164|pr:0.4283778377837277|rec:0.8746624662466247|f1:0.575095052581864)"
    model_path = "/home/easonnie/projects/FunEver/saved_models/08-26-15:46:10_simple_nn_doc_first_sent/i(9000)_epoch(1)_(tra_score:0.9212421242124212|pr:0.4299679967996279|rec:0.8818631863186318|f1:0.5780819247968391)"
    dev_upstream_file = config.RESULT_PATH / "doc_retri_bls/docretri.basic.nopageview/dev.jsonl"
    # train_upstream_file = config.RESULT_PATH / "doc_retri_bls/docretri.basic.nopageview/train.jsonl"
    dev_data_list = common.load_jsonl(dev_upstream_file)
    # Prepare Data
    token_indexers = {
        'tokens': SingleIdTokenIndexer(namespace='tokens'),  # raw tokens
        'elmo_chars': ELMoTokenCharactersIndexer(namespace='elmo_characters')  # elmo characters
    }
    dev_fever_data_reader = SSelectorReader(token_indexers=token_indexers, lazy=lazy, max_l=180)
    cursor = fever_db.get_cursor()
    complete_upstream_dev_data = disamb.sample_disamb_inference(common.load_jsonl(dev_upstream_file), cursor,
                                                                contain_first_sentence=contain_first_sentence)
    print("Dev size:", len(complete_upstream_dev_data))
    dev_instances = dev_fever_data_reader.read(complete_upstream_dev_data)
    # Load Vocabulary
    # NOTE(review): `biterator` is built but never used here; only dev_biterator is.
    biterator = BasicIterator(batch_size=batch_size)
    dev_biterator = BasicIterator(batch_size=dev_batch_size)
    vocab, weight_dict = load_vocab_embeddings(config.DATA_ROOT / "vocab_cache" / "nli_basic")
    # This is important: register selection labels, with "hidden" pinned to -2.
    vocab.add_token_to_namespace("true", namespace="selection_labels")
    vocab.add_token_to_namespace("false", namespace="selection_labels")
    vocab.add_token_to_namespace("hidden", namespace="selection_labels")
    vocab.change_token_with_index_to_namespace("hidden", -2, namespace='selection_labels')
    # Label value
    vocab.get_index_to_token_vocabulary('selection_labels')
    print(vocab.get_token_to_index_vocabulary('selection_labels'))
    print(vocab.get_vocab_size('tokens'))
    biterator.index_with(vocab)
    dev_biterator.index_with(vocab)
    # exit(0)
    # Build Model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", index=0)
    device_num = -1 if device.type == 'cpu' else 0
    model = Model(weight=weight_dict['glove.840B.300d'],
                  vocab_size=vocab.get_vocab_size('tokens'),
                  embedding_dim=300, max_l=160, num_of_class=2)
    model.load_state_dict(torch.load(model_path))
    model.display()
    model.to(device)
    eval_iter = dev_biterator(dev_instances, shuffle=False, num_epochs=1, cuda_device=device_num)
    complete_upstream_dev_data = hidden_eval(model, eval_iter, complete_upstream_dev_data)
    common.save_jsonl(complete_upstream_dev_data,
                      "/home/easonnie/projects/FunEver/saved_models/08-26-15:46:10_simple_nn_doc_first_sent/ablation_neural_doc.jsonl")
    disamb.enforce_disabuigation_into_retrieval_result_v1(complete_upstream_dev_data,
                                                          dev_data_list)
    oracle_score, pr, rec, f1 = c_scorer.fever_doc_only(dev_data_list, dev_data_list, max_evidence=5)
    print(f"Dev(raw_acc/pr/rec/f1):{oracle_score}/{pr}/{rec}/{f1}")
    print("Strict score:", oracle_score)
    print(f"Eval Tracking score:", f"{oracle_score}")
def pipeline_function(upstream_file, model_path):
    """Pipeline entry: score the items of `upstream_file` with a saved model.

    Same setup as doc_model_eval but parameterized by input file and
    checkpoint path. Returns the hidden_eval-scored item list; the
    disambiguation-enforcement and scoring steps are currently disabled
    (commented out) and left to the caller.
    """
    seed = 12
    batch_size = 128
    dev_batch_size = 128
    lazy = True
    torch.manual_seed(seed)
    contain_first_sentence = True
    dev_upstream_file = upstream_file
    # NOTE(review): dev_data_list is loaded but unused while the
    # enforce_disabuigation call below stays commented out.
    dev_data_list = common.load_jsonl(dev_upstream_file)
    # Prepare Data
    token_indexers = {
        'tokens': SingleIdTokenIndexer(namespace='tokens'),  # raw tokens
        'elmo_chars': ELMoTokenCharactersIndexer(namespace='elmo_characters')  # elmo characters
    }
    dev_fever_data_reader = SSelectorReader(token_indexers=token_indexers, lazy=lazy, max_l=180)
    cursor = fever_db.get_cursor()
    complete_upstream_dev_data = disamb.sample_disamb_inference(common.load_jsonl(dev_upstream_file), cursor,
                                                                contain_first_sentence=contain_first_sentence)
    print("Dev size:", len(complete_upstream_dev_data))
    dev_instances = dev_fever_data_reader.read(complete_upstream_dev_data)
    # Load Vocabulary
    biterator = BasicIterator(batch_size=batch_size)
    dev_biterator = BasicIterator(batch_size=dev_batch_size)
    vocab, weight_dict = load_vocab_embeddings(config.DATA_ROOT / "vocab_cache" / "nli_basic")
    # This is important: register selection labels, with "hidden" pinned to -2.
    vocab.add_token_to_namespace("true", namespace="selection_labels")
    vocab.add_token_to_namespace("false", namespace="selection_labels")
    vocab.add_token_to_namespace("hidden", namespace="selection_labels")
    vocab.change_token_with_index_to_namespace("hidden", -2, namespace='selection_labels')
    # Label value
    vocab.get_index_to_token_vocabulary('selection_labels')
    print(vocab.get_token_to_index_vocabulary('selection_labels'))
    print(vocab.get_vocab_size('tokens'))
    biterator.index_with(vocab)
    dev_biterator.index_with(vocab)
    # exit(0)
    # Build Model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", index=0)
    device_num = -1 if device.type == 'cpu' else 0
    model = Model(weight=weight_dict['glove.840B.300d'],
                  vocab_size=vocab.get_vocab_size('tokens'),
                  embedding_dim=300, max_l=160, num_of_class=2)
    model.load_state_dict(torch.load(model_path))
    model.display()
    model.to(device)
    eval_iter = dev_biterator(dev_instances, shuffle=False, num_epochs=1)
    complete_upstream_dev_data = hidden_eval(model, eval_iter, complete_upstream_dev_data)
    # common.save_jsonl(complete_upstream_dev_data,
    #                   "/home/easonnie/projects/FunEver/saved_models/08-26-15:46:10_simple_nn_doc_first_sent/ablation_neural_doc.jsonl")
    # remember to add this back
    # disamb.enforce_disabuigation_into_retrieval_result_v1(complete_upstream_dev_data,
    #                                                       dev_data_list)
    dev_doc_score_list = complete_upstream_dev_data
    return dev_doc_score_list
    # oracle_score, pr, rec, f1 = c_scorer.fever_doc_only(dev_data_list, dev_data_list, max_evidence=5)
    # print(f"Dev(raw_acc/pr/rec/f1):{oracle_score}/{pr}/{rec}/{f1}")
    # print("Strict score:", oracle_score)
    # print(f"Eval Tracking score:", f"{oracle_score}")
def utest_results_debug():
    """Manual debug driver for document-retrieval scoring on the dev split.

    Loads the upstream retrieval results, prints oracle doc-only FEVER scores
    before and after enforcing the pre-computed neural disambiguation scores,
    at max_evidence 5 and 10. Side effects: printing only.
    """
    cursor = fever_db.get_cursor()  # NOTE(review): unused below — presumably kept for the commented-out inference call
    contain_first_sentence = True
    dev_upstream_file = config.RESULT_PATH / "doc_retri_bls/docretri.basic.nopageview/dev.jsonl"
    # dev_upstream_file = config.RESULT_PATH / "doc_retri/docretri.pageview/dev.jsonl"
    # train_upstream_file = config.RESULT_PATH / "doc_retri_bls/docretri.basic.nopageview/train.jsonl"
    dev_data_list = common.load_jsonl(dev_upstream_file)
    # disamb.item_remove_old_rule(dev_data_list)
    disamb.item_resorting(dev_data_list, 5)
    # Baseline: score the upstream retrieval against itself (oracle, doc-only).
    oracle_score, pr, rec, f1 = c_scorer.fever_doc_only(dev_data_list, dev_data_list, max_evidence=5)
    print(f"Dev(raw_acc/pr/rec/f1):{oracle_score}/{pr}/{rec}/{f1}")
    oracle_score, pr, rec, f1 = c_scorer.fever_doc_only(dev_data_list, dev_data_list, max_evidence=10)
    print(f"Dev(raw_acc/pr/rec/f1):{oracle_score}/{pr}/{rec}/{f1}")
    # Dev(raw_acc/pr/rec/f1):0.9198419841984199/0.4589658965896083/0.8797629762976298/0.6032307383226128 length: 3.25
    # Dev(raw_acc/pr/rec/f1):0.9202920292029203/0.5114148914891038/0.8804380438043804/0.6470067565581602 length: 2.89
    #
    # exit(0)
    # complete_upstream_dev_data = disamb.sample_disamb_inference(common.load_jsonl(dev_upstream_file), cursor,
    #                                                             contain_first_sentence=contain_first_sentence)
    # Pre-computed neural doc-selection scores (hard-coded absolute path).
    complete_upstream_dev_data = common.load_jsonl(
        "/home/easonnie/projects/FunEver/saved_models/08-26-15:46:10_simple_nn_doc_first_sent/ablation_neural_doc.jsonl")
    disamb.enforce_disabuigation_into_retrieval_result_v2(complete_upstream_dev_data,
                                                          dev_data_list, prob_sh=0.00005)
    # Re-score after disambiguation enforcement, first with 5 then 10 evidence docs.
    oracle_score, pr, rec, f1 = c_scorer.fever_doc_only(dev_data_list, dev_data_list, max_evidence=5)
    print(f"Dev(raw_acc/pr/rec/f1):{oracle_score}/{pr}/{rec}/{f1}")
    print("Strict score:", oracle_score)
    print(f"Eval Tracking score:", f"{oracle_score}")
    disamb.item_resorting(dev_data_list, 10)
    oracle_score, pr, rec, f1 = c_scorer.fever_doc_only(dev_data_list, dev_data_list, max_evidence=10)
    print(f"Dev(raw_acc/pr/rec/f1):{oracle_score}/{pr}/{rec}/{f1}")
    print("Strict score:", oracle_score)
    print(f"Eval Tracking score:", f"{oracle_score}")
    # Eval Tracking score: 0.9202420242024203 prob
    # Eval Tracking score: 0.9202420242024203 score
def utest_results():
    """Manual evaluation driver: oracle doc-only FEVER score before/after
    enforcing the pre-computed neural disambiguation results (prob_sh=0.0001).

    Side effects: printing only.
    """
    cursor = fever_db.get_cursor()  # NOTE(review): unused below — kept for the commented-out inference call
    contain_first_sentence = True
    dev_upstream_file = config.RESULT_PATH / "doc_retri_bls/docretri.basic.nopageview/dev.jsonl"
    #
    # dev_upstream_file = config.RESULT_PATH / "doc_retri/docretri.pageview/dev.jsonl"
    train_upstream_file = config.RESULT_PATH / "doc_retri_bls/docretri.basic.nopageview/train.jsonl"
    dev_data_list = common.load_jsonl(dev_upstream_file)
    # disamb.item_resorting(dev_data_list)
    # disamb.item_remove_old_rule(dev_data_list)
    # Baseline oracle score of the raw upstream retrieval.
    oracle_score, pr, rec, f1 = c_scorer.fever_doc_only(dev_data_list, dev_data_list, max_evidence=5)
    print(f"Dev(raw_acc/pr/rec/f1):{oracle_score}/{pr}/{rec}/{f1}")
    print("Strict score:", oracle_score)
    print(f"Eval Tracking score:", f"{oracle_score}")
    # oracle_score, pr, rec, f1 = c_scorer.fever_doc_only(dev_data_list, dev_data_list, max_evidence=10)
    #
    # print(f"Dev(raw_acc/pr/rec/f1):{oracle_score}/{pr}/{rec}/{f1}")
    # print("Strict score:", oracle_score)
    # print(f"Eval Tracking score:", f"{oracle_score}")
    # Dev(raw_acc/pr/rec/f1):0.9198419841984199/0.4589658965896083/0.8797629762976298/0.6032307383226128 length: 3.25
    # Dev(raw_acc/pr/rec/f1):0.9202920292029203/0.5114148914891038/0.8804380438043804/0.6470067565581602 length: 2.89
    # print(f"Dev(raw_acc/pr/rec/f1):{oracle_score}/{pr}/{rec}/{f1}")
    #
    # exit(0)
    # complete_upstream_dev_data = disamb.sample_disamb_inference(common.load_jsonl(dev_upstream_file), cursor,
    #                                                             contain_first_sentence=contain_first_sentence)
    # Pre-computed neural doc-selection scores (hard-coded absolute path).
    complete_upstream_dev_data = common.load_jsonl(
        "/home/easonnie/projects/FunEver/saved_models/08-26-15:46:10_simple_nn_doc_first_sent/ablation_neural_doc.jsonl")
    disamb.enforce_disabuigation_into_retrieval_result_v2(complete_upstream_dev_data,
                                                          dev_data_list, prob_sh=0.0001)
    # Re-score after disambiguation enforcement.
    oracle_score, pr, rec, f1 = c_scorer.fever_doc_only(dev_data_list, dev_data_list, max_evidence=5)
    print(f"Dev(raw_acc/pr/rec/f1):{oracle_score}/{pr}/{rec}/{f1}")
    print("Strict score:", oracle_score)
    print(f"Eval Tracking score:", f"{oracle_score}")
    # oracle_score, pr, rec, f1 = c_scorer.fever_doc_only(dev_data_list, dev_data_list, max_evidence=10)
    #
    # print(f"Dev(raw_acc/pr/rec/f1):{oracle_score}/{pr}/{rec}/{f1}")
    # print("Strict score:", oracle_score)
    # print(f"Eval Tracking score:", f"{oracle_score}")
    # Eval Tracking score: 0.9202420242024203 prob
    # Eval Tracking score: 0.9202420242024203 score
def build_relatedness_for_train():
    """Score doc-selection candidates with a trained selector model and save them.

    Samples positive/negative disambiguation pairs from the upstream retrieval
    file (currently the DEV file, despite the function name), runs the saved
    model over them via hidden_eval, and writes the scored list to a
    hard-coded jsonl path.
    """
    seed = 12
    batch_size = 128
    dev_batch_size = 128
    lazy = True
    torch.manual_seed(seed)
    contain_first_sentence = True
    pn_ratio = 1.0  # positive/negative sampling ratio passed to sample_disamb_training_v0
    # keep_neg_sample_prob = 0.4
    # sample_prob_decay = 0.05
    # model_path = "/home/easonnie/projects/FunEver/saved_models/08-26-15:13:35_simple_nn_doc/i(7000)_epoch(1)_(tra_score:0.9164416441644164|pr:0.4283778377837277|rec:0.8746624662466247|f1:0.575095052581864)"
    model_path = "/home/easonnie/projects/FunEver/saved_models/08-26-15:46:10_simple_nn_doc_first_sent/i(9000)_epoch(1)_(tra_score:0.9212421242124212|pr:0.4299679967996279|rec:0.8818631863186318|f1:0.5780819247968391)"
    dev_upstream_file = config.RESULT_PATH / "doc_retri_bls/docretri.basic.nopageview/dev.jsonl"
    # train
    # train_upstream_file = config.RESULT_PATH / "doc_retri_bls/docretri.basic.nopageview/train.jsonl"
    # dev_data_list = common.load_jsonl(dev_upstream_file)
    train_upstream_file = dev_upstream_file  # NOTE(review): "train" here deliberately points at the dev file
    train_data_list = common.load_jsonl(train_upstream_file)
    # Prepare Data
    token_indexers = {
        'tokens': SingleIdTokenIndexer(namespace='tokens'),  # This is the raw tokens
        'elmo_chars': ELMoTokenCharactersIndexer(namespace='elmo_characters')  # This is the elmo_characters
    }
    dev_fever_data_reader = SSelectorReader(token_indexers=token_indexers, lazy=lazy, max_l=180)
    cursor = fever_db.get_cursor()
    # complete_upstream_dev_data = disamb.sample_disamb_inference(common.load_jsonl(dev_upstream_file), cursor,
    #                                                             contain_first_sentence=contain_first_sentence)
    #
    complete_upstream_train_data = disamb.sample_disamb_training_v0(
        train_data_list,
        cursor, pn_ratio, contain_first_sentence,
        only_found=False)
    # complete_upstream_train_data = complete_upstream_dev_data
    print("Train size:", len(complete_upstream_train_data))
    dev_instances = dev_fever_data_reader.read(complete_upstream_train_data)
    # Load Vocabulary
    biterator = BasicIterator(batch_size=batch_size)
    dev_biterator = BasicIterator(batch_size=dev_batch_size)
    vocab, weight_dict = load_vocab_embeddings(config.DATA_ROOT / "vocab_cache" / "nli_basic")
    # This is important: register the selection labels before indexing instances.
    vocab.add_token_to_namespace("true", namespace="selection_labels")
    vocab.add_token_to_namespace("false", namespace="selection_labels")
    vocab.add_token_to_namespace("hidden", namespace="selection_labels")
    vocab.change_token_with_index_to_namespace("hidden", -2, namespace='selection_labels')
    # Label value
    vocab.get_index_to_token_vocabulary('selection_labels')
    print(vocab.get_token_to_index_vocabulary('selection_labels'))
    print(vocab.get_vocab_size('tokens'))
    biterator.index_with(vocab)
    dev_biterator.index_with(vocab)
    # exit(0)
    # Build Model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", index=0)
    device_num = -1 if device.type == 'cpu' else 0
    model = Model(weight=weight_dict['glove.840B.300d'],
                  vocab_size=vocab.get_vocab_size('tokens'),
                  embedding_dim=300, max_l=160, num_of_class=2)
    model.load_state_dict(torch.load(model_path))
    model.display()
    model.to(device)
    # Run inference over the sampled training candidates and persist the scores.
    eval_iter = dev_biterator(dev_instances, shuffle=False, num_epochs=1, cuda_device=device_num)
    complete_upstream_dev_data = hidden_eval(model, eval_iter, complete_upstream_train_data)
    common.save_jsonl(complete_upstream_dev_data,
                      "/home/easonnie/projects/FunEver/saved_models/08-26-15:46:10_simple_nn_doc_first_sent/extra_needed_training_data/dev_doc_list.jsonl")
    # disamb.enforce_disabuigation_into_retrieval_result_v1(complete_upstream_dev_data,
    #                                                       dev_data_list)
    # oracle_score, pr, rec, f1 = c_scorer.fever_doc_only(dev_data_list, dev_data_list, max_evidence=5)
    #
    # print(f"Dev(raw_acc/pr/rec/f1):{oracle_score}/{pr}/{rec}/{f1}")
    # print("Strict score:", oracle_score)
    # print(f"Eval Tracking score:", f"{oracle_score}")
def debug_fever():
    """Debug run of the sentence-selector evaluation path.

    Builds the dev pipeline (readers, vocab, iterators, model), snapshots this
    script next to the experiment logs, then runs two back-to-back hidden
    evaluations of the freshly-built model (no state dict is loaded here, and
    the optimizer/criterion are created but never stepped in this function),
    saving a checkpoint whenever the tracking score improves.
    """
    num_epoch = 8
    seed = 12
    batch_size = 128
    experiment_name = "simple_nn"
    lazy = True
    torch.manual_seed(seed)
    keep_neg_sample_prob = 0.6
    sample_prob_decay = 0.1
    dev_upstream_file = config.RESULT_PATH / "doc_retri/cn_util_Jul17_docretri.singularize/dev.jsonl"
    train_upstream_file = config.RESULT_PATH / "doc_retri/cn_util_Jul17_docretri.singularize/train.jsonl"
    # Prepare Data
    token_indexers = {
        'tokens': SingleIdTokenIndexer(namespace='tokens'),  # This is the raw tokens
        'elmo_chars': ELMoTokenCharactersIndexer(namespace='elmo_characters')  # This is the elmo_characters
    }
    train_fever_data_reader = SSelectorReader(token_indexers=token_indexers, lazy=lazy, max_l=300)
    # dev_fever_data_reader = SSelectorReader(token_indexers=token_indexers, lazy=False)
    dev_fever_data_reader = SSelectorReader(token_indexers=token_indexers, lazy=lazy, max_l=300)
    complete_upstream_dev_data = get_full_list(config.T_FEVER_DEV_JSONL, dev_upstream_file, pred=True)
    print("Dev size:", len(complete_upstream_dev_data))
    dev_instances = dev_fever_data_reader.read(complete_upstream_dev_data)
    # Load Vocabulary
    biterator = BasicIterator(batch_size=batch_size)
    dev_biterator = BasicIterator(batch_size=batch_size)
    vocab, weight_dict = load_vocab_embeddings(config.DATA_ROOT / "vocab_cache" / "nli_basic")
    # This is important: register the selection labels before indexing instances.
    vocab.add_token_to_namespace("true", namespace="selection_labels")
    vocab.add_token_to_namespace("false", namespace="selection_labels")
    vocab.add_token_to_namespace("hidden", namespace="selection_labels")
    vocab.change_token_with_index_to_namespace("hidden", -2, namespace='selection_labels')
    # Label value
    vocab.get_index_to_token_vocabulary('selection_labels')
    print(vocab.get_token_to_index_vocabulary('selection_labels'))
    print(vocab.get_vocab_size('tokens'))
    biterator.index_with(vocab)
    dev_biterator.index_with(vocab)
    # exit(0)
    # Build Model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", index=0)
    device_num = -1 if device.type == 'cpu' else 0
    model = Model(weight=weight_dict['glove.840B.300d'],
                  vocab_size=vocab.get_vocab_size('tokens'),
                  embedding_dim=300, max_l=280, num_of_class=2)
    model.display()
    model.to(device)
    # Create Log File
    file_path_prefix, date = save_tool.gen_file_prefix(f"{experiment_name}")
    # Save the source code.
    script_name = os.path.basename(__file__)
    with open(os.path.join(file_path_prefix, script_name), 'w') as out_f, open(__file__, 'r') as it:
        out_f.write(it.read())
        out_f.flush()
    # Save source code end.
    best_dev = -1
    iteration = 0
    i_epoch = 0
    start_lr = 0.0002
    optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=start_lr)
    criterion = nn.CrossEntropyLoss()
    # First evaluation pass.
    eval_iter = dev_biterator(dev_instances, shuffle=False, num_epochs=1, cuda_device=device_num)
    complete_upstream_dev_data = hidden_eval(model, eval_iter, complete_upstream_dev_data)
    dev_results_list = score_converter_v0(config.T_FEVER_DEV_JSONL, complete_upstream_dev_data)
    eval_mode = {'check_sent_id_correct': True, 'standard': True}
    strict_score, acc_score, pr, rec, f1 = c_scorer.fever_score(dev_results_list, config.T_FEVER_DEV_JSONL,
                                                                mode=eval_mode, verbose=False)
    total = len(dev_results_list)
    hit = eval_mode['check_sent_id_correct_hits']  # presumably fever_score records hit counts into eval_mode — verify in c_scorer
    tracking_score = hit / total
    print(f"Dev(raw_acc/pr/rec/f1):{acc_score}/{pr}/{rec}/{f1}/")
    print("Strict score:", strict_score)
    print(f"Eval Tracking score:", f"{tracking_score}")
    need_save = False
    if tracking_score > best_dev:
        best_dev = tracking_score
        need_save = True
    if need_save:
        save_path = os.path.join(
            file_path_prefix,
            f'i({iteration})_epoch({i_epoch})_'
            f'(tra_score:{tracking_score}|raw_acc:{acc_score}|pr:{pr}|rec:{rec}|f1:{f1})'
        )
        torch.save(model.state_dict(), save_path)
    # Second ("epoch") evaluation pass — same steps, checkpoint tagged "_epoch".
    print("Epoch Evaluation...")
    eval_iter = dev_biterator(dev_instances, shuffle=False, num_epochs=1, cuda_device=device_num)
    complete_upstream_dev_data = hidden_eval(model, eval_iter, complete_upstream_dev_data)
    dev_results_list = score_converter_v0(config.T_FEVER_DEV_JSONL, complete_upstream_dev_data)
    eval_mode = {'check_sent_id_correct': True, 'standard': True}
    strict_score, acc_score, pr, rec, f1 = c_scorer.fever_score(dev_results_list, config.T_FEVER_DEV_JSONL,
                                                                mode=eval_mode, verbose=False)
    total = len(dev_results_list)
    hit = eval_mode['check_sent_id_correct_hits']
    tracking_score = hit / total
    print(f"Dev(raw_acc/pr/rec/f1):{acc_score}/{pr}/{rec}/{f1}/")
    print("Strict score:", strict_score)
    print(f"Eval Tracking score:", f"{tracking_score}")
    if tracking_score > best_dev:
        best_dev = tracking_score
        save_path = os.path.join(
            file_path_prefix,
            f'i({iteration})_epoch({i_epoch})_'
            f'(tra_score:{tracking_score}|raw_acc:{acc_score}|pr:{pr}|rec:{rec}|f1:{f1})_epoch'
        )
        torch.save(model.state_dict(), save_path)
def score_converter_v0(org_data_file, full_sent_list):
    """Attach selected-sentence predictions to the original FEVER data.

    :param org_data_file: path to the original FEVER-format jsonl file.
    :param full_sent_list: scored candidate sentences; each item carries
        'selection_id' ("<claim_id><##>..."), 'sid', 'prob' and 'score'.
    :return: the original item list, each item augmented with
        'scored_sentids', 'predicted_sentids' (top 5), 'predicted_evidence'
        and 'predicted_label' (copied from the gold label).
    """
    d_list = common.load_jsonl(org_data_file)
    # Group candidate sentences by the claim id encoded in selection_id.
    augmented_dict = dict()
    print("Build selected sentences file:", len(full_sent_list))
    for sent_item in tqdm(full_sent_list):
        selection_id = sent_item['selection_id']  # The id for the current one selection.
        org_id = int(selection_id.split('<##>')[0])
        if org_id in augmented_dict:
            augmented_dict[org_id].append(sent_item)
        else:
            augmented_dict[org_id] = [sent_item]
    for item in d_list:
        if int(item['id']) not in augmented_dict:
            cur_predicted_sentids = []
        else:
            cur_predicted_sentids = []  # formatting: doc_id + c_score.SENTLINT + line_number
            sents = augmented_dict[int(item['id'])]
            # Modify some mechanism here to select sentences either by score or by label.
            for sent_i in sents:
                # Keep only sentences the model marks as selected (prob >= 0.5).
                if sent_i['prob'] >= 0.5:
                    cur_predicted_sentids.append((sent_i['sid'], sent_i['score']))
                # del sent_i['prob']
            # Highest score first.
            cur_predicted_sentids = sorted(cur_predicted_sentids, key=lambda x: -x[1])
        item['scored_sentids'] = cur_predicted_sentids
        item['predicted_sentids'] = [sid for sid, _ in item['scored_sentids']][:5]
        item['predicted_evidence'] = convert_evidence2scoring_format(item['predicted_sentids'])
        item['predicted_label'] = item['label']  # give ground truth label
    # Removing all score and prob from the shared sentence items (in-place).
    for sent_item in full_sent_list:
        if 'score' in sent_item.keys():
            del sent_item['score']
            del sent_item['prob']
    return d_list
def pipeline_first_sent_selection(org_t_file, upstream_in_file, model_save_path):
    """Run the trained sentence selector over upstream doc-retrieval output.

    :param org_t_file: original FEVER-format jsonl file (claims).
    :param upstream_in_file: upstream document-retrieval jsonl result.
    :param model_save_path: path to the trained selector model state dict.
    :return: full candidate sentence list with model scores attached by hidden_eval.
    """
    batch_size = 128
    lazy = True
    SAVE_PATH = model_save_path
    print("Model From:", SAVE_PATH)
    dev_upstream_file = upstream_in_file
    # Prepare Data
    token_indexers = {
        'tokens': SingleIdTokenIndexer(namespace='tokens'),  # This is the raw tokens
        'elmo_chars': ELMoTokenCharactersIndexer(namespace='elmo_characters')  # This is the elmo_characters
    }
    dev_fever_data_reader = SSelectorReader(token_indexers=token_indexers, lazy=lazy)
    complete_upstream_dev_data = get_full_list(org_t_file, dev_upstream_file, pred=True)
    print("Dev size:", len(complete_upstream_dev_data))
    dev_instances = dev_fever_data_reader.read(complete_upstream_dev_data)
    # Load Vocabulary
    dev_biterator = BasicIterator(batch_size=batch_size)
    vocab, weight_dict = load_vocab_embeddings(config.DATA_ROOT / "vocab_cache" / "nli_basic")
    # This is important: register the selection labels before indexing instances.
    vocab.add_token_to_namespace("true", namespace="selection_labels")
    vocab.add_token_to_namespace("false", namespace="selection_labels")
    vocab.add_token_to_namespace("hidden", namespace="selection_labels")
    vocab.change_token_with_index_to_namespace("hidden", -2, namespace='selection_labels')
    # Label value
    vocab.get_index_to_token_vocabulary('selection_labels')
    print(vocab.get_token_to_index_vocabulary('selection_labels'))
    print(vocab.get_vocab_size('tokens'))
    dev_biterator.index_with(vocab)
    # exit(0)
    # Build Model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", index=0)
    device_num = -1 if device.type == 'cpu' else 0
    model = Model(weight=weight_dict['glove.840B.300d'],
                  vocab_size=vocab.get_vocab_size('tokens'),
                  embedding_dim=300, max_l=300, num_of_class=2)
    model.load_state_dict(torch.load(SAVE_PATH))
    model.display()
    model.to(device)
    # Score every candidate sentence with the loaded model.
    eval_iter = dev_biterator(dev_instances, shuffle=False, num_epochs=1, cuda_device=device_num)
    dev_sent_full_list = hidden_eval(model, eval_iter, complete_upstream_dev_data)
    return dev_sent_full_list
def get_score_multihop(t_data_file, additional_file, model_path, item_key='prioritized_docids_aside', top_k=6):
    """Score additional (multi-hop) candidate sentences with the trained selector.

    :param t_data_file: original FEVER-format jsonl file.
    :param additional_file: upstream file holding extra candidate documents.
    :param model_path: path to the trained selector model state dict.
    :param item_key: key of the extra doc-id list (passed to get_additional_list).
    :param top_k: how many extra candidates to keep (passed to get_additional_list).
    :return: additional sentence list with model scores attached by hidden_eval.
    """
    batch_size = 64
    lazy = True
    SAVE_PATH = model_path
    print("Model From:", SAVE_PATH)
    additional_sentence_list = get_additional_list(t_data_file, additional_file, item_key=item_key, top_k=top_k)
    # Prepare Data
    token_indexers = {
        'tokens': SingleIdTokenIndexer(namespace='tokens'),  # This is the raw tokens
        'elmo_chars': ELMoTokenCharactersIndexer(namespace='elmo_characters')  # This is the elmo_characters
    }
    dev_fever_data_reader = SSelectorReader(token_indexers=token_indexers, lazy=lazy)
    print("Additional Dev size:", len(additional_sentence_list))
    dev_instances = dev_fever_data_reader.read(additional_sentence_list)
    # Load Vocabulary
    dev_biterator = BasicIterator(batch_size=batch_size)
    vocab, weight_dict = load_vocab_embeddings(config.DATA_ROOT / "vocab_cache" / "nli_basic")
    # This is important: register the selection labels before indexing instances.
    vocab.add_token_to_namespace("true", namespace="selection_labels")
    vocab.add_token_to_namespace("false", namespace="selection_labels")
    vocab.add_token_to_namespace("hidden", namespace="selection_labels")
    vocab.change_token_with_index_to_namespace("hidden", -2, namespace='selection_labels')
    # Label value
    vocab.get_index_to_token_vocabulary('selection_labels')
    print(vocab.get_token_to_index_vocabulary('selection_labels'))
    print(vocab.get_vocab_size('tokens'))
    dev_biterator.index_with(vocab)
    # exit(0)
    # Build Model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", index=0)
    device_num = -1 if device.type == 'cpu' else 0
    model = Model(weight=weight_dict['glove.840B.300d'],
                  vocab_size=vocab.get_vocab_size('tokens'),
                  embedding_dim=300, max_l=300, num_of_class=2)
    model.load_state_dict(torch.load(SAVE_PATH))
    model.display()
    model.to(device)
    # Score every additional candidate sentence with the loaded model.
    eval_iter = dev_biterator(dev_instances, shuffle=False, num_epochs=1, cuda_device=device_num)
    additional_sentence_list = hidden_eval(model, eval_iter, additional_sentence_list)
    return additional_sentence_list
if __name__ == "__main__":
    # Entry point. Only the debug evaluation is active; the other pipeline
    # stages are kept commented out for manual switching during experiments.
    # train_fever_v1()
    # doc_model_eval()
    # utest_results()
    utest_results_debug()
    # build_relatedness_for_train()
| 40.827014
| 218
| 0.67456
| 6,994
| 51,687
| 4.619817
| 0.068058
| 0.024697
| 0.020426
| 0.038439
| 0.828294
| 0.80861
| 0.790133
| 0.776082
| 0.766674
| 0.752716
| 0
| 0.038511
| 0.219301
| 51,687
| 1,266
| 219
| 40.827014
| 0.762217
| 0.125409
| 0
| 0.677019
| 0
| 0.014907
| 0.133004
| 0.06099
| 0
| 0
| 0
| 0
| 0.008696
| 1
| 0.027329
| false
| 0
| 0.028571
| 0
| 0.070807
| 0.095652
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
433fdb86b35046ed2a1fdbc23b831aecd9c596ef
| 3,167
|
py
|
Python
|
src/masonite/foundation/response_handler.py
|
cercos/masonite
|
f7f220efa7fae833683e9f07ce13c3795a87d3b8
|
[
"MIT"
] | 1,816
|
2018-02-14T01:59:51.000Z
|
2022-03-31T17:09:20.000Z
|
src/masonite/foundation/response_handler.py
|
cercos/masonite
|
f7f220efa7fae833683e9f07ce13c3795a87d3b8
|
[
"MIT"
] | 340
|
2018-02-11T00:27:26.000Z
|
2022-03-21T12:00:24.000Z
|
src/masonite/foundation/response_handler.py
|
cercos/masonite
|
f7f220efa7fae833683e9f07ce13c3795a87d3b8
|
[
"MIT"
] | 144
|
2018-03-18T00:08:16.000Z
|
2022-02-26T01:51:58.000Z
|
def response_handler(environ, start_response):
    """The WSGI Application Server.
    Arguments:
        environ {dict} -- The WSGI environ dictionary
        start_response {WSGI callable}
    Returns:
        WSGI Response
    """
    # Imported lazily so the application container is built by the project's
    # wsgi module before this handler touches it.
    from wsgi import application
    application.bind("environ", environ)
    """Add Environ To Service Container
    Add the environ to the service container. The environ is generated by the
    the WSGI server above and used by a service provider to manipulate the
    incoming requests
    """
    # """Execute All Service Providers That Require The WSGI Server
    # Run all service provider boot methods if the wsgi attribute is true.
    # """
    try:
        for provider in application.get_providers():
            application.resolve(provider.boot)
    except Exception as e:
        # Any provider-boot failure is routed to the registered exception handler.
        application.make("exception_handler").handle(e)
    """We Are Ready For Launch
    If we have a solid response and not redirecting then we need to return
    a 200 status code along with the data. If we don't, then we'll have
    to return a 302 redirection to where ever the user would like go
    to next.
    """
    _, response = application.make("request"), application.make("response")
    # Status and headers (including rendered cookies) go to the WSGI callable.
    start_response(
        response.get_status_code(),
        response.get_headers() + response.cookie_jar.render_response(),
    )
    """Final Step
    This will take the data variable from the Service Container and return
    it to the WSGI server.
    """
    return iter([response.get_response_content()])
def testcase_handler(application, environ, start_response, exception_handling=True):
"""The WSGI Application Server.
Arguments:
environ {dict} -- The WSGI environ dictionary
start_response {WSGI callable}
Returns:
WSGI Response
"""
from wsgi import application
application.bind("environ", environ)
"""Add Environ To Service Container
Add the environ to the service container. The environ is generated by the
the WSGI server above and used by a service provider to manipulate the
incoming requests
"""
# """Execute All Service Providers That Require The WSGI Server
# Run all service provider boot methods if the wsgi attribute is true.
# """
try:
for provider in application.get_providers():
application.resolve(provider.boot)
except Exception as e:
if not exception_handling:
raise e
application.make("exception_handler").handle(e)
"""We Are Ready For Launch
If we have a solid response and not redirecting then we need to return
a 200 status code along with the data. If we don't, then we'll have
to return a 302 redirection to where ever the user would like go
to next.
"""
request, response = application.make("request"), application.make("response")
start_response(
response.get_status_code(),
response.get_headers() + response.cookie_jar.render_response(),
)
"""Final Step
This will take the data variable from the Service Container and return
it to the WSGI server.
"""
return (request, response)
| 30.747573
| 84
| 0.680455
| 414
| 3,167
| 5.140097
| 0.241546
| 0.039474
| 0.036654
| 0.022556
| 0.912124
| 0.912124
| 0.912124
| 0.912124
| 0.912124
| 0.912124
| 0
| 0.00504
| 0.248184
| 3,167
| 102
| 85
| 31.04902
| 0.888702
| 0.182507
| 0
| 0.666667
| 0
| 0
| 0.059316
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.066667
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4a48931199e538f0123a6fc15897848b7bd0be42
| 7,043
|
py
|
Python
|
rayml/tests/data_checks_tests/test_id_columns_data_check.py
|
gcode-ai/rayml
|
92c4f3c6041f465fee27a6c03bd7959c4ef21124
|
[
"BSD-3-Clause"
] | null | null | null |
rayml/tests/data_checks_tests/test_id_columns_data_check.py
|
gcode-ai/rayml
|
92c4f3c6041f465fee27a6c03bd7959c4ef21124
|
[
"BSD-3-Clause"
] | null | null | null |
rayml/tests/data_checks_tests/test_id_columns_data_check.py
|
gcode-ai/rayml
|
92c4f3c6041f465fee27a6c03bd7959c4ef21124
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import pandas as pd
import pytest
from rayml.data_checks import (
DataCheckActionCode,
DataCheckActionOption,
DataCheckMessageCode,
DataCheckWarning,
IDColumnsDataCheck,
)
# Data-check name shared by every expected-warning literal in the tests below.
id_data_check_name = IDColumnsDataCheck.name
def test_id_cols_data_check_init():
    """IDColumnsDataCheck stores an in-range id_threshold and rejects the rest."""
    # Default threshold is 1.0; explicit in-range values are stored verbatim.
    assert IDColumnsDataCheck().id_threshold == 1.0
    for threshold in (0.0, 0.5, 1.0):
        assert IDColumnsDataCheck(id_threshold=threshold).id_threshold == threshold
    # Anything outside [0, 1] is rejected with a descriptive ValueError.
    for bad_threshold in (-0.1, 1.1):
        with pytest.raises(
            ValueError,
            match="id_threshold must be a float between 0 and 1, inclusive.",
        ):
            IDColumnsDataCheck(id_threshold=bad_threshold)
def test_id_columns_warning():
    """Numeric ID-like columns are flagged at the 0.95 and 1.0 thresholds."""
    # col_1_id and Id are unique 0..3 permutations; col_2 is unique but offset;
    # col_3_id contains a repeat; col_5/col_6 look like ordinary features.
    X_dict = {
        "col_1_id": [0, 1, 2, 3],
        "col_2": [2, 3, 4, 5],
        "col_3_id": [1, 1, 2, 3],
        "Id": [3, 1, 2, 0],
        "col_5": [0, 0, 1, 2],
        "col_6": [0.1, 0.2, 0.3, 0.4],
    }
    X = pd.DataFrame.from_dict(X_dict)
    # At 0.95, four columns are considered likely IDs.
    id_cols_check = IDColumnsDataCheck(id_threshold=0.95)
    assert id_cols_check.validate(X) == [
        DataCheckWarning(
            message="Columns 'Id', 'col_1_id', 'col_2', 'col_3_id' are 95.0% or more likely to be an ID column",
            data_check_name=id_data_check_name,
            message_code=DataCheckMessageCode.HAS_ID_COLUMN,
            details={"columns": ["Id", "col_1_id", "col_2", "col_3_id"]},
            action_options=[
                DataCheckActionOption(
                    DataCheckActionCode.DROP_COL,
                    data_check_name=id_data_check_name,
                    metadata={"columns": ["Id", "col_1_id", "col_2", "col_3_id"]},
                )
            ],
        ).to_dict(),
    ]
    # At the strict 1.0 threshold, only the exact-match columns remain.
    X = pd.DataFrame.from_dict(X_dict)
    id_cols_check = IDColumnsDataCheck(id_threshold=1.0)
    assert id_cols_check.validate(X) == [
        DataCheckWarning(
            message="Columns 'Id', 'col_1_id' are 100.0% or more likely to be an ID column",
            data_check_name=id_data_check_name,
            message_code=DataCheckMessageCode.HAS_ID_COLUMN,
            details={"columns": ["Id", "col_1_id"]},
            action_options=[
                DataCheckActionOption(
                    DataCheckActionCode.DROP_COL,
                    data_check_name=id_data_check_name,
                    metadata={"columns": ["Id", "col_1_id"]},
                )
            ],
        ).to_dict(),
    ]
def test_id_columns_strings():
    """String/categorical ID-like columns are flagged at both thresholds."""
    X_dict = {
        "col_1_id": ["a", "b", "c", "d"],
        "col_2": ["w", "x", "y", "z"],
        "col_3_id": [
            "123456789012345",
            "234567890123456",
            "3456789012345678",
            "45678901234567",
        ],
        "Id": ["z", "y", "x", "a"],
        "col_5": ["0", "0", "1", "2"],
        "col_6": [0.1, 0.2, 0.3, 0.4],
    }
    X = pd.DataFrame.from_dict(X_dict)
    # Initialize Woodwork typing so the string columns are seen as categorical.
    X.ww.init(
        logical_types={
            "col_1_id": "categorical",
            "col_2": "categorical",
            "Id": "categorical",
            "col_5": "categorical",
        }
    )
    # At 0.95, four columns are considered likely IDs.
    id_cols_check = IDColumnsDataCheck(id_threshold=0.95)
    assert id_cols_check.validate(X) == [
        DataCheckWarning(
            message="Columns 'Id', 'col_1_id', 'col_2', 'col_3_id' are 95.0% or more likely to be an ID column",
            data_check_name=id_data_check_name,
            message_code=DataCheckMessageCode.HAS_ID_COLUMN,
            details={"columns": ["Id", "col_1_id", "col_2", "col_3_id"]},
            action_options=[
                DataCheckActionOption(
                    DataCheckActionCode.DROP_COL,
                    data_check_name=id_data_check_name,
                    metadata={"columns": ["Id", "col_1_id", "col_2", "col_3_id"]},
                )
            ],
        ).to_dict(),
    ]
    # At the strict 1.0 threshold, only the exact-match columns remain.
    id_cols_check = IDColumnsDataCheck(id_threshold=1.0)
    assert id_cols_check.validate(X) == [
        DataCheckWarning(
            message="Columns 'Id', 'col_1_id' are 100.0% or more likely to be an ID column",
            data_check_name=id_data_check_name,
            message_code=DataCheckMessageCode.HAS_ID_COLUMN,
            details={"columns": ["Id", "col_1_id"]},
            action_options=[
                DataCheckActionOption(
                    DataCheckActionCode.DROP_COL,
                    data_check_name=id_data_check_name,
                    metadata={"columns": ["Id", "col_1_id"]},
                )
            ],
        ).to_dict(),
    ]
def test_id_cols_data_check_input_formats():
    """validate() accepts empty DataFrames, Woodwork frames, 2D lists and numpy arrays."""
    id_cols_check = IDColumnsDataCheck(id_threshold=0.8)
    # test empty pd.DataFrame
    assert id_cols_check.validate(pd.DataFrame()) == []
    # test Woodwork
    ww_input = pd.DataFrame(np.array([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5]]))
    ww_input.ww.init()
    assert id_cols_check.validate(ww_input) == [
        DataCheckWarning(
            message="Columns '0', '1' are 80.0% or more likely to be an ID column",
            data_check_name=id_data_check_name,
            message_code=DataCheckMessageCode.HAS_ID_COLUMN,
            details={"columns": [0, 1]},
            action_options=[
                DataCheckActionOption(
                    DataCheckActionCode.DROP_COL,
                    data_check_name=id_data_check_name,
                    metadata={"columns": [0, 1]},
                )
            ],
        ).to_dict(),
    ]
    # test 2D list
    assert id_cols_check.validate([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5]]) == [
        DataCheckWarning(
            message="Columns '0', '1' are 80.0% or more likely to be an ID column",
            data_check_name=id_data_check_name,
            message_code=DataCheckMessageCode.HAS_ID_COLUMN,
            details={"columns": [0, 1]},
            action_options=[
                DataCheckActionOption(
                    DataCheckActionCode.DROP_COL,
                    data_check_name=id_data_check_name,
                    metadata={"columns": [0, 1]},
                )
            ],
        ).to_dict(),
    ]
    # test np.array
    assert id_cols_check.validate(
        np.array([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5]])
    ) == [
        DataCheckWarning(
            message="Columns '0', '1' are 80.0% or more likely to be an ID column",
            data_check_name=id_data_check_name,
            message_code=DataCheckMessageCode.HAS_ID_COLUMN,
            details={"columns": [0, 1]},
            action_options=[
                DataCheckActionOption(
                    DataCheckActionCode.DROP_COL,
                    data_check_name=id_data_check_name,
                    metadata={"columns": [0, 1]},
                )
            ],
        ).to_dict(),
    ]
| 34.694581
| 112
| 0.554451
| 836
| 7,043
| 4.356459
| 0.108852
| 0.076606
| 0.103515
| 0.061779
| 0.842669
| 0.809171
| 0.799561
| 0.772378
| 0.737781
| 0.737781
| 0
| 0.050448
| 0.318898
| 7,043
| 202
| 113
| 34.866337
| 0.708776
| 0.009371
| 0
| 0.61236
| 0
| 0.011236
| 0.149742
| 0
| 0
| 0
| 0
| 0
| 0.067416
| 1
| 0.022472
| false
| 0
| 0.022472
| 0
| 0.044944
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4a58d391afe88dcc2260da8a76b168b2bad55abb
| 26
|
py
|
Python
|
simpleFEA/elements/__init__.py
|
robsiegwart/simpleFEA
|
7c3f757a4bf92675cdc597c7e479b8a9925a6a69
|
[
"MIT"
] | 1
|
2022-02-01T11:08:31.000Z
|
2022-02-01T11:08:31.000Z
|
simpleFEA/elements/__init__.py
|
robsiegwart/simpleFEA
|
7c3f757a4bf92675cdc597c7e479b8a9925a6a69
|
[
"MIT"
] | null | null | null |
simpleFEA/elements/__init__.py
|
robsiegwart/simpleFEA
|
7c3f757a4bf92675cdc597c7e479b8a9925a6a69
|
[
"MIT"
] | null | null | null |
from .Link2D import Link2D
| 26
| 26
| 0.846154
| 4
| 26
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 0.115385
| 26
| 1
| 26
| 26
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4a7488f4da9dfdd657dfe772c55b4b600581c568
| 204
|
py
|
Python
|
Server/fcm.py
|
Barengific/CharmHome
|
5ea89f35bc18863bd6c649b1574d30853a4dce82
|
[
"MIT"
] | null | null | null |
Server/fcm.py
|
Barengific/CharmHome
|
5ea89f35bc18863bd6c649b1574d30853a4dce82
|
[
"MIT"
] | null | null | null |
Server/fcm.py
|
Barengific/CharmHome
|
5ea89f35bc18863bd6c649b1574d30853a4dce82
|
[
"MIT"
] | null | null | null |
import firebase_admin
from firebase_admin import credentials

# Load the service-account credentials and initialize the default Firebase
# app exactly once. The previous code called initialize_app() twice — first
# with application-default credentials and then again with the certificate —
# and the second call raises ValueError because the default app already exists.
cred = credentials.Certificate("serviceAccountKey.json")
default_app = firebase_admin.initialize_app(cred)
| 20.4
| 56
| 0.848039
| 24
| 204
| 6.916667
| 0.5
| 0.313253
| 0.277108
| 0.313253
| 0.361446
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 204
| 9
| 57
| 22.666667
| 0.887701
| 0
| 0
| 0
| 0
| 0
| 0.108374
| 0.108374
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
4a863d1c66be2bf5e86073191f41a21882bbe6f6
| 152
|
py
|
Python
|
convert/tranform.py
|
dishantvyas15/nora-covid-19-bot
|
1c2935728603de75ad2f76584eeaabad715f4007
|
[
"MIT"
] | 12
|
2020-06-30T07:04:10.000Z
|
2021-11-08T15:06:40.000Z
|
convert/tranform.py
|
dishantvyas15/nora-covid-19-bot
|
1c2935728603de75ad2f76584eeaabad715f4007
|
[
"MIT"
] | 13
|
2020-07-18T13:41:03.000Z
|
2021-10-30T05:21:56.000Z
|
convert/tranform.py
|
dishantvyas15/nora-covid-19-bot
|
1c2935728603de75ad2f76584eeaabad715f4007
|
[
"MIT"
] | 19
|
2020-06-12T07:07:59.000Z
|
2022-02-05T18:46:02.000Z
|
from rasa.nlu.convert import convert_training_data
convert_training_data(data_file="./input.json", out_file="./nlu.md", output_format="md", language="")
| 76
| 101
| 0.789474
| 23
| 152
| 4.913043
| 0.652174
| 0.265487
| 0.336283
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046053
| 152
| 2
| 101
| 76
| 0.77931
| 0
| 0
| 0
| 0
| 0
| 0.143791
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
435e97748185eff1c863cc54f4580b6ab1ca8874
| 487
|
py
|
Python
|
CodeWarsKataStuff/Replace multiple items.py
|
perrymant/CodeWarsKataStuff
|
20eb25a3f0070aee5f5ae9a03a656acd5557c021
|
[
"MIT"
] | null | null | null |
CodeWarsKataStuff/Replace multiple items.py
|
perrymant/CodeWarsKataStuff
|
20eb25a3f0070aee5f5ae9a03a656acd5557c021
|
[
"MIT"
] | null | null | null |
CodeWarsKataStuff/Replace multiple items.py
|
perrymant/CodeWarsKataStuff
|
20eb25a3f0070aee5f5ae9a03a656acd5557c021
|
[
"MIT"
] | null | null | null |
t = '########### ###########\n########## ##########\n######### #########\n######## ########\n####### #######\n###### ######\n##### #####\n#### ####\n### ###\n## ##\n# #\n \n'
def invert_triangle(t):
temp = t.replace(" ","a")
temp = temp.replace("#"," ")
temp = temp.replace("a","#")
return("\n".join(temp.split('\n')[-1::-1]))
| 69.571429
| 318
| 0.203285
| 36
| 487
| 2.722222
| 0.333333
| 0.22449
| 0.306122
| 0.367347
| 0.122449
| 0.122449
| 0.122449
| 0.122449
| 0.122449
| 0.122449
| 0
| 0.006711
| 0.38809
| 487
| 6
| 319
| 81.166667
| 0.322148
| 0
| 0
| 0
| 0
| 0.166667
| 0.661191
| 0.090349
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4380b268eb19407d280bd87c6259e77617c392ea
| 7,204
|
py
|
Python
|
tests/test_fio.py
|
MaggieQi/spartan
|
24b9f977d0a9ae99e672bf90d80a0f22ac41d133
|
[
"Apache-2.0"
] | null | null | null |
tests/test_fio.py
|
MaggieQi/spartan
|
24b9f977d0a9ae99e672bf90d80a0f22ac41d133
|
[
"Apache-2.0"
] | null | null | null |
tests/test_fio.py
|
MaggieQi/spartan
|
24b9f977d0a9ae99e672bf90d80a0f22ac41d133
|
[
"Apache-2.0"
] | null | null | null |
from spartan import expr, util
from spartan.util import Assert
import test_common
import numpy as np
from scipy import sparse as sp
import os
from spartan.config import FLAGS
import unittest
class TestFIO(test_common.ClusterTest):
test_dir = None
test_dir2 = None
def create_path(self):
if self.test_dir == None:
if len(FLAGS.hosts) > 1 and FLAGS.cluster:
raise unittest.SkipTest()
else:
self.test_dir = '/tmp'
self.test_dir += '/spartan-fio-%d' % os.getuid()
self.test_dir2 = self.test_dir + '/path/path'
def test_fio_dense(self):
self.create_path()
t1 = expr.arange((100, 100)).force()
Assert.eq(expr.save(t1, "fiotest1", self.test_dir, False), True)
Assert.all_eq(t1.glom(), expr.load("fiotest1", self.test_dir, False).glom())
Assert.eq(expr.save(t1, "fiotest1", self.test_dir, True), True)
Assert.all_eq(t1.glom(), expr.load("fiotest1", self.test_dir, True).glom())
Assert.eq(expr.pickle(t1, "fiotest2", self.test_dir, False), True)
Assert.all_eq(t1.glom(), expr.unpickle("fiotest2", self.test_dir, False).glom())
Assert.eq(expr.pickle(t1, "fiotest2", self.test_dir, True), True)
Assert.all_eq(t1.glom(), expr.unpickle("fiotest2", self.test_dir, True).glom())
def test_fio_sparse(self):
self.create_path()
t1 = expr.sparse_rand((100, 100)).force()
Assert.eq(expr.save(t1, "fiotest3", self.test_dir, False), True)
Assert.all_eq(t1.glom().todense(), expr.load("fiotest3", self.test_dir, False).glom().todense())
Assert.eq(expr.save(t1, "fiotest3", self.test_dir, True), True)
Assert.all_eq(t1.glom().todense(), expr.load("fiotest3", self.test_dir, True).glom().todense())
Assert.eq(expr.pickle(t1, "fiotest4", self.test_dir, False), True)
Assert.all_eq(t1.glom().todense(), expr.unpickle("fiotest4", self.test_dir, False).glom().todense())
Assert.eq(expr.pickle(t1, "fiotest4", self.test_dir, True), True)
Assert.all_eq(t1.glom().todense(), expr.unpickle("fiotest4", self.test_dir, True).glom().todense())
def test_fio_partial_dense(self):
self.create_path()
t1 = expr.randn(300, 300).force()
expr.save(t1, "fiotest_partial1", self.test_dir, False)
expr.pickle(t1, "fiotest_partial2", self.test_dir, False)
t2 = expr.load("fiotest_partial1", self.test_dir, False)
test_tiles = {}
for ex, v in t1.tiles.iteritems():
test_tiles[ex] = v.worker
test_tiles = expr.partial_load(test_tiles, "fiotest_partial1", self.test_dir, False)
for ex, v in test_tiles.iteritems():
t1.tiles[ex] = v
Assert.all_eq(t1.glom(), t2.glom())
test_tiles = {}
for ex, v in t1.tiles.iteritems():
test_tiles[ex] = v.worker
test_tiles = expr.partial_unpickle(test_tiles, "fiotest_partial2", self.test_dir, False)
for ex, v in test_tiles.iteritems():
t1.tiles[ex] = v
Assert.all_eq(t1.glom(), t2.glom())
def test_fio_partial_sparse(self):
self.create_path()
t1 = expr.sparse_rand((300, 300)).force()
expr.save(t1, "fiotest_partial1", self.test_dir, False)
expr.pickle(t1, "fiotest_partial2", self.test_dir, False)
t2 = expr.load("fiotest_partial1", self.test_dir, False)
test_tiles = {}
for ex, v in t1.tiles.iteritems():
test_tiles[ex] = v.worker
test_tiles = expr.partial_load(test_tiles, "fiotest_partial1", self.test_dir, False)
for ex, v in test_tiles.iteritems():
t1.tiles[ex] = v
Assert.all_eq(t1.glom().todense(), t2.glom().todense())
test_tiles = {}
for ex, v in t1.tiles.iteritems():
test_tiles[ex] = v.worker
test_tiles = expr.partial_unpickle(test_tiles, "fiotest_partial2", self.test_dir, False)
for ex, v in test_tiles.iteritems():
t1.tiles[ex] = v
Assert.all_eq(t1.glom().todense(), t2.glom().todense())
# This test can't pass on both clusters and single machine.
# Mark it to avoid anonying situations.
def test_fio_path(self):
self.create_path()
t1 = expr.randn(100, 100).force()
expr.save(t1, "fiotest1", self.test_dir2, False)
expr.pickle(t1, "fiotest2", self.test_dir2, False)
Assert.all_eq(t1.glom(), expr.load("fiotest1", self.test_dir2, False).glom())
Assert.all_eq(t1.glom(), expr.unpickle("fiotest2", self.test_dir2, False).glom())
def profile1(self):
self.create_path()
t1 = expr.arange((1000, 1000)).force()
time_a, a = util.timeit(lambda: expr.save(t1, "fiotest3", self.test_dir, False))
util.log_info('Save a %s dense array in %s without zip', t1.shape, time_a)
time_a, a = util.timeit(lambda: expr.load("fiotest3", self.test_dir, False).force())
util.log_info('Load a %s dense array in %s without zip', t1.shape, time_a)
time_a, a = util.timeit(lambda: expr.save(t1, "fiotest3", self.test_dir, True))
util.log_info('Save a %s dense array in %s with zip', t1.shape, time_a)
time_a, a = util.timeit(lambda: expr.load("fiotest3", self.test_dir, True).force())
util.log_info('Load a %s dense array in %s with zip', t1.shape, time_a)
time_a, a = util.timeit(lambda: expr.pickle(t1, "fiotest4", self.test_dir, False))
util.log_info('Pickle a %s dense array in %s without zip', t1.shape, time_a)
time_a, a = util.timeit(lambda: expr.unpickle("fiotest4", self.test_dir, False).force())
util.log_info('Unpickle a %s dense array in %s without zip', t1.shape, time_a)
time_a, a = util.timeit(lambda: expr.pickle(t1, "fiotest4", self.test_dir, True))
util.log_info('Pickle a %s dense array in %s with zip', t1.shape, time_a)
time_a, a = util.timeit(lambda: expr.unpickle("fiotest4", self.test_dir, True).force())
util.log_info('Unpickle a %s dense array in %s with zip', t1.shape, time_a)
def profile2(self):
self.create_path()
t1 = expr.sparse_rand((10000, 10000)).force()
time_a, a = util.timeit(lambda: expr.save(t1, "fiotest3", self.test_dir, False))
util.log_info('Save a %s sparse array in %s without zip', t1.shape, time_a)
time_a, a = util.timeit(lambda: expr.load("fiotest3", self.test_dir, False).force())
util.log_info('Load a %s sparse array in %s without zip', t1.shape, time_a)
time_a, a = util.timeit(lambda: expr.save(t1, "fiotest3", self.test_dir, True))
util.log_info('Save a %s sparse array in %s with zip', t1.shape, time_a)
time_a, a = util.timeit(lambda: expr.load("fiotest3", self.test_dir, True).force())
util.log_info('Load a %s sparse array in %s with zip', t1.shape, time_a)
time_a, a = util.timeit(lambda: expr.pickle(t1, "fiotest4", self.test_dir, False))
util.log_info('Pickle a %s sparse array in %s without zip', t1.shape, time_a)
time_a, a = util.timeit(lambda: expr.unpickle("fiotest4", self.test_dir, False).force())
util.log_info('Unpickle a %s sparse array in %s without zip', t1.shape, time_a)
time_a, a = util.timeit(lambda: expr.pickle(t1, "fiotest4", self.test_dir, True))
util.log_info('Pickle a %s sparse array in %s with zip', t1.shape, time_a)
time_a, a = util.timeit(lambda: expr.unpickle("fiotest4", self.test_dir, True).force())
util.log_info('Unpickle a %s sparse array in %s with zip', t1.shape, time_a)
if __name__ == '__main__':
import unittest
unittest.main()
| 49.342466
| 104
| 0.679484
| 1,167
| 7,204
| 4.03856
| 0.092545
| 0.086569
| 0.107363
| 0.088267
| 0.870359
| 0.858477
| 0.844685
| 0.815829
| 0.800976
| 0.757479
| 0
| 0.02912
| 0.161022
| 7,204
| 145
| 105
| 49.682759
| 0.750662
| 0.013187
| 0
| 0.457364
| 0
| 0
| 0.157191
| 0
| 0
| 0
| 0
| 0
| 0.178295
| 1
| 0.062016
| false
| 0
| 0.069767
| 0
| 0.155039
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
43b6ac3af3dca32fd7b3c58e4af1fe8e08dcfb92
| 80
|
py
|
Python
|
JDjango/api/djangotools/urls/__init__.py
|
JIYANG-PLUS/JDjango
|
57cbb13b2b4c07f34d546c0c637c22f60c1e692a
|
[
"MIT"
] | 3
|
2020-12-28T05:09:02.000Z
|
2021-06-23T10:02:03.000Z
|
JDjango/api/djangotools/urls/__init__.py
|
JIYANG-PLUS/JDjango
|
57cbb13b2b4c07f34d546c0c637c22f60c1e692a
|
[
"MIT"
] | null | null | null |
JDjango/api/djangotools/urls/__init__.py
|
JIYANG-PLUS/JDjango
|
57cbb13b2b4c07f34d546c0c637c22f60c1e692a
|
[
"MIT"
] | null | null | null |
from .gets import *
from .sets import *
from .judge import *
from .fix import *
| 16
| 20
| 0.7
| 12
| 80
| 4.666667
| 0.5
| 0.535714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 80
| 4
| 21
| 20
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
43b8af01b527b7b3935d667412b4d5f90b7cee18
| 21
|
py
|
Python
|
src/vscodeextension/src/test/testfiles/python/01_function.py
|
DarkTrick/SourceCodeVisualizer
|
8a68c36cfdbffdb87593c1c558e82abec66dbfc2
|
[
"BSD-3-Clause"
] | 11
|
2022-03-03T13:02:07.000Z
|
2022-03-20T19:37:14.000Z
|
src/vscodeextension/src/test/testfiles/python/01_function.py
|
DarkTrick/SourceCodeVisualizer
|
8a68c36cfdbffdb87593c1c558e82abec66dbfc2
|
[
"BSD-3-Clause"
] | 1
|
2022-03-07T20:56:40.000Z
|
2022-03-09T04:00:25.000Z
|
src/vscodeextension/src/test/testfiles/python/01_function.py
|
DarkTrick/SourceCodeVisualizer
|
8a68c36cfdbffdb87593c1c558e82abec66dbfc2
|
[
"BSD-3-Clause"
] | 1
|
2022-01-27T03:15:28.000Z
|
2022-01-27T03:15:28.000Z
|
def foo():
return 5
| 10.5
| 10
| 0.619048
| 4
| 21
| 3.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 0.238095
| 21
| 2
| 11
| 10.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
600f74acae2cc449dbb7b4a83689cc5a106f29c9
| 37
|
py
|
Python
|
elliot/recommender/latent_factor_models/MF/__init__.py
|
gategill/elliot
|
113763ba6d595976e14ead2e3d460d9705cd882e
|
[
"Apache-2.0"
] | 175
|
2021-03-04T15:46:25.000Z
|
2022-03-31T05:56:58.000Z
|
elliot/recommender/latent_factor_models/MF/__init__.py
|
gategill/elliot
|
113763ba6d595976e14ead2e3d460d9705cd882e
|
[
"Apache-2.0"
] | 15
|
2021-03-06T17:53:56.000Z
|
2022-03-24T17:02:07.000Z
|
elliot/recommender/latent_factor_models/MF/__init__.py
|
gategill/elliot
|
113763ba6d595976e14ead2e3d460d9705cd882e
|
[
"Apache-2.0"
] | 39
|
2021-03-04T15:46:26.000Z
|
2022-03-09T15:37:12.000Z
|
from .matrix_factorization import MF
| 18.5
| 36
| 0.864865
| 5
| 37
| 6.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.939394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6011049b2ca167ca23fc367736f5ade3a47da20d
| 191
|
py
|
Python
|
twitchbot/util/__init__.py
|
cvangheem/Twitchbot
|
48bb065951e88e4d2e9ef8d0c1a3afb0150a5eb5
|
[
"MIT"
] | null | null | null |
twitchbot/util/__init__.py
|
cvangheem/Twitchbot
|
48bb065951e88e4d2e9ef8d0c1a3afb0150a5eb5
|
[
"MIT"
] | null | null | null |
twitchbot/util/__init__.py
|
cvangheem/Twitchbot
|
48bb065951e88e4d2e9ef8d0c1a3afb0150a5eb5
|
[
"MIT"
] | null | null | null |
from .register_util import *
from .twitch_api_util import *
from .message_util import *
from .task_util import *
from .misc_util import *
from .command_util import *
from .dict_util import *
| 23.875
| 30
| 0.780105
| 29
| 191
| 4.862069
| 0.37931
| 0.496454
| 0.595745
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146597
| 191
| 7
| 31
| 27.285714
| 0.865031
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
601d6729a21b1bacc009432d67eb89f1dcb0c9ce
| 24
|
py
|
Python
|
mytoolbox/__init__.py
|
bruno154/project-4-cardio-catch-disease
|
24942c356689dd0f733259c12a5479d8b0e62adf
|
[
"MIT"
] | null | null | null |
mytoolbox/__init__.py
|
bruno154/project-4-cardio-catch-disease
|
24942c356689dd0f733259c12a5479d8b0e62adf
|
[
"MIT"
] | null | null | null |
mytoolbox/__init__.py
|
bruno154/project-4-cardio-catch-disease
|
24942c356689dd0f733259c12a5479d8b0e62adf
|
[
"MIT"
] | null | null | null |
from .mytoolbox import *
| 24
| 24
| 0.791667
| 3
| 24
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 24
| 1
| 24
| 24
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6049244c3ce9fc1d1c2a8fc1925a47eaa6d59fe5
| 7,935
|
py
|
Python
|
shutterstock_api/__init__.py
|
Lumen5/shutterstock-api
|
d26db2c9cd6688cf828ad15478bf1b4701150a3f
|
[
"Adobe-Glyph"
] | 1
|
2021-02-23T16:15:16.000Z
|
2021-02-23T16:15:16.000Z
|
shutterstock_api/__init__.py
|
Lumen5/shutterstock-api
|
d26db2c9cd6688cf828ad15478bf1b4701150a3f
|
[
"Adobe-Glyph"
] | 17
|
2019-07-13T01:23:08.000Z
|
2022-03-21T07:17:35.000Z
|
shutterstock_api/__init__.py
|
Lumen5/shutterstock-api
|
d26db2c9cd6688cf828ad15478bf1b4701150a3f
|
[
"Adobe-Glyph"
] | 1
|
2021-03-07T19:16:27.000Z
|
2021-03-07T19:16:27.000Z
|
# coding: utf-8
# flake8: noqa
"""
Shutterstock API Reference
The Shutterstock API provides access to Shutterstock's library of media, as well as information about customers' accounts and the contributors that provide the media. # noqa: E501
OpenAPI spec version: 1.0.11
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import apis into sdk package
from shutterstock_api.api.audio_api import AudioApi
from shutterstock_api.api.contributors_api import ContributorsApi
from shutterstock_api.api.editorial_api import EditorialApi
from shutterstock_api.api.images_api import ImagesApi
from shutterstock_api.api.test_api import TestApi
from shutterstock_api.api.users_api import UsersApi
from shutterstock_api.api.videos_api import VideosApi
# import ApiClient
from shutterstock_api.api_client import ApiClient
from shutterstock_api.configuration import Configuration
# import models into sdk package
from shutterstock_api.models.access_token_details import AccessTokenDetails
from shutterstock_api.models.album import Album
from shutterstock_api.models.allotment import Allotment
from shutterstock_api.models.artist import Artist
from shutterstock_api.models.audio import Audio
from shutterstock_api.models.audio_asset_details import AudioAssetDetails
from shutterstock_api.models.audio_assets import AudioAssets
from shutterstock_api.models.audio_data_list import AudioDataList
from shutterstock_api.models.audio_search_results import AudioSearchResults
from shutterstock_api.models.category import Category
from shutterstock_api.models.category_data_list import CategoryDataList
from shutterstock_api.models.collection import Collection
from shutterstock_api.models.collection_create_request import CollectionCreateRequest
from shutterstock_api.models.collection_create_response import CollectionCreateResponse
from shutterstock_api.models.collection_data_list import CollectionDataList
from shutterstock_api.models.collection_item import CollectionItem
from shutterstock_api.models.collection_item_data_list import CollectionItemDataList
from shutterstock_api.models.collection_item_request import CollectionItemRequest
from shutterstock_api.models.collection_update_request import CollectionUpdateRequest
from shutterstock_api.models.contributor import Contributor
from shutterstock_api.models.contributor_profile import ContributorProfile
from shutterstock_api.models.contributor_profile_data_list import ContributorProfileDataList
from shutterstock_api.models.contributor_profile_social_media import ContributorProfileSocialMedia
from shutterstock_api.models.cookie import Cookie
from shutterstock_api.models.download_history import DownloadHistory
from shutterstock_api.models.download_history_data_list import DownloadHistoryDataList
from shutterstock_api.models.download_history_format_details import DownloadHistoryFormatDetails
from shutterstock_api.models.download_history_media_details import DownloadHistoryMediaDetails
from shutterstock_api.models.download_history_user_details import DownloadHistoryUserDetails
from shutterstock_api.models.editorial_assets import EditorialAssets
from shutterstock_api.models.editorial_category import EditorialCategory
from shutterstock_api.models.editorial_content import EditorialContent
from shutterstock_api.models.editorial_content_data_list import EditorialContentDataList
from shutterstock_api.models.editorial_cover_item import EditorialCoverItem
from shutterstock_api.models.editorial_livefeed import EditorialLivefeed
from shutterstock_api.models.editorial_livefeed_list import EditorialLivefeedList
from shutterstock_api.models.editorial_search_results import EditorialSearchResults
from shutterstock_api.models.error import Error
from shutterstock_api.models.featured_collection import FeaturedCollection
from shutterstock_api.models.featured_collection_cover_item import FeaturedCollectionCoverItem
from shutterstock_api.models.featured_collection_data_list import FeaturedCollectionDataList
from shutterstock_api.models.genre_list import GenreList
from shutterstock_api.models.image import Image
from shutterstock_api.models.image_assets import ImageAssets
from shutterstock_api.models.image_create_request import ImageCreateRequest
from shutterstock_api.models.image_create_response import ImageCreateResponse
from shutterstock_api.models.image_data_list import ImageDataList
from shutterstock_api.models.image_search_results import ImageSearchResults
from shutterstock_api.models.image_size_details import ImageSizeDetails
from shutterstock_api.models.instrument_list import InstrumentList
from shutterstock_api.models.license_audio import LicenseAudio
from shutterstock_api.models.license_audio_request import LicenseAudioRequest
from shutterstock_api.models.license_audio_result import LicenseAudioResult
from shutterstock_api.models.license_audio_result_data_list import LicenseAudioResultDataList
from shutterstock_api.models.license_editorial_content import LicenseEditorialContent
from shutterstock_api.models.license_editorial_content_request import LicenseEditorialContentRequest
from shutterstock_api.models.license_editorial_content_result import LicenseEditorialContentResult
from shutterstock_api.models.license_editorial_content_result_data_list import LicenseEditorialContentResultDataList
from shutterstock_api.models.license_format import LicenseFormat
from shutterstock_api.models.license_image import LicenseImage
from shutterstock_api.models.license_image_request import LicenseImageRequest
from shutterstock_api.models.license_image_result import LicenseImageResult
from shutterstock_api.models.license_image_result_data_list import LicenseImageResultDataList
from shutterstock_api.models.license_request_metadata import LicenseRequestMetadata
from shutterstock_api.models.license_video import LicenseVideo
from shutterstock_api.models.license_video_request import LicenseVideoRequest
from shutterstock_api.models.license_video_result import LicenseVideoResult
from shutterstock_api.models.license_video_result_data_list import LicenseVideoResultDataList
from shutterstock_api.models.model import Model
from shutterstock_api.models.model_release import ModelRelease
from shutterstock_api.models.mood_list import MoodList
from shutterstock_api.models.price import Price
from shutterstock_api.models.recommendation import Recommendation
from shutterstock_api.models.recommendation_data_list import RecommendationDataList
from shutterstock_api.models.redownload_image import RedownloadImage
from shutterstock_api.models.redownload_video import RedownloadVideo
from shutterstock_api.models.subscription import Subscription
from shutterstock_api.models.subscription_data_list import SubscriptionDataList
from shutterstock_api.models.subscription_metadata import SubscriptionMetadata
from shutterstock_api.models.test_echo import TestEcho
from shutterstock_api.models.test_validate import TestValidate
from shutterstock_api.models.test_validate_header import TestValidateHeader
from shutterstock_api.models.test_validate_query import TestValidateQuery
from shutterstock_api.models.thumbnail import Thumbnail
from shutterstock_api.models.updated_media import UpdatedMedia
from shutterstock_api.models.updated_media_data_list import UpdatedMediaDataList
from shutterstock_api.models.url import Url
from shutterstock_api.models.urls import Urls
from shutterstock_api.models.user_details import UserDetails
from shutterstock_api.models.user_post_request import UserPostRequest
from shutterstock_api.models.user_post_response import UserPostResponse
from shutterstock_api.models.video import Video
from shutterstock_api.models.video_assets import VideoAssets
from shutterstock_api.models.video_data_list import VideoDataList
from shutterstock_api.models.video_search_results import VideoSearchResults
from shutterstock_api.models.video_size_details import VideoSizeDetails
| 62.480315
| 184
| 0.904852
| 961
| 7,935
| 7.190427
| 0.207076
| 0.232272
| 0.288712
| 0.347323
| 0.462808
| 0.274385
| 0.06686
| 0.01563
| 0
| 0
| 0
| 0.001213
| 0.064776
| 7,935
| 126
| 185
| 62.97619
| 0.929929
| 0.051544
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6049ccddeed2e144cbe9fe1ff93de70bd55c6964
| 3,281
|
py
|
Python
|
igparser/dump.py
|
HudaJr/TAGRAM
|
da58f0a47dd3f0be51f33240f23815ac682108fa
|
[
"MIT"
] | null | null | null |
igparser/dump.py
|
HudaJr/TAGRAM
|
da58f0a47dd3f0be51f33240f23815ac682108fa
|
[
"MIT"
] | null | null | null |
igparser/dump.py
|
HudaJr/TAGRAM
|
da58f0a47dd3f0be51f33240f23815ac682108fa
|
[
"MIT"
] | 3
|
2020-08-05T06:50:57.000Z
|
2020-10-15T12:35:59.000Z
|
from . import parsing
from . import output
from . import exception
from .checker import *
import re
def post_home(ses):
html = ses.session.get("https://instagram.com").text
json_ = parsing.get_dataLoaded(html)
data = json_["user"]["edge_web_feed_timeline"]["edges"]
data = parsing.sorting(lambda x: output.Post(ses, x), data)
idPeople = json_["user"]["id"]
next = json_["user"]["edge_web_feed_timeline"]["page_info"].get("end_cursor")
return output.Output(items = data, data = json_, idPeople = idPeople, next = next)
def post_people(ses, usernamePeople = None):
try:
html = ses.session.get("https://instagram.com/{}".format(usernamePeople)).text
json_ = parsing.get_sharedData(html)
data = json_["entry_data"]["ProfilePage"][0]["graphql"]["user"]["edge_owner_to_timeline_media"]["edges"]
data = parsing.sorting(lambda x: output.Post(ses, x), data)
next = json_["entry_data"]["ProfilePage"][0]["graphql"]["user"]["edge_owner_to_timeline_media"]["page_info"].get("end_cursor")
idPeople = json_["entry_data"]["ProfilePage"][0]["graphql"]["user"]["id"]
return output.Output(items = data, data = json_, idPeople = idPeople, next = next)
except KeyError:
raise exception.PeopleNotFound(usernamePeople)
def follower_people(ses, usernamePeople = None, idPeople = None):
@err_handler(json.decoder.JSONDecodeError, lambda: exception.CookiesInvalid())
@err_handler(KeyError, lambda: exception.PeopleNotFound(usernamePeople if usernamePeople else idPeople))
def inner(idPeople):
if not idPeople:
idPeople = ses.session.get("https://instagram.com/{}?__a=1".format(usernamePeople)).json()["logging_page_id"].replace("profilePage_", "")
json_ = ses.session.get("https://www.instagram.com/graphql/query/?query_hash=c76146de99bb02f6415203be841dd25a&variables=%7B%22id%22%3A%22{}%22%2C%22include_reel%22%3Atrue%2C%22fetch_mutual%22%3Atrue%2C%22first%22%3A24%7D".format(idPeople)).json()
data = json_["data"]["user"]["edge_followed_by"]["edges"]
data = parsing.sorting(lambda x: output.People(ses, x), data)
next = json_["data"]["user"]["edge_followed_by"]["page_info"].get("end_cursor")
return output.Output(items = data, data = json_, idPeople = idPeople, next = next)
return inner(idPeople)
def following_people(ses, usernamePeople = None, idPeople = None):
@err_handler(json.decoder.JSONDecodeError, lambda: exception.CookiesInvalid())
@err_handler(KeyError, lambda: exception.PeopleNotFound(usernamePeople if usernamePeople else idPeople))
def inner(idPeople):
if not idPeople:
idPeople = ses.session.get("https://instagram.com/{}?__a=1".format(usernamePeople)).json()["logging_page_id"].replace("profilePage_", "")
json_ = ses.session.get("https://www.instagram.com/graphql/query/?query_hash=d04b0a864b4b54837c0d870b0e77e076&variables=%7B%22id%22%3A%22{}%22%2C%22include_reel%22%3Atrue%2C%22fetch_mutual%22%3Atrue%2C%22first%22%3A24%7D".format(idPeople)).json()
data = json_["data"]["user"]["edge_follow"]["edges"]
data = parsing.sorting(lambda x: output.People(ses, x), data)
next = json_["data"]["user"]["edge_follow"]["page_info"].get("end_cursor")
return output.Output(items = data, data = json_, idPeople = idPeople, next = next)
return inner(idPeople)
| 53.786885
| 249
| 0.72295
| 428
| 3,281
| 5.366822
| 0.21729
| 0.027862
| 0.033957
| 0.047018
| 0.828037
| 0.812364
| 0.780148
| 0.734872
| 0.734872
| 0.734872
| 0
| 0.037543
| 0.10698
| 3,281
| 60
| 250
| 54.683333
| 0.746416
| 0
| 0
| 0.416667
| 0
| 0.041667
| 0.292857
| 0.031056
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.104167
| 0
| 0.354167
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
60743680cda564c7b313515305c8043b39525fc7
| 110
|
py
|
Python
|
jobs/admin.py
|
diegolis/search_job
|
65b7898d587a61eba008ea1503bf2b3410ac6a98
|
[
"Apache-2.0"
] | null | null | null |
jobs/admin.py
|
diegolis/search_job
|
65b7898d587a61eba008ea1503bf2b3410ac6a98
|
[
"Apache-2.0"
] | null | null | null |
jobs/admin.py
|
diegolis/search_job
|
65b7898d587a61eba008ea1503bf2b3410ac6a98
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from models import *
admin.site.register(Company)
admin.site.register(Job)
| 15.714286
| 32
| 0.8
| 16
| 110
| 5.5
| 0.625
| 0.25
| 0.386364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109091
| 110
| 6
| 33
| 18.333333
| 0.897959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
6078d3c309cf4c391fd53ab96c862248c0fd79f6
| 58
|
py
|
Python
|
maili-develop/home/conf.py
|
fortyMiles/my-family
|
d827b7fa36753726318fcf9e55d0b482fdf8323d
|
[
"BSD-3-Clause"
] | null | null | null |
maili-develop/home/conf.py
|
fortyMiles/my-family
|
d827b7fa36753726318fcf9e55d0b482fdf8323d
|
[
"BSD-3-Clause"
] | null | null | null |
maili-develop/home/conf.py
|
fortyMiles/my-family
|
d827b7fa36753726318fcf9e55d0b482fdf8323d
|
[
"BSD-3-Clause"
] | null | null | null |
default_home_pic = '2211f3027e6e682361c552cd6c721e08.png'
| 29
| 57
| 0.87931
| 5
| 58
| 9.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.436364
| 0.051724
| 58
| 1
| 58
| 58
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0.62069
| 0.62069
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
60cf3cd23a7fb56ece82848669b34446cc6120c6
| 37
|
py
|
Python
|
bloomfilter/__init__.py
|
coneco/Bloomfilter4py3
|
5b9780619a12b74eab5a942e718857d7742b9ce7
|
[
"MIT"
] | 2
|
2017-08-21T07:47:09.000Z
|
2018-09-04T07:32:11.000Z
|
bloomfilter/__init__.py
|
coneco/Bloomfilter4py3
|
5b9780619a12b74eab5a942e718857d7742b9ce7
|
[
"MIT"
] | null | null | null |
bloomfilter/__init__.py
|
coneco/Bloomfilter4py3
|
5b9780619a12b74eab5a942e718857d7742b9ce7
|
[
"MIT"
] | null | null | null |
from .bloomfilter import Bloomfilter
| 18.5
| 36
| 0.864865
| 4
| 37
| 8
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.969697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
60e133a81b293957a9942cef93486d6162210b2a
| 264
|
py
|
Python
|
declare_qtquick/control/__init__.py
|
likianta/declare-qtquick
|
93c2ce49d841ccdeb0272085c5f731139927f0d7
|
[
"MIT"
] | 3
|
2021-11-02T03:45:27.000Z
|
2022-03-27T05:33:36.000Z
|
declare_qtquick/control/__init__.py
|
likianta/declare-qtquick
|
93c2ce49d841ccdeb0272085c5f731139927f0d7
|
[
"MIT"
] | null | null | null |
declare_qtquick/control/__init__.py
|
likianta/declare-qtquick
|
93c2ce49d841ccdeb0272085c5f731139927f0d7
|
[
"MIT"
] | null | null | null |
from . import traits
from .context_manager import ctx_mgr
from .id_system import gen_id
from .id_system import get_id_level
from .id_system import id_gen
from .id_system import id_mgr
from .traits import ConstantEnumeration
from .traits import PropGetterAndSetter
| 29.333333
| 39
| 0.848485
| 42
| 264
| 5.071429
| 0.333333
| 0.112676
| 0.225352
| 0.338028
| 0.187793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 264
| 8
| 40
| 33
| 0.918103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7149f02f3ef06df258411e73ce69baeffe81b5a3
| 18,635
|
py
|
Python
|
maccorcyclingdata/.ipynb_checkpoints/validate-checkpoint.py
|
jasonkuo88/maccorcyclingdata
|
dffcc5bbb4135f025b44303243928f8f0b121af9
|
[
"MIT"
] | 2
|
2021-03-29T15:34:22.000Z
|
2022-03-12T13:52:40.000Z
|
maccorcyclingdata/.ipynb_checkpoints/validate-checkpoint.py
|
jasonkuo88/maccorcyclingdata
|
dffcc5bbb4135f025b44303243928f8f0b121af9
|
[
"MIT"
] | 10
|
2020-08-25T22:25:59.000Z
|
2021-08-23T20:51:10.000Z
|
maccorcyclingdata/.ipynb_checkpoints/validate-checkpoint.py
|
jasonkuo88/maccorcyclingdata
|
dffcc5bbb4135f025b44303243928f8f0b121af9
|
[
"MIT"
] | 2
|
2020-10-12T20:48:35.000Z
|
2021-10-02T00:11:26.000Z
|
import pandas as pd
import numpy as np
from datetime import datetime
from maccorcyclingdata.schedules import sort_scheduler_steps
def validation_check_time_interval(validation_df, df, time_interval, i, cell_id):
    """
    Validate that the testdata was collected regularly at the correct time interval.

    Parameters
    -----------
    validation_df : pandas dataframe
        The validation dataframe where any errors will be recorded
    df : pandas dataframe
        The testdata dataframe
    time_interval : integer
        The time interval between data points. How often data should be collected.
    i : integer
        An integer of the index where you want to validate
    cell_id : integer
        The cell id of the testdata

    Returns
    --------
    validation_df : pandas dataframe
        The validation dataframe with any errors listed

    Examples
    ---------
    >>> import maccorcyclingdata.validate as validate
    >>> validation_df = validate.validation_check_time_interval(validation_df, df, 10, i, 1)
    >>> validation_df
    """
    # Flag a gap larger than the expected sampling interval between row i-1 and row i.
    if df['test_time_s'][i] > (df['test_time_s'][i-1] + time_interval):
        row = {'time': datetime.now().strftime("%d/%m/%Y %H:%M:%S"),
               'run': 'in progress',
               'cell_num': cell_id,
               'row_number': i,
               'error': ('anomaly - more than ' + str(time_interval)
                         + ' seconds has passed since the last collected data')}
        # DataFrame.append was removed in pandas 2.0; concat is the supported replacement.
        validation_df = pd.concat([validation_df, pd.DataFrame([row])], ignore_index=True)
    return validation_df
def validation_check_temp_interval(validation_df, df, temp_interval, i, cell_id):
    """
    Validate that the temperature does not fluctuate suddenly between samples.

    Parameters
    -----------
    validation_df : pandas dataframe
        The validation dataframe where any errors will be recorded
    df : pandas dataframe
        The testdata dataframe
    temp_interval : integer
        The maximum temperature change allowed between two data points.
    i : integer
        An integer of the index where you want to validate
    cell_id : integer
        The cell id of the testdata

    Returns
    --------
    validation_df : pandas dataframe
        The validation dataframe with any errors listed

    Examples
    ---------
    >>> import maccorcyclingdata.validate as validate
    >>> validation_df = validate.validation_check_temp_interval(validation_df, df, 10, i, 1)
    >>> validation_df
    """
    # Flag a jump of at least temp_interval degrees in either direction
    # between consecutive samples.
    previous = df['thermocouple_temp_c'][i-1]
    current = df['thermocouple_temp_c'][i]
    if (current >= previous + temp_interval) or (current <= previous - temp_interval):
        row = {'time': datetime.now().strftime("%d/%m/%Y %H:%M:%S"),
               'run': 'in progress',
               'cell_num': cell_id,
               'row_number': i,
               'error': ('anomaly - jump in temperature (more than '
                         + str(temp_interval) + ' degrees)')}
        # DataFrame.append was removed in pandas 2.0; concat is the supported replacement.
        validation_df = pd.concat([validation_df, pd.DataFrame([row])], ignore_index=True)
    return validation_df
def validation_check_advanced_cycle(validation_df, df, i, cell_id):
    """
    Validate the testdata against the advance-cycle steps by making sure the
    cycle counter advances by exactly one.

    Parameters
    -----------
    validation_df : pandas dataframe
        The validation dataframe where any errors will be recorded
    df : pandas dataframe
        The testdata dataframe
    i : integer
        An integer of the index where you want to validate
    cell_id : integer
        The cell id of the testdata

    Returns
    --------
    validation_df : pandas dataframe
        The validation dataframe with any errors listed

    Examples
    ---------
    >>> import maccorcyclingdata.validate as validate
    >>> validation_df = validate.validation_check_advanced_cycle(validation_df, df, i, 1)
    >>> validation_df
    """
    # The cycle counter must increase by exactly one on an advance-cycle step.
    if df['cyc'][i] != (df['cyc'][i-1] + 1):
        row = {'time': datetime.now().strftime("%d/%m/%Y %H:%M:%S"),
               'run': 'in progress',
               'cell_num': cell_id,
               'row_number': i,
               'error': 'error - the cycle did not advance properly'}
        # DataFrame.append was removed in pandas 2.0; concat is the supported replacement.
        validation_df = pd.concat([validation_df, pd.DataFrame([row])], ignore_index=True)
    return validation_df
def validation_check_charging(validation_df, df, schedule_df, i, cell_id, char_tol=2):
    """
    Validate a charging step: the measured current/voltage must match the
    schedule file's instruction within ``char_tol``.

    Parameters
    -----------
    validation_df : pandas dataframe
        The validation dataframe where any errors will be recorded
    df : pandas dataframe
        The testdata dataframe
    schedule_df : pandas dataframe
        The dataframe of the cleaned schedule file
    i : integer
        An integer of the index where you want to validate
    cell_id : integer
        The cell id of the testdata
    char_tol : integer
        Sets the tolerance between the current/discharging current values and
        the set value in the schedule file. Default is 2.

    Returns
    --------
    validation_df : pandas dataframe
        The validation dataframe with any errors listed

    Examples
    ---------
    >>> import maccorcyclingdata.validate as validate
    >>> validation_df = validate.validation_check_charging(validation_df, df, schedule_df, i, 1)
    >>> validation_df
    """
    step = df['step'][i]
    # NOTE(review): charging steps look up the schedule row at step+1;
    # presumably this offsets testdata step numbering against the 0-indexed
    # schedule dataframe -- confirm against sort_scheduler_steps.
    mode = schedule_df['step_mode'][step+1]
    mode_value = schedule_df['step_mode_value'][step+1]
    limit = schedule_df['step_limit'][step+1]
    limit_value = schedule_df['step_limit_value'][step+1]
    if mode == 'Current':
        mode = 'current_ma'
        mode_value = mode_value * 1000  # schedule stores amps; testdata column is mA
        # Within tolerance means BOTH bounds must hold.  The original code
        # joined them with `or`, which is always true, so out-of-tolerance
        # currents were never reported.
        if ((round(df[mode][i]) + char_tol) >= mode_value) and ((round(df[mode][i]) - char_tol) <= mode_value):
            return validation_df
    elif mode == 'Voltage':
        mode = 'voltage_v'
        if (round(df[mode][i], 1)) == mode_value:
            return validation_df
    # The primary mode check failed; a step may still pass via its limit.
    if not pd.isna(limit):
        if limit == 'Current':
            limit = 'current_ma'
            limit_value = limit_value * 1000
            if ((round(df[limit][i]) + char_tol) >= limit_value) and ((round(df[limit][i]) - char_tol) <= limit_value):
                return validation_df
        elif limit == 'Voltage':
            limit = 'voltage_v'
            if (round(df[limit][i], 1)) == limit_value:
                return validation_df
    row = {'time': datetime.now().strftime("%d/%m/%Y %H:%M:%S"),
           'run': 'in progress',
           'cell_num': str(cell_id),
           'row_number': str(i),
           'error': 'error - ' + str(mode) + ' is at the wrong value'}
    # DataFrame.append was removed in pandas 2.0; concat is the supported replacement.
    validation_df = pd.concat([validation_df, pd.DataFrame([row])], ignore_index=True)
    return validation_df
def validation_check_discharging(validation_df, df, schedule_df, i, cell_id, discharge_neg, char_tol=2):
    """
    Validate a discharging step: the measured current/voltage must match the
    schedule file's instruction within ``char_tol`` (negated when the cycler
    exports discharge current as negative).

    Parameters
    -----------
    validation_df : pandas dataframe
        The validation dataframe where any errors will be recorded
    df : pandas dataframe
        The testdata dataframe
    schedule_df : pandas dataframe
        The dataframe of the cleaned schedule file
    i : integer
        An integer of the index where you want to validate
    cell_id : integer
        The cell id of the testdata
    discharge_neg : boolean
        Set to True if the current is exported as negative during discharge steps.
    char_tol : integer
        Sets the tolerance between the current/discharging current values and
        the set value in the schedule file. Default is 2.

    Returns
    --------
    validation_df : pandas dataframe
        The validation dataframe with any errors listed

    Examples
    ---------
    >>> import maccorcyclingdata.validate as validate
    >>> validation_df = validate.validation_check_discharging(validation_df, df, schedule_df, i, 1, True)
    >>> validation_df
    """
    step = df['step'][i]
    # NOTE(review): discharging steps look up the schedule row at step-1
    # (charging uses step+1); confirm this offset against the schedule layout.
    mode = schedule_df['step_mode'][step-1]
    mode_value = schedule_df['step_mode_value'][step-1]
    limit = schedule_df['step_limit'][step-1]
    limit_value = schedule_df['step_limit_value'][step-1]
    if mode == 'Current':
        mode = 'current_ma'
        mode_value = mode_value * 1000  # schedule stores amps; testdata column is mA
        if discharge_neg:
            mode_value = -mode_value
        # Within tolerance means BOTH bounds must hold.  The original code
        # joined them with `or`, which is always true, so out-of-tolerance
        # currents were never reported.
        if ((round(df[mode][i]) + char_tol) >= mode_value) and ((round(df[mode][i]) - char_tol) <= mode_value):
            return validation_df
    elif mode == 'Voltage':
        mode = 'voltage_v'
        if (round(df[mode][i], 1)) == mode_value:
            return validation_df
    # The primary mode check failed; a step may still pass via its limit.
    if not pd.isna(limit):
        if limit == 'Current':
            limit = 'current_ma'
            limit_value = limit_value * 1000
            if discharge_neg:
                limit_value = -limit_value
            if ((round(df[limit][i]) + char_tol) >= limit_value) and ((round(df[limit][i]) - char_tol) <= limit_value):
                return validation_df
        elif limit == 'Voltage':
            limit = 'voltage_v'
            if (round(df[limit][i], 1)) == limit_value:
                return validation_df
    row = {'time': str(datetime.now().strftime("%d/%m/%Y %H:%M:%S")),
           'run': 'in progress',
           'cell_num': str(cell_id),
           'row_number': str(i),
           'error': 'error - ' + str(mode) + ' is at the wrong value'}
    # DataFrame.append was removed in pandas 2.0; concat is the supported replacement.
    validation_df = pd.concat([validation_df, pd.DataFrame([row])], ignore_index=True)
    return validation_df
def validation_check_max_step_num(validation_df, df, max_step, i, cell_id):
    """
    Validate the testdata against the max step by making sure no step number
    surpasses the last step of the schedule file.

    Parameters
    -----------
    validation_df : pandas dataframe
        The validation dataframe where any errors will be recorded
    df : pandas dataframe
        The testdata dataframe
    max_step : integer
        The last step from the schedule file
    i : integer
        An integer of the index where you want to validate
    cell_id : integer
        The cell id of the testdata

    Returns
    --------
    validation_df : pandas dataframe
        The validation dataframe with any errors listed

    Examples
    ---------
    >>> import maccorcyclingdata.validate as validate
    >>> validation_df = validate.validation_check_max_step_num(validation_df, df, max_step, i, 1)
    >>> validation_df
    """
    if df['step'][i] > max_step:
        row = {'time': datetime.now().strftime("%d/%m/%Y %H:%M:%S"),
               'run': 'in progress',
               'cell_num': cell_id,
               'row_number': i,
               'error': 'error - this step number surpasses the steps in scheduler'}
        # DataFrame.append was removed in pandas 2.0; concat is the supported replacement.
        validation_df = pd.concat([validation_df, pd.DataFrame([row])], ignore_index=True)
    return validation_df
def validation_check_max_temp(validation_df, df, max_temp, i, cell_id, temp_tol=3):
    """
    Validate the testdata against the max temperature.

    Parameters
    -----------
    validation_df : pandas dataframe
        The validation dataframe where any errors will be recorded
    df : pandas dataframe
        The testdata dataframe
    max_temp : integer
        The threshold for the highest temperature allowed
    i : integer
        An integer of the index where you want to validate
    cell_id : integer
        The cell id of the testdata
    temp_tol : integer
        Tolerance band around max_temp separating the three messages. Default is 3.

    Returns
    --------
    validation_df : pandas dataframe
        The validation dataframe with any errors listed

    Notes
    ------
    There are 3 possibilities of error messages:
    1. warning - temperature approaching the max! (current temperature + tol > max)
    2. error - temperature has surpassed the max! (current temperature >= max)
    3. ABORT - temperature is way too hot! (current temperature > max + tol)

    Examples
    ---------
    >>> import maccorcyclingdata.validate as validate
    >>> validation_df = validate.validation_check_max_temp(validation_df, df, 30, i, 1, 3)
    >>> validation_df
    """
    temp = df['thermocouple_temp_c'][i]
    # Check the most severe condition first.  In the original ordering the
    # "warning" branch was unreachable because the first branch already
    # covered the whole [max_temp - temp_tol, max_temp + temp_tol] band;
    # the conditions below follow the documented thresholds instead.
    if temp > (max_temp + temp_tol):
        message = 'ABORT - temperature is way too hot!'
    elif temp >= max_temp:
        message = 'error - temperature has surpassed the max!'
    elif temp > (max_temp - temp_tol):
        message = 'warning - temperature approaching the max!'
    else:
        return validation_df
    row = {'time': datetime.now().strftime("%d/%m/%Y %H:%M:%S"),
           'run': 'in progress',
           'cell_num': cell_id,
           'row_number': i,
           'error': message}
    # DataFrame.append was removed in pandas 2.0; concat is the supported replacement.
    validation_df = pd.concat([validation_df, pd.DataFrame([row])], ignore_index=True)
    return validation_df
def validation_check_rest(validation_df, df, i, cell_id):
    """
    Validate the testdata against the rest steps by making sure the current
    is at 0 when resting.

    Parameters
    -----------
    validation_df : pandas dataframe
        The validation dataframe where any errors will be recorded
    df : pandas dataframe
        The testdata dataframe
    i : integer
        An integer of the index where you want to validate
    cell_id : integer
        The cell id of the testdata

    Returns
    --------
    validation_df : pandas dataframe
        The validation dataframe with any errors listed

    Examples
    ---------
    >>> import maccorcyclingdata.validate as validate
    >>> validation_df = validate.validation_check_rest(validation_df, df, i, 1)
    >>> validation_df
    """
    if df['current_ma'][i] != 0:
        row = {'time': datetime.now().strftime("%d/%m/%Y %H:%M:%S"),
               'run': 'in progress',
               'cell_num': cell_id,
               'row_number': i,
               'error': 'error - current is not at 0 during rest step'}
        # DataFrame.append was removed in pandas 2.0; concat is the supported replacement.
        validation_df = pd.concat([validation_df, pd.DataFrame([row])], ignore_index=True)
    return validation_df
def validate_test_data(schedule_df, df, cell_id, time_interval, temp_interval, max_temp, discharge_neg, temp_tol=3, char_tol=2):
    """
    Wrapper function that validates the testdata against the schedule file.

    The sub-modules that are validated are:
    - validation_check_rest(validation_df, df, i, cell_id)
    - validation_check_charging(validation_df, df, schedule_df, i, cell_id, char_tol)
    - validation_check_discharging(validation_df, df, schedule_df, i, cell_id, discharge_neg, char_tol)
    - validation_check_advanced_cycle(validation_df, df, i, cell_id)
    - validation_check_max_step_num(validation_df, df, max_step, i, cell_id)
    - validation_check_max_temp(validation_df, df, max_temp, i, cell_id, temp_tol)
    - validation_check_time_interval(validation_df, df, time_interval, i, cell_id)
    - validation_check_temp_interval(validation_df, df, temp_interval, i, cell_id)

    Parameters
    -----------
    schedule_df : pandas dataframe
        The dataframe of the cleaned schedule file
    df : pandas dataframe
        The testdata dataframe
    cell_id : integer
        The cell id of the testdata
    time_interval : integer
        The maximum interval of how often the cycler should be recording data
    temp_interval : integer
        The maximum interval of a temperature change
    max_temp : integer
        The threshold for the highest temperature allowed
    discharge_neg : boolean
        Set to True if the current was exported as negative during discharge steps.
    temp_tol : integer
        Sets the tolerance between warning, error, and ABORT messages. Default is 3 degrees.
    char_tol : integer
        Sets the tolerance between the current/discharging current values and
        the set value in the schedule file. Default is 2.

    Returns
    --------
    validation_df : pandas dataframe
        The validation dataframe with any errors (if any) listed.
        Headers of the validation_df:
        1. time (the current time of when the validation occurs)
        2. run (tells whether the validation function is in progress or complete)
        3. cell_num (the cell number of the testdata)
        4. row_number (the row number where the error occurs)
        5. error (what the error is)

    Notes
    ------
    Depending on the size of your testdata and schedules, this function may
    take much longer to run.

    Examples
    ---------
    >>> import maccorcyclingdata.validate as validate
    >>> validation_df = validate.validate_test_data(schedule_df, df, 1, 10, 10, 30, True)
    >>> validation_df
    """
    column_names = ["time", "run", "cell_num", "row_number", "error"]
    validation_df = pd.DataFrame(columns=column_names)
    rest_steps, charge_steps, advance_steps, discharge_steps, end_steps, max_step = sort_scheduler_steps(schedule_df)
    for i in df.index:
        step = df['step'][i]
        # Dispatch the step-type-specific check.
        if step in rest_steps:
            validation_df = validation_check_rest(validation_df, df, i, cell_id)
        elif step in charge_steps:
            validation_df = validation_check_charging(validation_df, df, schedule_df, i, cell_id, char_tol)
        elif step in discharge_steps:
            validation_df = validation_check_discharging(validation_df, df, schedule_df, i, cell_id, discharge_neg, char_tol)
        elif step in advance_steps:
            validation_df = validation_check_advanced_cycle(validation_df, df, i, cell_id)
        # These checks apply to every row regardless of step type.
        validation_df = validation_check_max_step_num(validation_df, df, max_step, i, cell_id)
        validation_df = validation_check_max_temp(validation_df, df, max_temp, i, cell_id, temp_tol)
        # Interval checks need a previous row, so skip the first index.
        if i != 0:
            validation_df = validation_check_time_interval(validation_df, df, time_interval, i, cell_id)
            validation_df = validation_check_temp_interval(validation_df, df, temp_interval, i, cell_id)
    # Append a single "run complete" summary row (the two original branches
    # differed only in the error message).
    status = 'there are no errors' if validation_df.empty else 'errors listed above'
    summary = {'time': datetime.now().strftime("%d/%m/%Y %H:%M:%S"),
               'run': 'run complete',
               'cell_num': str(cell_id),
               'row_number': '-',
               'error': status}
    # DataFrame.append was removed in pandas 2.0; concat is the supported replacement.
    validation_df = pd.concat([validation_df, pd.DataFrame([summary])], ignore_index=True)
    return validation_df
| 38.661826
| 291
| 0.660692
| 2,487
| 18,635
| 4.761158
| 0.084037
| 0.121611
| 0.037835
| 0.048982
| 0.853053
| 0.832278
| 0.799088
| 0.783971
| 0.782535
| 0.732708
| 0
| 0.005729
| 0.231876
| 18,635
| 481
| 292
| 38.742204
| 0.821503
| 0.495305
| 0
| 0.460177
| 0
| 0
| 0.198331
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079646
| false
| 0.026549
| 0.035398
| 0
| 0.274336
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
714f9d3f4f4a5f201df7ef9f0f102851763cc714
| 125
|
py
|
Python
|
python/testData/quickFixes/PyAddImportQuickFixTest/existingImportsAlwaysSuggestedFirstEvenIfLonger/main.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2018-12-29T09:53:39.000Z
|
2018-12-29T09:53:42.000Z
|
python/testData/quickFixes/PyAddImportQuickFixTest/existingImportsAlwaysSuggestedFirstEvenIfLonger/main.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/quickFixes/PyAddImportQuickFixTest/existingImportsAlwaysSuggestedFirstEvenIfLonger/main.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
from long.pkg.path import ClassA
print(ClassA())
print(<error descr="Unresolved reference 'ClassB'">Clas<caret>sB</error>())
| 31.25
| 75
| 0.752
| 18
| 125
| 5.222222
| 0.833333
| 0.234043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072
| 125
| 4
| 75
| 31.25
| 0.810345
| 0
| 0
| 0
| 0
| 0
| 0.230159
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.333333
| null | null | 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
7190234ea8b280d2856bddcbc8a27cb3729d451a
| 77
|
py
|
Python
|
py_tdlib/constructors/user_privacy_setting_show_status.py
|
Mr-TelegramBot/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 24
|
2018-10-05T13:04:30.000Z
|
2020-05-12T08:45:34.000Z
|
py_tdlib/constructors/user_privacy_setting_show_status.py
|
MrMahdi313/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 3
|
2019-06-26T07:20:20.000Z
|
2021-05-24T13:06:56.000Z
|
py_tdlib/constructors/user_privacy_setting_show_status.py
|
MrMahdi313/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 5
|
2018-10-05T14:29:28.000Z
|
2020-08-11T15:04:10.000Z
|
from ..factory import Type
class userPrivacySettingShowStatus(Type):
    # Marker class for the TDLib API constructor "userPrivacySettingShowStatus":
    # the lowercase class name mirrors the TDLib constructor name.
    # Presumably all (de)serialization behavior lives in the Type base class
    # from ..factory -- verify there before adding logic here.
    pass
| 12.833333
| 41
| 0.805195
| 8
| 77
| 7.75
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12987
| 77
| 5
| 42
| 15.4
| 0.925373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
71de123d0d018de3759c10675b0bc70377b0c8c8
| 62
|
py
|
Python
|
Python-Study/Randoms/Swappying_Values.py
|
Lucas-Dalamarta/My-Studies
|
a86157a5009f746faf6b1084f4c71c37aabe050f
|
[
"MIT"
] | null | null | null |
Python-Study/Randoms/Swappying_Values.py
|
Lucas-Dalamarta/My-Studies
|
a86157a5009f746faf6b1084f4c71c37aabe050f
|
[
"MIT"
] | null | null | null |
Python-Study/Randoms/Swappying_Values.py
|
Lucas-Dalamarta/My-Studies
|
a86157a5009f746faf6b1084f4c71c37aabe050f
|
[
"MIT"
] | null | null | null |
# Demonstrate swapping two variables with tuple unpacking (no temp variable).
n1 = 10
n2 = 20
print(n1, n2)
# Single-assignment swap: the right-hand tuple is built before either name
# is rebound, so no temporary is needed.
n1, n2 = n2, n1
print(n1, n2)
| 6.888889
| 16
| 0.548387
| 14
| 62
| 2.428571
| 0.357143
| 0.352941
| 0.529412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.311111
| 0.274194
| 62
| 8
| 17
| 7.75
| 0.444444
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.4
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e0a1cde44ef395bc024c7e21f4999faab23c98e5
| 220
|
py
|
Python
|
backend/producer_adapters/AbstractProducerAdapter.py
|
hslu-ige-laes/GEE_OpenHAB_EMS
|
9a0fa2d772b701f54a0bbf78eaee1378685871d0
|
[
"MIT"
] | 3
|
2021-05-25T20:04:42.000Z
|
2021-05-26T06:20:09.000Z
|
backend/producer_adapters/AbstractProducerAdapter.py
|
hslu-ige-laes/GEE_OpenHAB_EMS
|
9a0fa2d772b701f54a0bbf78eaee1378685871d0
|
[
"MIT"
] | null | null | null |
backend/producer_adapters/AbstractProducerAdapter.py
|
hslu-ige-laes/GEE_OpenHAB_EMS
|
9a0fa2d772b701f54a0bbf78eaee1378685871d0
|
[
"MIT"
] | null | null | null |
class AbstractProducerAdapter:
    """Base class for energy-producer adapters.

    Concrete adapters are expected to override
    :meth:`get_current_energy_production`.
    """

    def __init__(self, config: dict):
        # Adapter-specific settings; stored untouched for subclasses to read.
        self.config = config

    def get_current_energy_production(self) -> float:
        """Return the current energy production.

        Stub in this base class: falls through and returns None until a
        subclass provides a real implementation.
        """
        pass
| 27.5
| 53
| 0.668182
| 23
| 220
| 6.086957
| 0.652174
| 0.142857
| 0.328571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.245455
| 220
| 7
| 54
| 31.428571
| 0.843373
| 0.168182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0.2
| 0
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
e0c6271e24d43bb758283c882431e7b6cac4b8d5
| 221
|
py
|
Python
|
gym/envs/yumi/__init__.py
|
carlo-/gym
|
7e7575601a0df5476ab9b15072c8b65693ce3071
|
[
"Python-2.0",
"OLDAP-2.7"
] | 1
|
2021-01-08T18:18:43.000Z
|
2021-01-08T18:18:43.000Z
|
gym/envs/yumi/__init__.py
|
carlo-/gym
|
7e7575601a0df5476ab9b15072c8b65693ce3071
|
[
"Python-2.0",
"OLDAP-2.7"
] | null | null | null |
gym/envs/yumi/__init__.py
|
carlo-/gym
|
7e7575601a0df5476ab9b15072c8b65693ce3071
|
[
"Python-2.0",
"OLDAP-2.7"
] | 1
|
2019-07-31T18:40:26.000Z
|
2019-07-31T18:40:26.000Z
|
from .yumi_env import YumiReachLeftArmEnv, YumiReachRightArmEnv, YumiReachTwoArmsEnv
from .yumi_env import YumiBarEnv, YumiLiftEnv
from .yumi_stepped import YumiSteppedEnv
from .yumi_constrained import YumiConstrainedEnv
| 44.2
| 84
| 0.882353
| 23
| 221
| 8.304348
| 0.565217
| 0.167539
| 0.115183
| 0.17801
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085973
| 221
| 4
| 85
| 55.25
| 0.945545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e0de52d677797064744c2f2b9be561e1083b8f81
| 139
|
py
|
Python
|
tamr_client/dataset/__init__.py
|
abafzal/tamr-client
|
9e6708ee8521910557ce8de146be4f6f278681ea
|
[
"Apache-2.0"
] | null | null | null |
tamr_client/dataset/__init__.py
|
abafzal/tamr-client
|
9e6708ee8521910557ce8de146be4f6f278681ea
|
[
"Apache-2.0"
] | null | null | null |
tamr_client/dataset/__init__.py
|
abafzal/tamr-client
|
9e6708ee8521910557ce8de146be4f6f278681ea
|
[
"Apache-2.0"
] | null | null | null |
from tamr_client.dataset import dataframe, record, unified
from tamr_client.dataset._dataset import attributes, from_resource_id, NotFound
| 46.333333
| 79
| 0.863309
| 19
| 139
| 6.052632
| 0.631579
| 0.13913
| 0.243478
| 0.365217
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086331
| 139
| 2
| 80
| 69.5
| 0.905512
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
e0f2aeb3a153c7eadd52aa4ccf92c4cbaed8fc99
| 102,142
|
py
|
Python
|
allel/test/io/test_vcf_read.py
|
smbadiwe/scikit-allel
|
4432362fc2dea5706ad358f6b4bab4186fb70a60
|
[
"MIT"
] | null | null | null |
allel/test/io/test_vcf_read.py
|
smbadiwe/scikit-allel
|
4432362fc2dea5706ad358f6b4bab4186fb70a60
|
[
"MIT"
] | null | null | null |
allel/test/io/test_vcf_read.py
|
smbadiwe/scikit-allel
|
4432362fc2dea5706ad358f6b4bab4186fb70a60
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import io
import os
import shutil
import itertools
import gzip
import warnings
import tempfile
import atexit
import zarr
import h5py
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
import pytest
from pytest import approx
from allel.io.vcf_read import (iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5,
vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv,
vcf_to_recarray, read_vcf_headers)
from allel.test.tools import compare_arrays
# Reset warning filters so every warning is always reported
# (needed for PY2/PY3 consistent behaviour).
warnings.resetwarnings()
warnings.simplefilter('always')
# Shared scratch directory for tests that write output files;
# registered with atexit so it is removed when the interpreter exits.
tempdir = tempfile.mkdtemp()
atexit.register(shutil.rmtree, tempdir)
def fixture_path(fn):
    """Return the path of test-data file *fn*, located in ../data
    relative to this module."""
    data_dir = os.path.join(os.path.dirname(__file__), os.pardir, 'data')
    return os.path.join(data_dir, fn)
def test_read_vcf_chunks():
    """Check iter_vcf_chunks() on the sample.vcf fixture: header metadata is
    parsed, chunk lengths honour chunk_length=4, and each chunk carries the
    full expected field set."""
    vcf_path = fixture_path('sample.vcf')
    fields, samples, headers, it = iter_vcf_chunks(vcf_path, fields='*', chunk_length=4,
                                                   buffer_size=100)
    # check headers parsed from the fixture's meta-information lines
    assert 'q10' in headers.filters
    assert 's50' in headers.filters
    assert 'AA' in headers.infos
    assert 'AC' in headers.infos
    assert 'AF' in headers.infos
    assert 'AN' in headers.infos
    assert 'DB' in headers.infos
    assert 'DP' in headers.infos
    assert 'H2' in headers.infos
    assert 'NS' in headers.infos
    assert 'DP' in headers.formats
    assert 'GQ' in headers.formats
    assert 'GT' in headers.formats
    assert 'HQ' in headers.formats
    assert ['NA00001', 'NA00002', 'NA00003'] == headers.samples
    assert ['NA00001', 'NA00002', 'NA00003'] == samples.tolist()
    assert '1' == headers.infos['AA']['Number']
    assert 'String' == headers.infos['AA']['Type']
    assert 'Ancestral Allele' == headers.infos['AA']['Description']
    assert '2' == headers.formats['HQ']['Number']
    assert 'Integer' == headers.formats['HQ']['Type']
    assert 'Haplotype Quality' == headers.formats['HQ']['Description']
    # check chunk lengths: 9 variants split as 4 + 4 + 1 with chunk_length=4
    chunks = [chunk for chunk, _, _, _ in it]
    assert 3 == len(chunks)
    assert 4 == chunks[0]['variants/POS'].shape[0]
    assert 4 == chunks[1]['variants/POS'].shape[0]
    assert 1 == chunks[2]['variants/POS'].shape[0]
    # check chunk contents
    expected_fields = [
        # fixed fields
        'variants/CHROM',
        'variants/POS',
        'variants/ID',
        'variants/REF',
        'variants/ALT',
        'variants/QUAL',
        'variants/FILTER_PASS',
        'variants/FILTER_q10',
        'variants/FILTER_s50',
        # INFO fields
        'variants/AA',
        'variants/AC',
        'variants/AF',
        'variants/AN',
        'variants/DB',
        'variants/DP',
        'variants/H2',
        'variants/NS',
        # special computed fields
        'variants/altlen',
        'variants/numalt',
        'variants/is_snp',
        # FORMAT fields
        'calldata/GT',
        'calldata/GQ',
        'calldata/HQ',
        'calldata/DP',
    ]
    for chunk in chunks:
        assert sorted(expected_fields) == sorted(chunk.keys())
def test_fields_all():
    """fields='*' must expand to every fixed, INFO, computed and FORMAT
    field plus 'samples'."""
    vcf_path = fixture_path('sample.vcf')
    callset = read_vcf(vcf_path, fields='*')
    expected_fields = [
        'samples',
        # fixed fields
        'variants/CHROM',
        'variants/POS',
        'variants/ID',
        'variants/REF',
        'variants/ALT',
        'variants/QUAL',
        'variants/FILTER_PASS',
        'variants/FILTER_q10',
        'variants/FILTER_s50',
        # INFO fields
        'variants/AA',
        'variants/AC',
        'variants/AF',
        'variants/AN',
        'variants/DB',
        'variants/DP',
        'variants/H2',
        'variants/NS',
        # special computed fields
        'variants/altlen',
        'variants/numalt',
        'variants/is_snp',
        # FORMAT fields
        'calldata/GT',
        'calldata/GQ',
        'calldata/HQ',
        'calldata/DP',
    ]
    assert sorted(expected_fields) == sorted(callset.keys())
def test_fields_exclude():
    """exclude_fields must remove the named fields (given as full paths or
    bare names) from a fields='*' read."""
    vcf_path = fixture_path('sample.vcf')
    exclude = ['variants/altlen', 'ID', 'calldata/DP']
    callset = read_vcf(vcf_path, fields='*', exclude_fields=exclude)
    expected_fields = [
        'samples',
        # fixed fields
        'variants/CHROM',
        'variants/POS',
        'variants/REF',
        'variants/ALT',
        'variants/QUAL',
        'variants/FILTER_PASS',
        'variants/FILTER_q10',
        'variants/FILTER_s50',
        # INFO fields
        'variants/AA',
        'variants/AC',
        'variants/AF',
        'variants/AN',
        'variants/DB',
        'variants/DP',
        'variants/H2',
        'variants/NS',
        # special computed fields
        'variants/numalt',
        'variants/is_snp',
        # FORMAT fields
        'calldata/GT',
        'calldata/GQ',
        'calldata/HQ',
    ]
    assert sorted(expected_fields) == sorted(callset.keys())
def test_fields_rename():
    """rename_fields must relocate fields to the requested group/name paths
    while leaving all other fields untouched."""
    vcf_path = fixture_path('sample.vcf')
    rename = {'CHROM': 'variants/chromosome',
              'variants/altlen': 'spam/eggs',
              'calldata/GT': 'foo/bar'}
    callset = read_vcf(vcf_path, fields='*', rename_fields=rename)
    # (leftover debug print of callset.keys() removed -- the assertion below
    # reports the actual keys on failure anyway)
    expected_fields = [
        'samples',
        # fixed fields
        'variants/chromosome',
        'variants/POS',
        'variants/ID',
        'variants/REF',
        'variants/ALT',
        'variants/QUAL',
        'variants/FILTER_PASS',
        'variants/FILTER_q10',
        'variants/FILTER_s50',
        # INFO fields
        'variants/AA',
        'variants/AC',
        'variants/AF',
        'variants/AN',
        'variants/DB',
        'variants/DP',
        'variants/H2',
        'variants/NS',
        # special computed fields
        'spam/eggs',
        'variants/numalt',
        'variants/is_snp',
        # FORMAT fields
        'foo/bar',
        'calldata/DP',
        'calldata/GQ',
        'calldata/HQ',
    ]
    assert sorted(expected_fields) == sorted(callset.keys())
def test_fields_rename_clash():
    """rename_fields must raise ValueError when two renames collide on the
    same destination path (including case-insensitive collisions) or when a
    destination clashes with another destination's parent group."""
    vcf_path = fixture_path('sample.vcf')
    # rename two fields to the same path
    rename = {'CHROM': 'variants/chromosome',
              'variants/altlen': 'spam/eggs',
              'calldata/GT': 'spam/eggs'}
    with pytest.raises(ValueError):
        read_vcf(vcf_path, fields='*', rename_fields=rename)
    # rename two fields to the same path (case insensitive)
    rename = {'CHROM': 'variants/chromosome',
              'variants/altlen': 'spam/eggs',
              'calldata/GT': 'SPAM/EGGS'}
    with pytest.raises(ValueError):
        read_vcf(vcf_path, fields='*', rename_fields=rename)
    # parent clash
    rename = {'CHROM': 'variants/chromosome',
              'variants/altlen': 'spam/eggs',
              'calldata/GT': 'spam'}
    with pytest.raises(ValueError):
        read_vcf(vcf_path, fields='*', rename_fields=rename)
    # parent clash
    rename = {'CHROM': 'variants/chromosome',
              'variants/altlen': 'spam/eggs',
              'calldata/GT': 'SPAM'}
    with pytest.raises(ValueError):
        read_vcf(vcf_path, fields='*', rename_fields=rename)
    # parent clash
    rename = {'CHROM': 'variants/chromosome',
              'variants/altlen': 'spam',
              'calldata/GT': 'spam/eggs'}
    with pytest.raises(ValueError):
        read_vcf(vcf_path, fields='*', rename_fields=rename)
    # parent clash
    rename = {'CHROM': 'variants/chromosome',
              'variants/altlen': 'spam',
              'calldata/GT': 'SPAM/EGGS'}
    with pytest.raises(ValueError):
        read_vcf(vcf_path, fields='*', rename_fields=rename)
def test_fields_default():
    """With no fields argument, read_vcf must return only the default set:
    samples, the fixed variant fields, FILTER_PASS and the GT calls."""
    vcf_path = fixture_path('sample.vcf')
    callset = read_vcf(vcf_path)
    expected_fields = [
        'samples',
        'variants/CHROM',
        'variants/POS',
        'variants/ID',
        'variants/REF',
        'variants/ALT',
        'variants/QUAL',
        'variants/FILTER_PASS',
        'calldata/GT',
    ]
    assert sorted(expected_fields) == sorted(callset.keys())
def test_fields_all_variants():
    """fields='variants/*' must expand to every variants-group field
    (fixed, INFO and computed) and exclude samples/calldata."""
    vcf_path = fixture_path('sample.vcf')
    callset = read_vcf(vcf_path, fields='variants/*')
    expected_fields = [
        # fixed fields
        'variants/CHROM',
        'variants/POS',
        'variants/ID',
        'variants/REF',
        'variants/ALT',
        'variants/QUAL',
        'variants/FILTER_PASS',
        'variants/FILTER_q10',
        'variants/FILTER_s50',
        # INFO fields
        'variants/AA',
        'variants/AC',
        'variants/AF',
        'variants/AN',
        'variants/DB',
        'variants/DP',
        'variants/H2',
        'variants/NS',
        # special computed fields
        'variants/altlen',
        'variants/numalt',
        'variants/is_snp',
    ]
    assert sorted(expected_fields) == sorted(callset.keys())
def test_fields_info():
    """fields='INFO' must expand to exactly the INFO fields declared in the
    fixture's header."""
    vcf_path = fixture_path('sample.vcf')
    callset = read_vcf(vcf_path, fields='INFO')
    expected_fields = [
        # INFO fields
        'variants/AA',
        'variants/AC',
        'variants/AF',
        'variants/AN',
        'variants/DB',
        'variants/DP',
        'variants/H2',
        'variants/NS',
    ]
    assert sorted(expected_fields) == sorted(callset.keys())
def test_fields_filter():
    """``fields='FILTER'`` expands to one boolean field per FILTER value."""
    expected_fields = sorted(['variants/FILTER_PASS',
                              'variants/FILTER_q10',
                              'variants/FILTER_s50'])
    callset1 = read_vcf(fixture_path('sample.vcf'), fields='FILTER')
    assert expected_fields == sorted(callset1.keys())
    # this has explicit PASS definition in header, shouldn't cause problems
    callset2 = read_vcf(fixture_path('test16.vcf'), fields='FILTER')
    assert expected_fields == sorted(callset2.keys())
    # both files should yield identical data
    for k in callset1.keys():
        assert_array_equal(callset1[k], callset2[k])
def test_fields_all_calldata():
    """``fields='calldata/*'`` selects every FORMAT field, nothing else."""
    callset = read_vcf(fixture_path('sample.vcf'), fields='calldata/*')
    expected_fields = ['calldata/' + name for name in ('GT', 'GQ', 'HQ', 'DP')]
    assert sorted(expected_fields) == sorted(callset.keys())
def test_fields_selected():
    """Field names may be given bare or fully qualified; output keys are
    always fully qualified."""
    vcf_path = fixture_path('sample.vcf')
    requested = ['CHROM', 'variants/POS', 'AC', 'variants/AF', 'GT',
                 'calldata/HQ', 'FILTER_q10', 'variants/numalt']
    expected_fields = [
        'variants/CHROM',
        'variants/POS',
        'variants/FILTER_q10',
        'variants/AC',
        'variants/AF',
        'variants/numalt',
        # FORMAT fields
        'calldata/GT',
        'calldata/HQ',
    ]
    # without samples
    callset = read_vcf(vcf_path, fields=requested)
    assert sorted(expected_fields) == sorted(callset.keys())
    # with samples
    callset = read_vcf(vcf_path, fields=requested + ['samples'],
                       chunk_length=4, buffer_size=100)
    assert sorted(expected_fields + ['samples']) == sorted(callset.keys())
def test_fields_dups():
    """Duplicate field requests are silently collapsed to a single field."""
    callset = read_vcf(fixture_path('sample.vcf'),
                       fields=['CHROM', 'variants/CHROM', 'variants/AF',
                               'variants/AF', 'numalt', 'variants/numalt'])
    expected_fields = sorted(['variants/CHROM', 'variants/AF',
                              'variants/numalt'])
    assert expected_fields == sorted(callset.keys())
def test_fields_dups_case_insensitive():
    # allow case-insensitive dups here (but not in vcf_to_zarr)
    callset = read_vcf(fixture_path('altlen.vcf'), fields=['ALTLEN', 'altlen'])
    expected_fields = sorted(['variants/ALTLEN', 'variants/altlen'])
    assert expected_fields == sorted(callset.keys())
def _test_read_vcf_content(vcf, chunk_length, buffer_size):
    """Check the content parsed from the sample VCF fixture.

    Parameters
    ----------
    vcf : str or callable
        Either a path to the VCF file, or a zero-argument factory returning
        an open binary file-like object (which this function closes).
    chunk_length : int
        Chunk length passed through to read_vcf().
    buffer_size : int
        Buffer size passed through to read_vcf().
    """

    def _read(types):
        # Open the input if a factory was given; close it even if read_vcf
        # raises (the original open/close pattern leaked on error).
        if isinstance(vcf, str):
            return read_vcf(vcf, fields='*', chunk_length=chunk_length,
                            buffer_size=buffer_size, types=types)
        input_file = vcf()
        try:
            return read_vcf(input_file, fields='*', chunk_length=chunk_length,
                            buffer_size=buffer_size, types=types)
        finally:
            input_file.close()

    # object dtype for strings
    callset = _read({'calldata/DP': 'object'})

    # samples
    assert (3,) == callset['samples'].shape
    assert 'O' == callset['samples'].dtype.kind
    assert ['NA00001', 'NA00002', 'NA00003'] == callset['samples'].tolist()

    # fixed fields
    assert (9,) == callset['variants/CHROM'].shape
    assert np.dtype(object) == callset['variants/CHROM'].dtype
    assert '19' == callset['variants/CHROM'][0]
    assert (9,) == callset['variants/POS'].shape
    assert 111 == callset['variants/POS'][0]
    assert (9,) == callset['variants/ID'].shape
    assert np.dtype(object) == callset['variants/ID'].dtype
    assert 'rs6054257' == callset['variants/ID'][2]
    assert (9,) == callset['variants/REF'].shape
    assert np.dtype(object) == callset['variants/REF'].dtype
    assert 'A' == callset['variants/REF'][0]
    assert (9, 3) == callset['variants/ALT'].shape
    assert np.dtype(object) == callset['variants/ALT'].dtype
    assert 'ATG' == callset['variants/ALT'][8, 1]
    assert (9,) == callset['variants/QUAL'].shape
    assert 10.0 == callset['variants/QUAL'][1]
    assert (9,) == callset['variants/FILTER_PASS'].shape
    assert callset['variants/FILTER_PASS'][2]
    assert not callset['variants/FILTER_PASS'][3]
    assert (9,) == callset['variants/FILTER_q10'].shape
    assert callset['variants/FILTER_q10'][3]

    # INFO fields
    assert 3 == callset['variants/NS'][2]
    assert .5 == callset['variants/AF'][2, 0]
    assert callset['variants/DB'][2]
    assert (3, 1, -1) == tuple(callset['variants/AC'][6])

    # test calldata content
    assert (9, 3, 2) == callset['calldata/GT'].shape
    assert (0, 0) == tuple(callset['calldata/GT'][0, 0])
    assert (-1, -1) == tuple(callset['calldata/GT'][6, 2])
    assert (-1, -1) == tuple(callset['calldata/GT'][7, 2])
    assert (9, 3, 2) == callset['calldata/HQ'].shape
    assert (10, 15) == tuple(callset['calldata/HQ'][0, 0])
    assert (9, 3) == callset['calldata/DP'].shape
    assert np.dtype(object) == callset['calldata/DP'].dtype
    assert ('4', '2', '3') == tuple(callset['calldata/DP'][6])

    # String (S) dtype
    types = {'CHROM': 'S12', 'ID': 'S20', 'REF': 'S20', 'ALT': 'S20',
             'calldata/DP': 'S3', 'samples': 'S20'}
    callset = _read(types)

    # samples
    assert (3,) == callset['samples'].shape
    assert 'S' == callset['samples'].dtype.kind
    assert [b'NA00001', b'NA00002', b'NA00003'] == callset['samples'].tolist()

    # fixed fields
    assert (9,) == callset['variants/CHROM'].shape
    assert 'S' == callset['variants/CHROM'].dtype.kind
    assert b'19' == callset['variants/CHROM'][0]
    assert (9,) == callset['variants/POS'].shape
    assert 111 == callset['variants/POS'][0]
    assert (9,) == callset['variants/ID'].shape
    assert 'S' == callset['variants/ID'].dtype.kind
    assert b'rs6054257' == callset['variants/ID'][2]
    assert (9,) == callset['variants/REF'].shape
    assert b'A' == callset['variants/REF'][0]
    assert 'S' == callset['variants/REF'].dtype.kind
    assert (9, 3) == callset['variants/ALT'].shape
    assert b'ATG' == callset['variants/ALT'][8, 1]
    assert 'S' == callset['variants/ALT'].dtype.kind
    assert (9,) == callset['variants/QUAL'].shape
    assert 10.0 == callset['variants/QUAL'][1]
    assert (9,) == callset['variants/FILTER_PASS'].shape
    assert callset['variants/FILTER_PASS'][2]
    assert not callset['variants/FILTER_PASS'][3]
    assert (9,) == callset['variants/FILTER_q10'].shape
    assert callset['variants/FILTER_q10'][3]

    # INFO fields
    assert 3 == callset['variants/NS'][2]
    assert .5 == callset['variants/AF'][2, 0]
    assert callset['variants/DB'][2]
    assert (3, 1, -1) == tuple(callset['variants/AC'][6])

    # test calldata content
    assert (9, 3, 2) == callset['calldata/GT'].shape
    assert (0, 0) == tuple(callset['calldata/GT'][0, 0])
    assert (-1, -1) == tuple(callset['calldata/GT'][6, 2])
    assert (-1, -1) == tuple(callset['calldata/GT'][7, 2])
    assert (9, 3, 2) == callset['calldata/HQ'].shape
    assert (10, 15) == tuple(callset['calldata/HQ'][0, 0])
    assert (9, 3) == callset['calldata/DP'].shape
    assert 'S' == callset['calldata/DP'].dtype.kind
    assert (b'4', b'2', b'3') == tuple(callset['calldata/DP'][6])
def test_inputs():
    """Content is identical across input kinds: path, gzipped path, open
    file, gzip file, and in-memory buffers with \\n, \\r and \\r\\n endings."""
    vcf_path = fixture_path('sample.vcf')
    with open(vcf_path, mode='rb') as f:
        data = f.read(-1)
    inputs = (
        vcf_path,
        vcf_path + '.gz',
        lambda: open(vcf_path, mode='rb'),
        lambda: gzip.open(vcf_path + '.gz', mode='rb'),
        lambda: io.BytesIO(data),
        lambda: io.BytesIO(data.replace(b'\n', b'\r')),
        lambda: io.BytesIO(data.replace(b'\n', b'\r\n')),
    )
    for i in inputs:
        _test_read_vcf_content(i, chunk_length=3, buffer_size=10)
def test_chunk_lengths():
    """Parsed content is independent of the chunk length."""
    vcf_path = fixture_path('sample.vcf')
    for chunk_length in (1, 2, 3, 5, 10, 20):
        _test_read_vcf_content(vcf_path, chunk_length, buffer_size=10)
def test_buffer_sizes():
    """Parsed content is independent of the buffer size."""
    vcf_path = fixture_path('sample.vcf')
    for buffer_size in (1, 2, 4, 8, 16, 32, 64, 128, 256, 512):
        _test_read_vcf_content(vcf_path, chunk_length=3, buffer_size=buffer_size)
def test_utf8():
    """Non-ASCII (UTF-8) text in sample names, CHROM, ID, FILTER names,
    INFO and FORMAT string values is decoded to unicode when using the
    default object dtype."""
    vcf_path = fixture_path('sample.utf8.vcf')
    callset = read_vcf(vcf_path, fields='*')
    # samples
    assert (3,) == callset['samples'].shape
    assert 'O' == callset['samples'].dtype.kind
    assert [u'NA00001', u'Γεια σου κόσμε!', u'NA00003'] == callset['samples'].tolist()
    # CHROM
    assert (9,) == callset['variants/CHROM'].shape
    assert np.dtype(object) == callset['variants/CHROM'].dtype
    assert '19' == callset['variants/CHROM'][0]
    assert u'Njatjeta Botë!' == callset['variants/CHROM'][-2]
    # POS
    assert (9,) == callset['variants/POS'].shape
    assert 111 == callset['variants/POS'][0]
    # ID
    assert (9,) == callset['variants/ID'].shape
    assert np.dtype(object) == callset['variants/ID'].dtype
    assert 'foo' == callset['variants/ID'][0]
    assert u'¡Hola mundo!' == callset['variants/ID'][1]
    # REF
    assert (9,) == callset['variants/REF'].shape
    assert np.dtype(object) == callset['variants/REF'].dtype
    assert 'A' == callset['variants/REF'][0]
    # ALT
    assert (9, 3) == callset['variants/ALT'].shape
    assert np.dtype(object) == callset['variants/ALT'].dtype
    assert 'ATG' == callset['variants/ALT'][8, 1]
    # QUAL
    assert (9,) == callset['variants/QUAL'].shape
    assert 10.0 == callset['variants/QUAL'][1]
    # FILTER - non-ASCII filter names become non-ASCII field keys
    assert (9,) == callset['variants/FILTER_PASS'].shape
    assert callset['variants/FILTER_PASS'][2]
    assert not callset['variants/FILTER_PASS'][5]
    assert (9,) == callset[u'variants/FILTER_Helló_világ!'].shape
    assert not callset[u'variants/FILTER_Helló_világ!'][0]
    assert callset[u'variants/FILTER_Helló_világ!'][5]
    # INFO fields
    assert u'foo' == callset['variants/TEXT'][0]
    assert u'こんにちは世界' == callset['variants/TEXT'][4]
    # calldata
    assert (9, 3, 2) == callset['calldata/GT'].shape
    assert (0, 0) == tuple(callset['calldata/GT'][0, 0])
    assert (-1, -1) == tuple(callset['calldata/GT'][6, 2])
    assert (-1, -1) == tuple(callset['calldata/GT'][7, 2])
    assert (9, 3, 2) == callset['calldata/HQ'].shape
    assert (10, 15) == tuple(callset['calldata/HQ'][0, 0])
    assert (9, 3) == callset['calldata/DP'].shape
    assert (4, 2, 3) == tuple(callset['calldata/DP'][6])
    assert (u'foo', u'Hej Världen!', u'.') == tuple(callset['calldata/GTXT'][0])
def test_truncation_chrom():
    """A VCF truncated after the CHROM column still parses; the requested
    'samples' field is absent from the output because there is no data."""
    input_data = (b"#CHROM\n"
                  b"2L\n"
                  b"2R\n")
    # with and without final line terminator
    for data in (input_data, input_data[:-1]):
        for string_type in 'S10', 'object':
            callset = read_vcf(io.BytesIO(data), fields=['CHROM', 'samples'],
                               types={'CHROM': string_type})
            # check fields
            assert ['variants/CHROM'] == sorted(callset.keys())
            # check data content
            a = callset['variants/CHROM']
            expect = [b'2L', b'2R'] if string_type == 'S10' else ['2L', '2R']
            assert len(expect) == len(a)
            assert expect == list(a)
def test_truncation_pos():
    """A VCF truncated after the POS column still parses."""
    input_data = (b"#CHROM\tPOS\n"
                  b"2L\t12\n"
                  b"2R\t34\n")
    # with and without final line terminator
    for data in (input_data, input_data[:-1]):
        callset = read_vcf(io.BytesIO(data), fields=['POS', 'samples'])
        # check fields
        assert ['variants/POS'] == sorted(callset.keys())
        # check data content
        a = callset['variants/POS']
        assert 2 == len(a)
        assert [12, 34] == a.tolist()
def test_truncation_id():
    """A VCF truncated after the ID column still parses."""
    input_data = (b"#CHROM\tPOS\tID\n"
                  b"2L\t12\tfoo\n"
                  b"2R\t34\tbar\n")
    # with and without final line terminator
    for data in (input_data, input_data[:-1]):
        for string_type in 'S10', 'object':
            callset = read_vcf(io.BytesIO(data), fields=['ID', 'samples'],
                               types={'ID': string_type})
            # check fields
            assert ['variants/ID'] == sorted(callset.keys())
            # check data content
            a = callset['variants/ID']
            expect = [b'foo', b'bar'] if string_type == 'S10' else ['foo', 'bar']
            assert len(expect) == len(a)
            assert expect == list(a)
def test_truncation_ref():
    """A VCF truncated after the REF column still parses."""
    input_data = (b"#CHROM\tPOS\tID\tREF\n"
                  b"2L\t12\tfoo\tA\n"
                  b"2R\t34\tbar\tC\n")
    # with and without final line terminator
    for data in (input_data, input_data[:-1]):
        for string_type in 'S10', 'object':
            callset = read_vcf(io.BytesIO(data), fields=['REF', 'samples'],
                               types={'REF': string_type})
            # check fields
            assert ['variants/REF'] == sorted(callset.keys())
            # check data content
            a = callset['variants/REF']
            expect = [b'A', b'C'] if string_type == 'S10' else ['A', 'C']
            assert len(expect) == len(a)
            assert expect == list(a)
def test_truncation_alt():
    """A VCF truncated after the ALT column still parses."""
    input_data = (b"#CHROM\tPOS\tID\tREF\tALT\n"
                  b"2L\t12\tfoo\tA\tC\n"
                  b"2R\t34\tbar\tC\tG\n")
    # with and without final line terminator
    for data in (input_data, input_data[:-1]):
        for string_type in 'S10', 'object':
            callset = read_vcf(io.BytesIO(data), fields=['ALT', 'samples'],
                               numbers=dict(ALT=1),
                               types={'ALT': string_type})
            # check fields
            assert ['variants/ALT'] == sorted(callset.keys())
            # check data content
            a = callset['variants/ALT']
            expect = [b'C', b'G'] if string_type == 'S10' else ['C', 'G']
            assert len(expect) == len(a)
            assert expect == list(a)
def test_truncation_qual():
    """A VCF truncated after the QUAL column still parses."""
    input_data = (b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\n"
                  b"2L\t12\tfoo\tA\tC\t1.2\n"
                  b"2R\t34\tbar\tC\tG\t3.4\n")
    # with and without final line terminator
    for data in (input_data, input_data[:-1]):
        callset = read_vcf(io.BytesIO(data), fields=['QUAL', 'samples'])
        # check fields
        assert ['variants/QUAL'] == sorted(callset.keys())
        # check data content
        a = callset['variants/QUAL']
        assert 2 == len(a)
        for got, want in zip(a, (1.2, 3.4)):
            assert approx(want) == got
def test_truncation_filter():
    """A VCF truncated after the FILTER column still parses; '.' sets no
    filter flags, and 'q10,s50' sets both flags (note the comma separator
    in this data)."""
    input_data = (b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\n"
                  b"2L\t12\tfoo\tA\tC\t1.2\t.\n"
                  b"2R\t34\tbar\tC\tG\t3.4\tPASS\n"
                  b"2R\t56\tbaz\tG\tT\t56.77\tq10,s50\n")
    # with and without final line terminator
    for data in (input_data, input_data[:-1]):
        input_file = io.BytesIO(data)
        callset = read_vcf(input_file,
                           fields=['FILTER_PASS', 'FILTER_q10', 'FILTER_s50', 'samples'])
        # check fields ('samples' requested but absent - no sample columns)
        expected_fields = ['variants/FILTER_PASS', 'variants/FILTER_q10',
                           'variants/FILTER_s50']
        assert sorted(expected_fields) == sorted(callset.keys())
        # check data content
        a = callset['variants/FILTER_PASS']
        assert 3 == len(a)
        assert [False, True, False] == a.tolist()
        a = callset['variants/FILTER_q10']
        assert 3 == len(a)
        assert [False, False, True] == a.tolist()
        a = callset['variants/FILTER_s50']
        assert 3 == len(a)
        assert [False, False, True] == a.tolist()
def test_truncation_info():
    """A VCF truncated after the INFO column still parses; '.' or empty INFO
    yields the fill value (-1 for Integer, NaN for Float)."""
    input_data = (b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n"
                  b"2L\t12\tfoo\tA\tC\t1.2\t.\tfoo=42;bar=1.2\n"
                  b"2R\t34\tbar\tC\tG\t3.4\tPASS\t.\n"
                  b"2R\t56\tbaz\tG\tT\t56.77\tq10,s50\t\n")
    # with and without final line terminator
    for data in (input_data, input_data[:-1]):
        input_file = io.BytesIO(data)
        callset = read_vcf(input_file,
                           fields=['foo', 'bar', 'samples'],
                           types=dict(foo='Integer', bar='Float'))
        # check fields ('samples' requested but absent - no sample columns)
        expected_fields = ['variants/foo', 'variants/bar']
        assert sorted(expected_fields) == sorted(callset.keys())
        # check data content
        a = callset['variants/foo']
        assert 3 == len(a)
        assert 42 == a[0]
        assert -1 == a[1]
        assert -1 == a[2]
        a = callset['variants/bar']
        assert 3 == len(a)
        assert approx(1.2) == a[0]
        assert np.isnan(a[1])
        assert np.isnan(a[2])
def test_truncation_format():
    """A VCF truncated after the FORMAT column (no sample columns) still
    parses; INFO values are unaffected."""
    input_data = (b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\n"
                  b"2L\t12\tfoo\tA\tC\t1.2\t.\tfoo=42;bar=1.2\tGT:GQ\n"
                  b"2R\t34\tbar\tC\tG\t3.4\tPASS\t.\t.\n"
                  b"2R\t56\tbaz\tG\tT\t56.77\tq10,s50\t\t\n")
    # with and without final line terminator
    for data in (input_data, input_data[:-1]):
        input_file = io.BytesIO(data)
        callset = read_vcf(input_file,
                           fields=['foo', 'bar', 'samples'],
                           types=dict(foo='Integer', bar='Float'))
        # check fields ('samples' requested but absent - no sample columns)
        expected_fields = ['variants/foo', 'variants/bar']
        assert sorted(expected_fields) == sorted(callset.keys())
        # check data content
        a = callset['variants/foo']
        assert 3 == len(a)
        assert 42 == a[0]
        assert -1 == a[1]
        assert -1 == a[2]
        a = callset['variants/bar']
        assert 3 == len(a)
        assert approx(1.2) == a[0]
        assert np.isnan(a[1])
        assert np.isnan(a[2])
def test_truncation_calldata():
    """Rows truncated within the sample columns still parse; missing calls
    are filled with -1. Sample order follows the header (S2 before S1)."""
    input_data = (b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\n"
                  b"2L\t12\tfoo\tA\tC\t1.2\t.\tfoo=42;bar=1.2\tGT:GQ\t0/1:12\t1/2:34\n"
                  b"2R\t34\tbar\tC\tG\t3.4\tPASS\t.\tGT\t./.\n"
                  b"2R\t56\tbaz\tG\tT\t56.77\tq10,s50\t\n")
    # with and without final line terminator
    for data in (input_data, input_data[:-1]):
        input_file = io.BytesIO(data)
        callset = read_vcf(input_file,
                           fields=['calldata/GT', 'calldata/GQ', 'samples'],
                           types={'calldata/GT': 'i1', 'calldata/GQ': 'i2'})
        # check fields
        expected_fields = ['calldata/GT', 'calldata/GQ', 'samples']
        assert sorted(expected_fields) == sorted(callset.keys())
        # check data content
        assert 2 == len(callset['samples'])
        assert ['S2', 'S1'] == callset['samples'].tolist()
        a = callset['calldata/GT']
        assert (3, 2, 2) == a.shape
        assert (0, 1) == tuple(a[0, 0])
        assert (1, 2) == tuple(a[0, 1])
        # row 2 has './.' for S2 and nothing for S1; row 3 has no calldata
        assert (-1, -1) == tuple(a[1, 0])
        assert (-1, -1) == tuple(a[1, 1])
        assert (-1, -1) == tuple(a[2, 0])
        assert (-1, -1) == tuple(a[2, 1])
        a = callset['calldata/GQ']
        assert (3, 2) == a.shape
        assert 12 == a[0, 0]
        assert 34 == a[0, 1]
        assert -1 == a[1, 0]
        assert -1 == a[1, 1]
        assert -1 == a[2, 0]
        assert -1 == a[2, 1]
def test_info_types():
    """INFO fields can be parsed into any requested dtype."""
    vcf_path = fixture_path('sample.vcf')
    dtypes = ('i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8', 'f4', 'f8',
              'S10', 'object')
    for dtype in dtypes:
        callset = read_vcf(vcf_path, fields=['variants/DP', 'variants/AC'],
                           types={'variants/DP': dtype, 'variants/AC': dtype},
                           numbers={'variants/AC': 3})
        dp = callset['variants/DP']
        ac = callset['variants/AC']
        assert np.dtype(dtype) == dp.dtype
        assert (9,) == dp.shape
        assert (9, 3) == ac.shape
def test_vcf_types():
    """The Type declared in an INFO header line determines the parsed dtype."""
    header_fmt = b'##INFO=<ID=foo,Number=1,Type=%s,Description="Testing 123.">\n'
    body_fmt = (b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\n"
                b"2L\t12\t.\tA\tC\t.\t.\tfoo=%s\t.\n")
    cases = [
        (b'String', b'bar', np.dtype(object)),
        (b'Integer', b'42', np.dtype('i4')),
        (b'Float', b'42.0', np.dtype('f4')),
        (b'Character', b'b', np.dtype('S1')),
    ]
    for vcf_type, value, expect_dtype in cases:
        input_data = (header_fmt % vcf_type) + (body_fmt % value)
        callset = read_vcf(io.BytesIO(input_data), fields=['foo'])
        assert expect_dtype == callset['variants/foo'].dtype
def test_genotype_types():
    """GT can be parsed into any requested dtype; the special 'genotype/i1'
    type handles genotype-like values in a non-GT FORMAT field."""
    vcf_path = fixture_path('sample.vcf')
    for dtype in 'i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8', 'S3', 'object':
        callset = read_vcf(vcf_path, fields=['GT'], types={'GT': dtype},
                           numbers={'GT': 2})
        assert np.dtype(dtype) == callset['calldata/GT'].dtype
        assert (9, 3, 2) == callset['calldata/GT'].shape
    # non-GT field with genotype dtype
    input_data = (
        b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS1\tS2\tS3\n"
        b"2L\t12\t.\tA\t.\t.\t.\t.\tCustomGT:CustomGQ\t0/0/0:11\t0/1/2:12\t././.:.\n"
        b"2L\t34\t.\tC\tT\t.\t.\t.\tCustomGT:CustomGQ\t0/1/2:22\t3/3/.:33\t.\n"
        b"3R\t45\t.\tG\tA,T\t.\t.\t.\tCustomGT:CustomGQ\t0/1:.\t5:12\t\n"
    )
    callset = read_vcf(io.BytesIO(input_data),
                       fields=['calldata/CustomGT', 'calldata/CustomGQ'],
                       numbers={'calldata/CustomGT': 3, 'calldata/CustomGQ': 1},
                       types={'calldata/CustomGT': 'genotype/i1',
                              'calldata/CustomGQ': 'i2'})
    # missing calls and missing alleles are filled with -1
    e = np.array([[[0, 0, 0], [0, 1, 2], [-1, -1, -1]],
                  [[0, 1, 2], [3, 3, -1], [-1, -1, -1]],
                  [[0, 1, -1], [5, -1, -1], [-1, -1, -1]]], dtype='i1')
    a = callset['calldata/CustomGT']
    assert_array_equal(e, a)
    assert e.dtype == a.dtype
    e = np.array([[11, 12, -1],
                  [22, 33, -1],
                  [-1, 12, -1]], dtype='i2')
    a = callset['calldata/CustomGQ']
    assert_array_equal(e, a)
    assert e.dtype == a.dtype
def test_calldata_types():
    """Calldata fields can be parsed into any requested dtype."""
    vcf_path = fixture_path('sample.vcf')
    dtypes = ('i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8', 'f4', 'f8',
              'S10', 'object')
    for dtype in dtypes:
        callset = read_vcf(vcf_path, fields=['HQ'], types={'HQ': dtype},
                           numbers={'HQ': 2})
        hq = callset['calldata/HQ']
        assert np.dtype(dtype) == hq.dtype
        assert (9, 3, 2) == hq.shape
def test_genotype_ploidy():
    """The GT number controls the ploidy dimension: extra alleles are
    dropped, missing alleles are padded with -1."""
    vcf_path = fixture_path('sample.vcf')
    # ploidy 1: only the first allele is kept
    callset = read_vcf(vcf_path, fields='GT', numbers=dict(GT=1))
    gt = callset['calldata/GT']
    assert (9, 3) == gt.shape
    assert (0, 0, 0) == tuple(gt[8, :])
    # ploidy >= 2: short calls are padded with -1
    for ploidy in (2, 3):
        callset = read_vcf(vcf_path, fields='GT', numbers=dict(GT=ploidy))
        gt = callset['calldata/GT']
        assert (9, 3, ploidy) == gt.shape
        for j, alleles in enumerate([(0,), (0, 1), (0, 2)]):
            padded = alleles + (-1,) * (ploidy - len(alleles))
            assert padded == tuple(gt[8, j])
def test_fills_info():
    """The fill value for missing INFO data defaults to -1 and can be
    overridden via `fills`."""
    vcf_path = fixture_path('sample.vcf')
    for fills, expect in [(None, -1), (dict(AN=-2), -2), (dict(AN=-1), -1)]:
        if fills is None:
            callset = read_vcf(vcf_path, fields='AN', numbers=dict(AN=1))
        else:
            callset = read_vcf(vcf_path, fields='AN', numbers=dict(AN=1),
                               fills=fills)
        a = callset['variants/AN']
        assert (9,) == a.shape
        # AN is absent from the first three records
        for i in range(3):
            assert expect == a[i]
def test_fills_genotype():
    """The fill value for missing GT alleles defaults to -1 and can be
    overridden via `fills`."""
    vcf_path = fixture_path('sample.vcf')

    def _check(gt, expect_row8):
        # verify shape and the calls at the last record for all samples
        assert (9, 3, len(expect_row8[0])) == gt.shape
        for j, expect in enumerate(expect_row8):
            assert expect == tuple(gt[8, j])

    callset = read_vcf(vcf_path, fields='GT', numbers=dict(GT=2))
    _check(callset['calldata/GT'], [(0, -1), (0, 1), (0, 2)])
    callset = read_vcf(vcf_path, fields='GT', numbers=dict(GT=2),
                       fills=dict(GT=-2))
    _check(callset['calldata/GT'], [(0, -2), (0, 1), (0, 2)])
    callset = read_vcf(vcf_path, fields='GT', numbers=dict(GT=3),
                       fills=dict(GT=-1))
    _check(callset['calldata/GT'], [(0, -1, -1), (0, 1, -1), (0, 2, -1)])
def test_fills_calldata():
    """The fill value for missing calldata defaults to -1 and can be
    overridden via `fills`."""
    vcf_path = fixture_path('sample.vcf')
    for fills, fill in [(None, -1), (dict(HQ=-2), -2), (dict(HQ=-1), -1)]:
        if fills is None:
            callset = read_vcf(vcf_path, fields='HQ', numbers=dict(HQ=2))
        else:
            callset = read_vcf(vcf_path, fields='HQ', numbers=dict(HQ=2),
                               fills=fills)
        a = callset['calldata/HQ']
        assert (9, 3, 2) == a.shape
        assert (10, 15) == tuple(a[0, 0])
        # HQ is missing for the last two records
        assert (fill, fill) == tuple(a[7, 0])
        assert (fill, fill) == tuple(a[8, 0])
def test_numbers():
    """The `numbers` parameter controls how many values are kept per field:
    extra values are dropped, missing values take the fill; number 0 gives
    a boolean presence flag."""
    vcf_path = fixture_path('sample.vcf')
    # ALT with number 1: 1-D array of first alternate alleles
    callset = read_vcf(vcf_path, fields=['ALT'], numbers=dict(ALT=1))
    a = callset['variants/ALT']
    assert (9,) == a.shape
    assert 'A' == a[8]
    # ALT with number 2 and 3
    callset = read_vcf(vcf_path, fields=['ALT'], numbers=dict(ALT=2),
                       types=dict(ALT='S4'))
    a = callset['variants/ALT']
    assert (9, 2) == a.shape
    assert b'A' == a[8, 0]
    assert b'ATG' == a[8, 1]
    callset = read_vcf(vcf_path, fields=['ALT'], numbers=dict(ALT=3),
                       types=dict(ALT='S4'))
    a = callset['variants/ALT']
    assert (9, 3) == a.shape
    assert b'A' == a[8, 0]
    assert b'ATG' == a[8, 1]
    assert b'C' == a[8, 2]
    # AC with number 0: boolean presence flag
    callset = read_vcf(vcf_path, fields=['AC'], numbers=dict(AC=0))
    a = callset['variants/AC']
    assert (9,) == a.shape
    assert not a[0]
    assert a[6]
    # AC with number 1 and 2 (-1 fill where absent)
    callset = read_vcf(vcf_path, fields=['AC'], numbers=dict(AC=1))
    a = callset['variants/AC']
    assert (9,) == a.shape
    assert -1 == a[0]
    assert 3 == a[6]
    callset = read_vcf(vcf_path, fields=['AC'], numbers=dict(AC=2))
    a = callset['variants/AC']
    assert (9, 2) == a.shape
    assert -1 == a[0, 0]
    assert -1 == a[0, 1]
    assert 3 == a[6, 0]
    assert 1 == a[6, 1]
    # AF (Float) with number 1 and 2 (NaN fill where absent)
    callset = read_vcf(vcf_path, fields='AF', numbers=dict(AF=1))
    a = callset['variants/AF']
    assert (9,) == a.shape
    assert 0.5 == a[2]
    assert approx(0.333) == a[4]
    callset = read_vcf(vcf_path, fields='AF', numbers=dict(AF=2))
    a = callset['variants/AF']
    assert (9, 2) == a.shape
    assert 0.5 == a[2, 0]
    assert np.isnan(a[2, 1])
    assert approx(0.333) == a[4, 0]
    assert approx(0.667) == a[4, 1]
    # HQ (calldata) with number 1 and 2
    callset = read_vcf(vcf_path, fields=['HQ'], numbers=dict(HQ=1))
    a = callset['calldata/HQ']
    assert (9, 3) == a.shape
    assert 10 == a[0, 0]
    assert 51 == a[2, 0]
    assert -1 == a[6, 0]
    callset = read_vcf(vcf_path, fields=['HQ'], numbers=dict(HQ=2))
    a = callset['calldata/HQ']
    assert (9, 3, 2) == a.shape
    assert (10, 15) == tuple(a[0, 0])
    assert (51, 51) == tuple(a[2, 0])
    assert (-1, -1) == tuple(a[6, 0])
def test_alt_number():
    """`alt_number` sets the number for all ALT-dependent fields at once;
    per-field `numbers` entries take precedence."""
    vcf_path = fixture_path('sample.vcf')
    fields = ['ALT', 'AC', 'AF']
    for alt_number in 2, 1, 5:
        callset = read_vcf(vcf_path, fields=fields, alt_number=alt_number)
        # alt_number == 1 collapses the trailing dimension
        expect_shape = (9,) if alt_number == 1 else (9, alt_number)
        for name in fields:
            assert expect_shape == callset['variants/' + name].shape
    # can override
    callset = read_vcf(vcf_path, fields=fields,
                       alt_number=5, numbers={'ALT': 2, 'AC': 4})
    assert (9, 2) == callset['variants/ALT'].shape
    assert (9, 4) == callset['variants/AC'].shape
    assert (9, 5) == callset['variants/AF'].shape
def test_read_region():
    """Region queries work on both plain and gzipped files, with tabix,
    without, and with a bogus tabix executable (falls back gracefully)."""
    # (region, expected CHROM, expected POS values); CHROM None means the
    # query matches nothing and the callset is None
    cases = [
        ('19', '19', [111, 112]),
        ('20', '20', [14370, 17330, 1110696, 1230237, 1234567, 1235237]),
        ('X', 'X', [10]),
        ('Y', None, None),
        ('20:1-100000', '20', [14370, 17330]),
        ('20:1000000-1233000', '20', [1110696, 1230237]),
        ('20:1233000-2000000', '20', [1234567, 1235237]),
    ]
    for vcf_path in (fixture_path('sample.vcf.gz'),
                     fixture_path('sample.vcf')):
        for tabix in 'tabix', None, 'foobar':
            for region, expect_chrom, expect_pos in cases:
                callset = read_vcf(vcf_path, region=region, tabix=tabix)
                if expect_chrom is None:
                    assert callset is None
                    continue
                chrom = callset['variants/CHROM']
                pos = callset['variants/POS']
                assert isinstance(chrom, np.ndarray)
                assert len(expect_pos) == len(chrom)
                assert np.all(chrom == expect_chrom)
                assert len(expect_pos) == len(pos)
                assert_array_equal(expect_pos, pos)
def test_read_region_unsorted():
    # Test behaviour when data are not sorted by chromosome or position and
    # tabix is not available. Records are returned in file order, not
    # position order.
    fn = fixture_path('unsorted.vcf')
    tabix = None
    # (region, expected CHROM, expected POS values in file order); CHROM
    # None means the query matches nothing and the callset is None
    cases = [
        ('19', '19', [111, 112]),
        ('20', '20', [14370, 1230237, 1234567, 1235237, 17330, 1110696]),
        ('X', 'X', [10]),
        ('Y', None, None),
        ('20:1-100000', '20', [14370, 17330]),
        ('20:1000000-1233000', '20', [1230237, 1110696]),
        ('20:1233000-2000000', '20', [1234567, 1235237]),
    ]
    for region, expect_chrom, expect_pos in cases:
        callset = read_vcf(fn, region=region, tabix=tabix)
        if expect_chrom is None:
            assert callset is None
            continue
        chrom = callset['variants/CHROM']
        pos = callset['variants/POS']
        assert isinstance(chrom, np.ndarray)
        assert len(expect_pos) == len(chrom)
        assert np.all(chrom == expect_chrom)
        assert len(expect_pos) == len(pos)
        assert_array_equal(expect_pos, pos)
def test_read_samples():
    """Samples can be selected by name, by index, or a mix; selection order
    does not matter - output always follows file order."""
    vcf_path = fixture_path('sample.vcf')
    two_sample_selections = (['NA00001', 'NA00003'], [0, 2],
                             ['NA00003', 'NA00001'], [2, 'NA00001'])
    for samples in two_sample_selections:
        callset = read_vcf(vcf_path, fields=['samples', 'GT'], samples=samples)
        assert ['NA00001', 'NA00003'] == callset['samples'].astype('U').tolist()
        gt = callset['calldata/GT']
        assert (9, 2, 2) == gt.shape
        assert [(0, 0), (1, 1)] == [tuple(gt[2, j]) for j in range(2)]
        assert [(1, 2), (2, 2)] == [tuple(gt[4, j]) for j in range(2)]
    for samples in (['NA00002'], [1]):
        callset = read_vcf(vcf_path, fields=['samples', 'GT'], samples=samples)
        assert ['NA00002'] == callset['samples'].astype('U').tolist()
        gt = callset['calldata/GT']
        assert (9, 1, 2) == gt.shape
        assert (1, 0) == tuple(gt[2, 0])
        assert (2, 1) == tuple(gt[4, 0])
def test_read_empty():
    """A VCF with no variant records yields no callset at all."""
    assert read_vcf(fixture_path('empty.vcf')) is None
def test_ann():
    """ANNTransformer splits the snpEff ANN INFO field into one array per
    sub-field, with string sub-fields as object dtype and numeric sub-fields
    as integers (missing values encoded as -1 / empty string)."""
    vcf_path = fixture_path('ann.vcf')
    # all ANN fields
    callset = read_vcf(vcf_path, fields=['ANN'], transformers=[ANNTransformer()])
    expect_keys = sorted(['variants/ANN_Allele',
                          'variants/ANN_Annotation',
                          'variants/ANN_Annotation_Impact',
                          'variants/ANN_Gene_Name',
                          'variants/ANN_Gene_ID',
                          'variants/ANN_Feature_Type',
                          'variants/ANN_Feature_ID',
                          'variants/ANN_Transcript_BioType',
                          'variants/ANN_Rank',
                          'variants/ANN_HGVS_c',
                          'variants/ANN_HGVS_p',
                          'variants/ANN_cDNA_pos',
                          'variants/ANN_cDNA_length',
                          'variants/ANN_CDS_pos',
                          'variants/ANN_CDS_length',
                          'variants/ANN_AA_pos',
                          'variants/ANN_AA_length',
                          'variants/ANN_Distance'])
    assert expect_keys == sorted(callset.keys())
    a = callset['variants/ANN_Allele']
    assert (3,) == a.shape
    assert np.dtype('object') == a.dtype
    assert_array_equal(['T', '', 'T'], a)
    a = callset['variants/ANN_Annotation']
    assert (3,) == a.shape
    assert np.dtype('object') == a.dtype
    assert_array_equal(['intergenic_region', '', 'missense_variant'], a)
    a = callset['variants/ANN_Annotation_Impact']
    assert (3,) == a.shape
    assert np.dtype('object') == a.dtype
    assert_array_equal(['MODIFIER', '', 'MODERATE'], a)
    a = callset['variants/ANN_Gene_Name']
    assert (3,) == a.shape
    assert np.dtype('object') == a.dtype
    assert_array_equal(['AGAP004677', '', 'AGAP005273'], a)
    a = callset['variants/ANN_Gene_ID']
    assert (3,) == a.shape
    assert np.dtype('object') == a.dtype
    assert_array_equal(['AGAP004677', '', 'AGAP005273'], a)
    a = callset['variants/ANN_Feature_Type']
    assert (3,) == a.shape
    assert np.dtype('object') == a.dtype
    assert_array_equal(['intergenic_region', '', 'transcript'], a)
    a = callset['variants/ANN_Feature_ID']
    assert (3,) == a.shape
    assert np.dtype('object') == a.dtype
    assert_array_equal(['AGAP004677', '', 'AGAP005273-RA'], a)
    # normalized to the same shape/dtype/value assertion pattern as the other
    # fields (previously the dtype was asserted twice and the order differed)
    a = callset['variants/ANN_Transcript_BioType']
    assert (3,) == a.shape
    assert np.dtype('object') == a.dtype
    assert_array_equal(['', '', 'VectorBase'], a)
    a = callset['variants/ANN_Rank']
    assert (3,) == a.shape
    assert np.dtype('int8') == a.dtype
    assert_array_equal([-1, -1, 1], a[:])
    a = callset['variants/ANN_HGVS_c']
    assert (3,) == a.shape
    assert np.dtype('object') == a.dtype
    assert_array_equal(['', '', '17A>T'], a)
    a = callset['variants/ANN_HGVS_p']
    assert (3,) == a.shape
    assert np.dtype('object') == a.dtype
    assert_array_equal(['', '', 'Asp6Val'], a)
    a = callset['variants/ANN_cDNA_pos']
    assert (3,) == a.shape
    assert np.dtype('int32') == a.dtype
    assert_array_equal([-1, -1, 17], a)
    a = callset['variants/ANN_cDNA_length']
    assert (3,) == a.shape
    assert np.dtype('int32') == a.dtype
    assert_array_equal([-1, -1, 4788], a)
    a = callset['variants/ANN_CDS_pos']
    assert (3,) == a.shape
    assert np.dtype('int32') == a.dtype
    assert_array_equal([-1, -1, 17], a)
    a = callset['variants/ANN_CDS_length']
    assert (3,) == a.shape
    assert np.dtype('int32') == a.dtype
    assert_array_equal([-1, -1, 4788], a)
    a = callset['variants/ANN_AA_pos']
    assert (3,) == a.shape
    assert np.dtype('int32') == a.dtype
    assert_array_equal([-1, -1, 6], a)
    a = callset['variants/ANN_AA_length']
    assert (3,) == a.shape
    assert np.dtype('int32') == a.dtype
    assert_array_equal([-1, -1, 1596], a)
    a = callset['variants/ANN_Distance']
    assert (3,) == a.shape
    assert np.dtype('int32') == a.dtype
    assert_array_equal([3000, -1, -1], a)
    # numbers=2: each sub-field becomes a 2D array with a trailing annotation axis
    callset = read_vcf(vcf_path, fields=['ANN'], numbers={'ANN': 2},
                       transformers=[ANNTransformer()])
    a = callset['variants/ANN_Allele']
    assert (3, 2) == a.shape
    assert np.dtype('object') == a.dtype
    assert_array_equal(['T', ''], a[0])
    assert_array_equal(['', ''], a[1])
    assert_array_equal(['T', 'G'], a[2])
    a = callset['variants/ANN_cDNA_pos']
    assert (3, 2) == a.shape
    assert np.dtype('int32') == a.dtype
    assert_array_equal([-1, -1, 17], a[:, 0])
    assert_array_equal([-1, -1, 12], a[:, 1])
    a = callset['variants/ANN_cDNA_length']
    assert (3, 2) == a.shape
    assert np.dtype('int32') == a.dtype
    assert_array_equal([-1, -1, 4788], a[:, 0])
    assert_array_equal([-1, -1, 4768], a[:, 1])
    # choose fields and types; short and fully-qualified names are both accepted
    transformers = [
        ANNTransformer(
            fields=['Allele', 'ANN_HGVS_c', 'variants/ANN_cDNA_pos'],
            types={'Allele': 'S12',
                   'ANN_HGVS_c': 'S20',
                   'variants/ANN_cDNA_pos': 'i8'})
    ]
    callset = read_vcf(vcf_path, fields=['ANN'], transformers=transformers)
    assert (sorted(['variants/ANN_Allele', 'variants/ANN_HGVS_c',
                    'variants/ANN_cDNA_pos']) == sorted(callset.keys()))
    a = callset['variants/ANN_Allele']
    assert (3,) == a.shape
    assert np.dtype('S12') == a.dtype
    assert_array_equal([b'T', b'', b'T'], a)
    a = callset['variants/ANN_HGVS_c']
    assert (3,) == a.shape
    assert np.dtype('S20') == a.dtype
    assert_array_equal([b'', b'', b'17A>T'], a)
    a = callset['variants/ANN_cDNA_pos']
    assert (3,) == a.shape
    assert np.dtype('i8') == a.dtype
    assert_array_equal([-1, -1, 17], a)
def test_format_inconsistencies():
    """Rows whose calldata has fewer/more values than FORMAT declares are
    parsed leniently: missing values fill with -1, extras are ignored."""
    input_data = (
        b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
        b"2L\t12\tfoo\tA\tC\t1.2\t.\t.\tGT:GQ\t0/1:12\t1/2\t2/3:34:67,89\t\n"
        b"2R\t34\tbar\tC\tG\t3.4\t.\t.\tGT\t./.\t\t3/3:45\t1/2:11:55,67\n"
    )
    input_file = io.BytesIO(input_data)
    callset = read_vcf(input_file, fields=['calldata/GT', 'calldata/GQ'])
    gt = callset['calldata/GT']
    assert (2, 4, 2) == gt.shape
    assert_array_equal([[0, 1], [1, 2], [2, 3], [-1, -1]], gt[0])
    assert_array_equal([[-1, -1], [-1, -1], [3, 3], [1, 2]], gt[1])
    gq = callset['calldata/GQ']
    assert (2, 4) == gq.shape
    # second row declares only GT, so GQ is missing (-1) everywhere
    assert_array_equal([12, -1, 34, -1], gq[0])
    assert_array_equal([-1, -1, -1, -1], gq[1])
# noinspection PyTypeChecker
def test_warnings():
    """Malformed VCF content (bad CHROM/POS/QUAL, undeclared or dodgy
    INFO/FORMAT values) raises a UserWarning; empty-but-legal fields do not.

    The 'error' simplefilter turns any unexpected warning into an exception,
    so the bare read_vcf calls double as no-warning assertions.
    """
    warnings.resetwarnings()
    warnings.simplefilter('error')
    # empty CHROM
    input_data = (
        b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
        b"\t12\t.\t.\t.\t.\t.\t.\t.\t.\t.\t.\t.\n"
    )
    with pytest.warns(UserWarning):
        read_vcf(io.BytesIO(input_data))
    # empty POS
    input_data = (
        b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
        b"2L\t\t.\t.\t.\t.\t.\t.\t.\t.\t.\t.\t.\n"
    )
    with pytest.warns(UserWarning):
        read_vcf(io.BytesIO(input_data))
    # dodgy POS
    input_data = (
        b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
        b"2L\taaa\t.\t.\t.\t.\t.\t.\t.\t.\t.\t.\t.\n"
    )
    with pytest.warns(UserWarning):
        read_vcf(io.BytesIO(input_data))
    # dodgy POS
    input_data = (
        b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
        b"2L\t12aaa\t.\t.\t.\t.\t.\t.\t.\t.\t.\t.\t.\n"
    )
    with pytest.warns(UserWarning):
        read_vcf(io.BytesIO(input_data))
    # dodgy QUAL
    input_data = (
        b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
        b"2L\t12\t.\t.\t.\taaa\t.\t.\t.\t.\t.\t.\t.\n"
    )
    with pytest.warns(UserWarning):
        read_vcf(io.BytesIO(input_data))
    # dodgy QUAL
    input_data = (
        b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
        b"2L\t12\t.\t.\t.\t1.2aaa\t.\t.\t.\t.\t.\t.\t.\n"
    )
    with pytest.warns(UserWarning):
        read_vcf(io.BytesIO(input_data))
    # empty QUAL - no warning
    input_data = (
        b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
        b"2L\t12\t.\t.\t.\t\t.\t.\t.\t.\t.\t.\t.\n"
    )
    read_vcf(io.BytesIO(input_data))
    # empty FILTER - no warning
    input_data = (
        b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
        b"2L\t12\t.\t.\t.\t.\t\t.\t.\t.\t.\t.\t.\n"
    )
    read_vcf(io.BytesIO(input_data))
    # empty INFO - no warning
    input_data = (
        b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
        b"2L\t12\t.\t.\t.\t.\t.\t\t.\t.\t.\t.\t.\n"
    )
    read_vcf(io.BytesIO(input_data))
    # empty FORMAT - no warning
    input_data = (
        b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
        b"2L\t12\t.\t.\t.\t.\t.\t.\t\t.\t.\t.\t.\n"
    )
    read_vcf(io.BytesIO(input_data))
    # dodgy calldata (integer)
    input_data = (
        b'##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n'
        b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
        b"2L\t12\t.\t.\t.\t.\t.\t.\tGT\t0/1\taa/bb\t.\t.\n"
    )
    with pytest.warns(UserWarning):
        read_vcf(io.BytesIO(input_data), fields=['calldata/GT'])
    # dodgy calldata (integer)
    input_data = (
        b'##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n'
        b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
        b"2L\t12\t.\t.\t.\t.\t.\t.\tGT\t0/1\t12aa/22\t.\t.\n"
    )
    with pytest.warns(UserWarning):
        read_vcf(io.BytesIO(input_data), fields=['calldata/GT'])
    # dodgy calldata (float)
    input_data = (
        b'##FORMAT=<ID=MQ,Number=1,Type=Float,Description="Mapping Quality">\n'
        b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
        b"2L\t12\t.\t.\t.\t.\t.\t.\tMQ\t.\t12.3\taaa\t.\n"
    )
    with pytest.warns(UserWarning):
        read_vcf(io.BytesIO(input_data), fields=['calldata/MQ'])
    # dodgy calldata (float)
    input_data = (
        b'##FORMAT=<ID=MQ,Number=1,Type=Float,Description="Mapping Quality">\n'
        b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
        b"2L\t12\t.\t.\t.\t.\t.\t.\tMQ\t.\t12.3\t34.5aaa\t.\n"
    )
    with pytest.warns(UserWarning):
        read_vcf(io.BytesIO(input_data), fields=['calldata/MQ'])
    # dodgy INFO (missing key)
    input_data = (
        b'##INFO=<ID=MQ,Number=1,Type=Float,Description="Mapping Quality">\n'
        b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
        b"2L\t12\t.\t.\t.\t.\t.\tfoo=qux;MQ=12\t.\t.\t.\t.\t.\n"
        b"2L\t34\t.\t.\t.\t.\t.\tfoo=bar;=34;baz\t.\t.\t.\t.\t.\n"
    )
    with pytest.warns(UserWarning):
        read_vcf(io.BytesIO(input_data), fields=['variants/MQ'])
    # INFO not declared in header
    input_data = (
        b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
        b"2L\t12\tfoo\tA\tC,T\t12.3\tPASS\tfoo=bar\tGT:GQ\t0/0:99\t0/1:12\t./.:.\t.\n"
    )
    with pytest.warns(UserWarning):
        read_vcf(io.BytesIO(input_data), fields=['variants/foo'])
    # FORMAT not declared in header
    input_data = (
        b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
        b"2L\t12\tfoo\tA\tC,T\t12.3\tPASS\tfoo=bar\tGT:GQ\t0/0:99\t0/1:12\t./.:.\t.\n"
    )
    with pytest.warns(UserWarning):
        read_vcf(io.BytesIO(input_data), fields=['calldata/GT'])
    with pytest.warns(UserWarning):
        read_vcf(io.BytesIO(input_data), fields=['calldata/GQ'])
    # restore default warning behaviour for subsequent tests
    warnings.resetwarnings()
    warnings.simplefilter('always')
def test_missing_headers():
    """Fields absent from the VCF header can still be requested: they default
    to string type, and a user-supplied type overrides that default."""
    vcf_path = fixture_path('test14.vcf')
    # INFO DP not declared
    callset = read_vcf(vcf_path, fields=['DP'], types={'DP': 'String'})
    a = callset['variants/DP']
    assert '14' == a[2]  # default type is string
    callset = read_vcf(vcf_path, fields=['DP'], types={'DP': 'Integer'})
    a = callset['variants/DP']
    assert 14 == a[2]
    # what about a field which isn't present at all?
    callset = read_vcf(vcf_path, fields=['FOO'])
    assert '' == callset['variants/FOO'][2]  # default missing value for string field
    # FORMAT field DP not declared in VCF header
    callset = read_vcf(vcf_path, fields=['calldata/DP'],
                       types={'calldata/DP': 'Integer'})
    assert 1 == callset['calldata/DP'][2, 0]
def test_extra_samples():
    """Rows with more calldata columns than header samples warn, and the
    extra columns are dropped (output shape follows the header)."""
    # more calldata samples than samples declared in header
    path = fixture_path('test48b.vcf')
    input_data = (
        b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
        b"2L\t12\t.\t.\t.\t.\t.\t.\tGT:GQ\t0/0:34\t0/1:45\t1/1:56\t1/2:99\t2/3:101\n"
    )
    warnings.resetwarnings()
    warnings.simplefilter('error')
    with pytest.warns(UserWarning):
        read_vcf(path)
    with pytest.warns(UserWarning):
        read_vcf(io.BytesIO(input_data), fields=['calldata/GT', 'calldata/GQ'])
    warnings.resetwarnings()
    warnings.simplefilter('always')
    # try again without raising warnings to check data
    callset = read_vcf(io.BytesIO(input_data), fields=['calldata/GT', 'calldata/GQ'])
    assert (1, 4, 2) == callset['calldata/GT'].shape
    callset = read_vcf(path)
    assert (9, 2, 2) == callset['calldata/GT'].shape
# noinspection PyTypeChecker
def test_no_samples():
    """If the header declares no samples, requested sample/calldata fields are
    silently omitted from read_vcf, vcf_to_hdf5 and vcf_to_zarr output."""
    input_data = (
        b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\n"
        b"2L\t12\tfoo\tA\tC,T\t12.3\tPASS\tfoo=bar\tGT:GQ\t0/0:99\t0/1:12\t./.:.\t.\n"
    )
    callset = read_vcf(io.BytesIO(input_data),
                       fields=['calldata/GT', 'calldata/GQ', 'samples', 'POS'])
    assert 'variants/POS' in callset
    assert 'samples' not in callset
    assert 'calldata/GT' not in callset
    assert 'calldata/GQ' not in callset
    # same behaviour via the HDF5 conversion path
    h5_path = os.path.join(tempdir, 'sample.h5')
    if os.path.exists(h5_path):
        os.remove(h5_path)
    vcf_to_hdf5(io.BytesIO(input_data), h5_path,
                fields=['calldata/GT', 'calldata/GQ', 'samples', 'POS'])
    with h5py.File(h5_path, mode='r') as callset:
        assert 'variants/POS' in callset
        assert 'samples' not in callset
        assert 'calldata/GT' not in callset
        assert 'calldata/GQ' not in callset
    # same behaviour via the zarr conversion path
    zarr_path = os.path.join(tempdir, 'sample.zarr')
    if os.path.exists(zarr_path):
        shutil.rmtree(zarr_path)
    vcf_to_zarr(io.BytesIO(input_data), zarr_path,
                fields=['calldata/GT', 'calldata/GQ', 'samples', 'POS'])
    callset = zarr.open_group(zarr_path, mode='r')
    assert 'variants/POS' in callset
    assert 'samples' not in callset
    assert 'calldata/GT' not in callset
    assert 'calldata/GQ' not in callset
def test_computed_fields():
    """The derived fields numalt, altlen and is_snp are computed from
    REF/ALT, for both bytes ('S') and object string dtypes, with either the
    full ALT number or a reduced (truncating) one."""
    input_data = (b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\n"
                  b"2L\t2\t.\t.\t.\t.\t.\t.\t.\n"
                  b"2L\t4\t.\t.\tG\t.\t.\t.\t.\n"
                  b"2L\t12\t.\tA\t.\t.\t.\t.\t.\n"
                  b"2L\t34\t.\tC\tT\t.\t.\t.\t.\n"
                  b"3R\t45\t.\tG\tA,T\t.\t.\t.\t.\n"
                  b"3R\t47\t.\tG\tC,T,*\t.\t.\t.\t.\n"
                  b"3R\t56\t.\tG\tA,GTAC\t.\t.\t.\t.\n"
                  b"3R\t56\t.\tCATG\tC,GATG\t.\t.\t.\t.\n"
                  b"3R\t56\t.\tGTAC\tATAC,GTACTACTAC,G,GTACA,GTA\t.\t.\t.\t.\n")
    for string_dtype in 'S20', 'object':
        callset = read_vcf(io.BytesIO(input_data),
                           fields='*',
                           numbers={'ALT': 5},
                           types={'REF': string_dtype, 'ALT': string_dtype})
        a = callset['variants/ALT']
        assert (9, 5) == a.shape
        e = np.array([[b'', b'', b'', b'', b''],
                      [b'G', b'', b'', b'', b''],
                      [b'', b'', b'', b'', b''],
                      [b'T', b'', b'', b'', b''],
                      [b'A', b'T', b'', b'', b''],
                      [b'C', b'T', b'*', b'', b''],
                      [b'A', b'GTAC', b'', b'', b''],
                      [b'C', b'GATG', b'', b'', b''],
                      [b'ATAC', b'GTACTACTAC', b'G', b'GTACA', b'GTA']])
        # object arrays hold unicode str, not bytes; convert expectation to match
        if a.dtype.kind == 'O':
            e = e.astype('U').astype(object)
        assert_array_equal(e, a)
        a = callset['variants/numalt']
        assert (9,) == a.shape
        assert_array_equal([0, 1, 0, 1, 2, 3, 2, 2, 5], a)
        a = callset['variants/altlen']
        assert (9, 5) == a.shape
        # altlen = len(ALT) - len(REF); 0 where REF or ALT missing
        e = np.array([[0, 0, 0, 0, 0],
                      [1, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0],
                      [0, 0, -1, 0, 0],
                      [0, 3, 0, 0, 0],
                      [-3, 0, 0, 0, 0],
                      [0, 6, -3, 1, -1]])
        assert_array_equal(e, a)
        a = callset['variants/is_snp']
        assert (9,) == a.shape
        assert np.dtype(bool) == a.dtype
        assert_array_equal([False, False, False, True, True, False, False, False, False],
                           a)
        # test is_snp with reduced ALT number
        callset = read_vcf(io.BytesIO(input_data),
                           fields='*',
                           numbers={'ALT': 1},
                           types={'REF': string_dtype, 'ALT': string_dtype})
        a = callset['variants/ALT']
        assert (9,) == a.shape
        e = np.array([b'', b'G', b'', b'T', b'A', b'C', b'A', b'C', b'ATAC'])
        if a.dtype.kind == 'O':
            e = e.astype('U').astype(object)
        assert_array_equal(e, a)
        a = callset['variants/numalt']
        # numalt still counts all ALT alleles even when storage is truncated
        assert (9,) == a.shape
        assert_array_equal([0, 1, 0, 1, 2, 3, 2, 2, 5], a)
        a = callset['variants/altlen']
        assert (9,) == a.shape
        e = np.array([0, 1, 0, 0, 0, 0, 0, -3, 0])
        assert_array_equal(e, a)
        a = callset['variants/is_snp']
        assert (9,) == a.shape
        assert np.dtype(bool) == a.dtype
        assert_array_equal([False, False, False, True, True, False, False, False, False],
                           a)
def test_genotype_ac():
    """The 'genotype_ac' GT type stores allele counts per call (mixed-ploidy
    representation), for all signed/unsigned integer widths."""
    input_data = (
        b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS1\tS2\tS3\n"
        b"2L\t12\t.\tA\t.\t.\t.\t.\tGT:GQ\t0/0/0:11\t0/1/2:12\t././.:.\n"
        b"2L\t34\t.\tC\tT\t.\t.\t.\tGT:GQ\t0/1/2:22\t3/3/.:33\t.\n"
        b"3R\t45\t.\tG\tA,T\t.\t.\t.\tGT:GQ\t0/1:.\t3:12\t\n"
        b"X\t55\t.\tG\tA,T\t.\t.\t.\tGT:GQ\t0/1/1/3/4:.\t1/1/2/2/4/4/5:12\t0/0/1/2/3/./4\n"
    )
    for t in 'i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8':
        callset = read_vcf(io.BytesIO(input_data),
                           fields=['calldata/GT'],
                           numbers={'calldata/GT': 4},
                           types={'calldata/GT': 'genotype_ac/' + t})
        # entry [i, j, k] counts occurrences of allele k in sample j's call
        e = np.array([[[3, 0, 0, 0], [1, 1, 1, 0], [0, 0, 0, 0]],
                      [[1, 1, 1, 0], [0, 0, 0, 2], [0, 0, 0, 0]],
                      [[1, 1, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0]],
                      [[1, 2, 0, 1], [0, 2, 2, 0], [2, 1, 1, 1]]], dtype=t)
        a = callset['calldata/GT']
        assert e.dtype == a.dtype
        assert_array_equal(e, a)
    vcf_path = fixture_path('test63.vcf')
    callset = read_vcf(vcf_path, fields='GT', numbers={'GT': 3},
                       types={'GT': 'genotype_ac/i1'})
    e = np.array([
        [(2, 0, 0), (3, 0, 0), (1, 0, 0)],
        [(0, 1, 0), (1, 1, 0), (1, 1, 1)],
        [(0, 0, 0), (0, 0, 0), (0, 0, 0)],
        [(0, 0, 0), (0, 0, 0), (0, 0, 0)],
    ])
    a = callset['calldata/GT']
    assert_array_equal(e, a)
def test_region_truncate():
    """A region query covering part of a chromosome returns exactly the
    variants whose POS falls inside the interval, with or without tabix."""
    path = fixture_path('test54.vcf.gz')
    for tabix in ('tabix', None):
        result = read_vcf(path, region='chr1:10-100', tabix=tabix)
        positions = result['variants/POS']
        assert positions.shape[0] == 2
        assert_array_equal([20, 30], positions)
def test_errors():
    """Unreadable or malformed inputs raise the appropriate exception types."""
    # a directory is not a readable file
    with pytest.raises(OSError):
        read_vcf('.')
    # missing files, both plain and gzipped
    for missing in ('doesnotexist.vcf', 'doesnotexist.vcf.gz'):
        with pytest.raises(FileNotFoundError):
            read_vcf(missing)
    # a file that is nothing like a VCF (has no header)
    with pytest.raises(RuntimeError):
        read_vcf(fixture_path('test48a.vcf'))
def test_dup_headers():
    """Duplicate FILTER/INFO/FORMAT header declarations raise a UserWarning.

    The 'error' simplefilter ensures no *other* warning sneaks through.
    """
    warnings.resetwarnings()
    warnings.simplefilter('error')
    # dup FILTER
    input_data = b"""##fileformat=VCFv4.1
##FILTER=<ID=s50,Description="Less than 50% of samples have data">
##FILTER=<ID=s50,Description="Less than 50% of samples have data">
##INFO=<ID=DP,Number=1,Type=Integer,Description="Total Depth">
##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">
##FORMAT=<ID=AD,Number=A,Type=Integer,Description="Allele Depths">
##FORMAT=<ID=ZZ,Number=1,Type=String,Description="ZZ">
#CHROM	POS	ID	REF	ALT	QUAL	FILTER	INFO	FORMAT	test1	test2	test3	test4
chr1	1	.	A	G	.	PASS	DP=2	GT:AD	0:1,0	.:1,0	0:0,0	.:0,0
chr1	2	.	A	G	.	PASS	DP=2	GT:AD:ZZ	0:1,0:dummy	0:1,0	0:0,0	.:0,0
chr1	3	.	A	G	.	PASS	DP=2	GT:AD:ZZ	0:1,0:dummy	1:1,0	.	./.
"""
    with pytest.warns(UserWarning):
        read_vcf(io.BytesIO(input_data))
    # dup INFO
    input_data = b"""##fileformat=VCFv4.1
##FILTER=<ID=s50,Description="Less than 50% of samples have data">
##INFO=<ID=DP,Number=1,Type=Integer,Description="Total Depth">
##INFO=<ID=DP,Number=1,Type=Integer,Description="Total Depth">
##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">
##FORMAT=<ID=AD,Number=A,Type=Integer,Description="Allele Depths">
##FORMAT=<ID=ZZ,Number=1,Type=String,Description="ZZ">
#CHROM	POS	ID	REF	ALT	QUAL	FILTER	INFO	FORMAT	test1	test2	test3	test4
chr1	1	.	A	G	.	PASS	DP=2	GT:AD	0:1,0	.:1,0	0:0,0	.:0,0
chr1	2	.	A	G	.	PASS	DP=2	GT:AD:ZZ	0:1,0:dummy	0:1,0	0:0,0	.:0,0
chr1	3	.	A	G	.	PASS	DP=2	GT:AD:ZZ	0:1,0:dummy	1:1,0	.	./.
"""
    with pytest.warns(UserWarning):
        read_vcf(io.BytesIO(input_data))
    # dup FORMAT
    input_data = b"""##fileformat=VCFv4.1
##FILTER=<ID=s50,Description="Less than 50% of samples have data">
##INFO=<ID=DP,Number=1,Type=Integer,Description="Total Depth">
##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">
##FORMAT=<ID=AD,Number=A,Type=Integer,Description="Allele Depths">
##FORMAT=<ID=AD,Number=A,Type=Integer,Description="Allele Depths">
##FORMAT=<ID=ZZ,Number=1,Type=String,Description="ZZ">
#CHROM	POS	ID	REF	ALT	QUAL	FILTER	INFO	FORMAT	test1	test2	test3	test4
chr1	1	.	A	G	.	PASS	DP=2	GT:AD	0:1,0	.:1,0	0:0,0	.:0,0
chr1	2	.	A	G	.	PASS	DP=2	GT:AD:ZZ	0:1,0:dummy	0:1,0	0:0,0	.:0,0
chr1	3	.	A	G	.	PASS	DP=2	GT:AD:ZZ	0:1,0:dummy	1:1,0	.	./.
"""
    with pytest.warns(UserWarning):
        read_vcf(io.BytesIO(input_data))
    warnings.resetwarnings()
    warnings.simplefilter('always')
def test_override_vcf_type():
    """A user-supplied type overrides the type declared in the VCF header."""
    path = fixture_path('test4.vcf')
    # header-declared type truncates the fractional value to 0
    callset = read_vcf(path, fields=['MQ0FractionTest'])
    assert callset['variants/MQ0FractionTest'][2] == 0
    # overriding to Float preserves the fractional value
    callset = read_vcf(path, fields=['MQ0FractionTest'],
                       types={'MQ0FractionTest': 'Float'})
    assert callset['variants/MQ0FractionTest'][2] == approx(0.03)
def test_header_overrides_default_vcf_type():
    """A header that declares GQ as Float overrides the library's default
    integer type for that field."""
    vcf_path = fixture_path('test176.vcf')
    callset = read_vcf(vcf_path, fields='*')
    gq = callset['calldata/GQ']
    assert 'f' == gq.dtype.kind
    # missing value for a float field is NaN, not -1
    assert np.isnan(gq[0, 0])
    assert approx(48.2) == gq[2, 0]
    assert approx(48.1) == gq[2, 1]
    assert approx(43.9) == gq[2, 2]
    assert approx(49.) == gq[3, 0]
    assert approx(3.) == gq[3, 1]
    assert approx(41.) == gq[3, 2]
def test_missing_calldata():
    """Missing calldata values are filled with -1 for both GT and AD."""
    vcf_path = fixture_path('test1.vcf')
    callset = read_vcf(vcf_path, fields='calldata/*', numbers={'AD': 2})
    gt = callset['calldata/GT']
    ad = callset['calldata/AD']
    assert (-1, -1) == tuple(gt[0, 1])
    assert (1, 0) == tuple(ad[0, 1])
    assert (-1, -1) == tuple(gt[2, 2])
    assert (-1, -1) == tuple(ad[2, 2])
    assert (-1, -1) == tuple(gt[2, 3])
    assert (-1, -1) == tuple(ad[2, 3])
def test_calldata_cleared():
    """Calldata buffers are reset between rows, so values from a previous row
    never leak into a row where the sample has no data."""
    path = fixture_path('test32.vcf')
    callset = read_vcf(path, fields=['calldata/GT', 'calldata/DP', 'calldata/GQ'])
    gt, dp, gq = (callset['calldata/' + name] for name in ('GT', 'DP', 'GQ'))
    # first row: sample 3 has full data
    assert tuple(gt[0, 3]) == (0, 0)
    assert dp[0, 3] == 8
    assert gq[0, 3] == 3
    # second row: sample 3 is empty, so all values must read as missing
    assert tuple(gt[1, 3]) == (-1, -1)
    assert dp[1, 3] == -1
    assert gq[1, 3] == -1
def test_calldata_quirks():
    """Haploid calls in a diploid field are padded with -1 in GT, while AD
    values parse normally."""
    vcf_path = fixture_path('test1.vcf')
    callset = read_vcf(vcf_path, fields=['AD', 'GT'], numbers={'AD': 2})
    gt = callset['calldata/GT']
    ad = callset['calldata/AD']
    e = np.array([[-1, -1], [0, -1], [1, -1]])
    assert_array_equal(e, gt[:, 1])
    e = np.array([[1, 0], [1, 0], [1, 0]])
    assert_array_equal(e, ad[:, 1])
def test_vcf_to_npz():
    """vcf_to_npz output matches read_vcf across a parameter matrix of input
    compression, region, tabix, sample selection and string dtype."""
    vcf_paths = [fixture_path(x) for x in ['sample.vcf', 'sample.vcf.gz']]
    npz_path = os.path.join(tempdir, 'sample.npz')
    region_values = None, '20', '20:10000-20000', 'Y'
    tabix_values = 'tabix', None
    samples_values = None, ['NA00001', 'NA00003']
    string_type_values = 'S10', 'object'
    param_matrix = itertools.product(vcf_paths, region_values, tabix_values,
                                     samples_values, string_type_values)
    for vcf_path, region, tabix, samples, string_type in param_matrix:
        types = {'CHROM': string_type, 'ALT': string_type, 'samples': string_type}
        expected = read_vcf(vcf_path, fields='*', alt_number=2, region=region,
                            tabix=tabix, samples=samples, types=types)
        if os.path.exists(npz_path):
            os.remove(npz_path)
        vcf_to_npz(vcf_path, npz_path, fields='*', chunk_length=2, alt_number=2,
                   region=region, tabix=tabix, samples=samples, types=types)
        # region 'Y' yields no variants, so no output file should be written
        if expected is None:
            assert not os.path.exists(npz_path)
        else:
            actual = np.load(npz_path, allow_pickle=True)
            for key in expected.keys():
                if expected[key].dtype.kind == 'f':
                    assert_array_almost_equal(expected[key], actual[key])
                else:
                    assert_array_equal(expected[key], actual[key])
            # no extra keys in the stored output
            for key in actual.keys():
                assert key in expected
            actual.close()
def test_vcf_to_npz_exclude():
    """vcf_to_npz honours exclude_fields the same way read_vcf does."""
    vcf_path = fixture_path('sample.vcf')
    npz_path = os.path.join(tempdir, 'sample.npz')
    exclude = ['variants/altlen', 'ID', 'calldata/DP']
    expected = read_vcf(vcf_path, fields='*', exclude_fields=exclude)
    if os.path.exists(npz_path):
        os.remove(npz_path)
    vcf_to_npz(vcf_path, npz_path, fields='*', exclude_fields=exclude)
    actual = np.load(npz_path, allow_pickle=True)
    for key in expected.keys():
        if expected[key].dtype.kind == 'f':
            assert_array_almost_equal(expected[key], actual[key])
        else:
            assert_array_equal(expected[key], actual[key])
    # no extra keys in the stored output
    for key in actual.keys():
        assert key in expected
    actual.close()
def test_vcf_to_npz_rename():
    """vcf_to_npz honours rename_fields the same way read_vcf does."""
    vcf_path = fixture_path('sample.vcf')
    npz_path = os.path.join(tempdir, 'sample.npz')
    rename = {'CHROM': 'variants/chromosome',
              'variants/altlen': 'spam/eggs',
              'calldata/GT': 'foo/bar'}
    expected = read_vcf(vcf_path, fields='*', rename_fields=rename)
    if os.path.exists(npz_path):
        os.remove(npz_path)
    vcf_to_npz(vcf_path, npz_path, fields='*', rename_fields=rename)
    actual = np.load(npz_path, allow_pickle=True)
    for key in expected.keys():
        if expected[key].dtype.kind == 'f':
            assert_array_almost_equal(expected[key], actual[key])
        else:
            assert_array_equal(expected[key], actual[key])
    # no extra keys in the stored output
    for key in actual.keys():
        assert key in expected
    actual.close()
def test_vcf_to_zarr():
    """vcf_to_zarr output matches read_vcf across the same parameter matrix as
    the npz test, and VCF header descriptions are stored as zarr attrs."""
    vcf_paths = [fixture_path(x) for x in ['sample.vcf', 'sample.vcf.gz']]
    zarr_path = os.path.join(tempdir, 'sample.zarr')
    region_values = None, '20', '20:10000-20000', 'Y'
    tabix_values = 'tabix', None
    samples_values = None, ['NA00001', 'NA00003']
    string_type_values = 'S10', 'object'
    param_matrix = itertools.product(vcf_paths, region_values, tabix_values,
                                     samples_values, string_type_values)
    for vcf_path, region, tabix, samples, string_type in param_matrix:
        types = {'CHROM': string_type, 'ALT': string_type, 'samples': string_type}
        expected = read_vcf(vcf_path, fields='*', alt_number=2, region=region,
                            tabix=tabix, samples=samples, types=types)
        if os.path.exists(zarr_path):
            shutil.rmtree(zarr_path)
        vcf_to_zarr(vcf_path, zarr_path, fields='*', alt_number=2, chunk_length=2,
                    region=region, tabix=tabix, samples=samples, types=types)
        # region 'Y' yields no variants, so no output store should be created
        if expected is None:
            assert not os.path.exists(zarr_path)
        else:
            actual = zarr.open_group(zarr_path, mode='r')
            for key in expected.keys():
                e = expected[key]
                a = actual[key][:]
                compare_arrays(e, a)
            # header descriptions are propagated to dataset attributes
            assert (actual['variants/NS'].attrs['Description'] ==
                    'Number of Samples With Data')
            assert (actual['calldata/GQ'].attrs['Description'] ==
                    'Genotype Quality')
            # no extra datasets beyond the expected keys
            for key in actual.keys():
                if key not in {'variants', 'calldata'}:
                    assert key in expected
            for key in actual['variants'].keys():
                assert 'variants/' + key in expected
            for key in actual['calldata'].keys():
                assert 'calldata/' + key in expected
def test_vcf_to_zarr_exclude():
    """vcf_to_zarr honours exclude_fields the same way read_vcf does."""
    vcf_path = fixture_path('sample.vcf')
    zarr_path = os.path.join(tempdir, 'sample.zarr')
    exclude = ['variants/altlen', 'ID', 'calldata/DP']
    expected = read_vcf(vcf_path, fields='*', exclude_fields=exclude)
    if os.path.exists(zarr_path):
        shutil.rmtree(zarr_path)
    vcf_to_zarr(vcf_path, zarr_path, fields='*', exclude_fields=exclude)
    actual = zarr.open_group(zarr_path, mode='r')
    for key in expected.keys():
        e = expected[key]
        a = actual[key][:]
        compare_arrays(e, a)
    # no extra datasets beyond the expected keys
    for key in actual.keys():
        if key not in {'variants', 'calldata'}:
            assert key in expected
    for key in actual['variants'].keys():
        assert 'variants/' + key in expected
    for key in actual['calldata'].keys():
        assert 'calldata/' + key in expected
def test_vcf_to_zarr_rename():
    """vcf_to_zarr honours rename_fields, including renames into new groups."""
    vcf_path = fixture_path('sample.vcf')
    zarr_path = os.path.join(tempdir, 'sample.zarr')
    rename = {'CHROM': 'variants/chromosome',
              'variants/altlen': 'spam/eggs',
              'calldata/GT': 'foo/bar'}
    expected = read_vcf(vcf_path, fields='*', rename_fields=rename)
    if os.path.exists(zarr_path):
        shutil.rmtree(zarr_path)
    vcf_to_zarr(vcf_path, zarr_path, fields='*', rename_fields=rename)
    actual = zarr.open_group(zarr_path, mode='r')
    for key in expected.keys():
        e = expected[key]
        a = actual[key][:]
        compare_arrays(e, a)
    # anything left under variants/calldata must still be an expected key
    for key in actual['variants'].keys():
        assert 'variants/' + key in expected
    for key in actual['calldata'].keys():
        assert 'calldata/' + key in expected
def test_vcf_to_zarr_rename_clash():
    """rename_fields mappings that collide (duplicate targets, or a target
    that is a parent group of another target) raise ValueError."""
    vcf_path = fixture_path('sample.vcf')
    zarr_path = os.path.join(tempdir, 'sample.zarr')
    # dup values
    rename = {'CHROM': 'variants/chromosome',
              'variants/altlen': 'spam/eggs',
              'calldata/GT': 'spam/eggs'}
    with pytest.raises(ValueError):
        vcf_to_zarr(vcf_path, zarr_path, fields='*', rename_fields=rename)
    # parent clash
    rename = {'CHROM': 'variants/chromosome',
              'variants/altlen': 'spam/eggs',
              'calldata/GT': 'spam'}
    with pytest.raises(ValueError):
        vcf_to_zarr(vcf_path, zarr_path, fields='*', rename_fields=rename)
    # parent clash
    rename = {'CHROM': 'variants/chromosome',
              'variants/altlen': 'spam',
              'calldata/GT': 'spam/eggs'}
    with pytest.raises(ValueError):
        vcf_to_zarr(vcf_path, zarr_path, fields='*', rename_fields=rename)
def test_vcf_to_zarr_dup_fields_case_insensitive():
    """Field names differing only by case clash in case-insensitive stores and
    raise ValueError, unless disambiguated via rename_fields."""
    vcf_path = fixture_path('altlen.vcf')
    zarr_path = os.path.join(tempdir, 'sample.zarr')
    with pytest.raises(ValueError):
        vcf_to_zarr(vcf_path, zarr_path, fields=['ALTLEN', 'altlen'])
    with pytest.raises(ValueError):
        vcf_to_zarr(vcf_path, zarr_path, fields=['variants/ALTLEN', 'variants/altlen'])
    # should be fine if renamed
    vcf_to_zarr(vcf_path, zarr_path, fields=['ALTLEN', 'altlen'],
                rename_fields={'altlen': 'variants/spam'})
def test_vcf_to_zarr_group():
    """Per-chromosome conversion into separate zarr groups matches per-region
    read_vcf output, with header descriptions stored as attrs."""
    vcf_path = fixture_path('sample.vcf.gz')
    zarr_path = os.path.join(tempdir, 'sample.zarr')
    if os.path.exists(zarr_path):
        shutil.rmtree(zarr_path)
    chroms = ['19', '20', 'X']
    for chrom in chroms:
        vcf_to_zarr(vcf_path, zarr_path, fields='*', alt_number=2, chunk_length=2,
                    region=chrom, group=chrom)
    actual = zarr.open_group(zarr_path, mode='r')
    assert chroms == sorted(actual)
    for chrom in chroms:
        assert ['calldata', 'samples', 'variants'] == sorted(actual[chrom])
        expect = read_vcf(vcf_path, fields='*', alt_number=2, region=chrom)
        for key in expect.keys():
            e = expect[key]
            a = actual[chrom][key][:]
            compare_arrays(e, a)
        assert (actual[chrom]['variants/NS'].attrs['Description'] ==
                'Number of Samples With Data')
        assert (actual[chrom]['calldata/GQ'].attrs['Description'] ==
                'Genotype Quality')
def test_vcf_to_zarr_string_codec():
    """Object-dtype string fields round-trip through zarr storage."""
    vcf_path = fixture_path('sample.vcf')
    zarr_path = os.path.join(tempdir, 'sample.zarr')
    types = {'CHROM': object, 'ALT': object, 'samples': object}
    expect = read_vcf(vcf_path, fields='*', alt_number=2, types=types)
    if os.path.exists(zarr_path):
        shutil.rmtree(zarr_path)
    vcf_to_zarr(vcf_path, zarr_path, fields='*', alt_number=2, chunk_length=2,
                types=types)
    stored = zarr.open_group(zarr_path, mode='r')
    for key, e in expect.items():
        compare_arrays(e, stored[key][:])
def test_vcf_to_zarr_ann():
    """vcf_to_zarr with an ANNTransformer matches read_vcf output for both
    bytes and object string dtypes."""
    vcf_path = fixture_path('ann.vcf')
    zarr_path = os.path.join(tempdir, 'ann.zarr')
    for string_type in 'S10', 'object':
        types = {'CHROM': string_type, 'ALT': string_type, 'samples': string_type}
        transformers = [ANNTransformer(fields=['Allele', 'HGVS_c', 'AA'],
                                       types={'Allele': string_type,
                                              'HGVS_c': string_type})]
        expected = read_vcf(vcf_path, fields='*', alt_number=2, types=types,
                            transformers=transformers)
        if os.path.exists(zarr_path):
            shutil.rmtree(zarr_path)
        vcf_to_zarr(vcf_path, zarr_path, fields='*', alt_number=2, chunk_length=2,
                    types=types, transformers=transformers)
        actual = zarr.open_group(zarr_path, mode='r')
        for key in expected.keys():
            compare_arrays(expected[key], actual[key][:])
def test_vcf_to_zarr_empty():
    """Converting a VCF with no variant records creates no output store."""
    out_path = os.path.join(tempdir, 'empty.zarr')
    vcf_to_zarr(fixture_path('empty.vcf'), out_path)
    assert not os.path.exists(out_path)
def test_vcf_to_hdf5():
    """vcf_to_hdf5 output matches read_vcf across the same parameter matrix as
    the npz/zarr tests, with header descriptions stored as HDF5 attrs."""
    vcf_paths = [fixture_path(x) for x in ['sample.vcf', 'sample.vcf.gz']]
    h5_path = os.path.join(tempdir, 'sample.h5')
    region_values = None, '20', '20:10000-20000', 'Y'
    tabix_values = 'tabix', None
    samples_values = None, ['NA00001', 'NA00003']
    string_type_values = 'S10', 'object'
    param_matrix = itertools.product(vcf_paths, region_values, tabix_values,
                                     samples_values, string_type_values)
    for vcf_path, region, tabix, samples, string_type in param_matrix:
        types = {'CHROM': string_type, 'ALT': string_type, 'samples': string_type}
        expected = read_vcf(vcf_path, fields='*', alt_number=2, region=region,
                            tabix=tabix, samples=samples, types=types)
        if os.path.exists(h5_path):
            os.remove(h5_path)
        vcf_to_hdf5(vcf_path, h5_path, fields='*', alt_number=2, chunk_length=2,
                    region=region, tabix=tabix, samples=samples, types=types)
        # region 'Y' yields no variants, so no output file should be written
        if expected is None:
            assert not os.path.exists(h5_path)
        else:
            with h5py.File(h5_path, mode='r') as actual:
                for key in expected.keys():
                    compare_arrays(expected[key], actual[key][:])
                # header descriptions are propagated to dataset attributes
                assert (actual['variants/NS'].attrs['Description'] ==
                        'Number of Samples With Data')
                assert (actual['calldata/GQ'].attrs['Description'] ==
                        'Genotype Quality')
                # no extra datasets beyond the expected keys
                for key in actual.keys():
                    if key not in {'variants', 'calldata'}:
                        assert key in expected
                for key in actual['variants'].keys():
                    assert 'variants/' + key in expected
                for key in actual['calldata'].keys():
                    assert 'calldata/' + key in expected
def test_vcf_to_hdf5_exclude():
    """vcf_to_hdf5 honours exclude_fields the same way read_vcf does."""
    vcf_path = fixture_path('sample.vcf')
    h5_path = os.path.join(tempdir, 'sample.h5')
    exclude = ['variants/altlen', 'ID', 'calldata/DP']
    expected = read_vcf(vcf_path, fields='*', exclude_fields=exclude)
    if os.path.exists(h5_path):
        os.remove(h5_path)
    vcf_to_hdf5(vcf_path, h5_path, fields='*', exclude_fields=exclude)
    with h5py.File(h5_path, mode='r') as actual:
        for key in expected.keys():
            compare_arrays(expected[key], actual[key][:])
        # no extra datasets beyond the expected keys
        for key in actual.keys():
            if key not in {'variants', 'calldata'}:
                assert key in expected
        for key in actual['variants'].keys():
            assert 'variants/' + key in expected
        for key in actual['calldata'].keys():
            assert 'calldata/' + key in expected
def test_vcf_to_hdf5_rename():
    # Renamed fields should appear under their new names in the HDF5
    # file and match the renamed in-memory callset.
    path = fixture_path('sample.vcf')
    out = os.path.join(tempdir, 'sample.h5')
    mapping = {'CHROM': 'variants/chromosome',
               'variants/altlen': 'spam/eggs',
               'calldata/GT': 'foo/bar'}
    expected = read_vcf(path, fields='*', rename_fields=mapping)
    if os.path.exists(out):
        os.remove(out)
    vcf_to_hdf5(path, out, fields='*', rename_fields=mapping)
    with h5py.File(out, mode='r') as actual:
        for name in expected:
            compare_arrays(expected[name], actual[name][:])
        # no extra datasets beyond what was expected
        for name in actual['variants']:
            assert 'variants/' + name in expected
        for name in actual['calldata']:
            assert 'calldata/' + name in expected
def test_vcf_to_hdf5_group():
    # Each chromosome region is written into its own HDF5 group; groups
    # accumulate in a single file across successive calls.
    path = fixture_path('sample.vcf.gz')
    out = os.path.join(tempdir, 'sample.h5')
    if os.path.exists(out):
        os.remove(out)
    chroms = ['19', '20', 'X']
    for chrom in chroms:
        vcf_to_hdf5(path, out, fields='*', alt_number=2, chunk_length=2,
                    region=chrom, group=chrom)
    with h5py.File(out, mode='r') as actual:
        assert sorted(actual) == chroms
        for chrom in chroms:
            assert sorted(actual[chrom]) == ['calldata', 'samples', 'variants']
            expect = read_vcf(path, fields='*', alt_number=2, region=chrom)
            for name in expect:
                compare_arrays(expect[name], actual[chrom][name][:])
            # header descriptions are carried over as HDF5 attributes
            ns = actual[chrom]['variants/NS']
            assert ns.attrs['Description'] == 'Number of Samples With Data'
            gq = actual[chrom]['calldata/GQ']
            assert gq.attrs['Description'] == 'Genotype Quality'
def test_vcf_to_hdf5_ann():
    # ANN transformer output should survive the round trip to HDF5 for
    # both fixed-length and object string types.
    path = fixture_path('ann.vcf')
    out = os.path.join(tempdir, 'ann.h5')
    for stype in 'S10', 'object':
        types = {'CHROM': stype, 'ALT': stype, 'samples': stype}
        transformers = [ANNTransformer(fields=['Allele', 'HGVS_c', 'AA'],
                                       types={'Allele': stype,
                                              'HGVS_c': stype})]
        expected = read_vcf(path, fields='*', types=types,
                            transformers=transformers)
        if os.path.exists(out):
            os.remove(out)
        vcf_to_hdf5(path, out, fields='*', chunk_length=2, types=types,
                    transformers=transformers)
        with h5py.File(out, mode='r') as actual:
            for name in expected:
                compare_arrays(expected[name], actual[name][:])
def test_vcf_to_hdf5_vlen():
    # With vlen=False, object (variable-length string) arrays are stored
    # as fixed-length byte strings in the HDF5 file; check the round trip
    # for both fixed-length ('S10') and object input string types.
    vcf_path = fixture_path('sample.vcf')
    h5_path = os.path.join(tempdir, 'sample.h5')
    fields = ['CHROM', 'ID', 'samples']
    for string_type in 'S10', 'object':
        types = {'CHROM': string_type, 'ID': string_type, 'samples': string_type}
        expect = read_vcf(vcf_path, fields=fields, alt_number=2, types=types)
        if os.path.exists(h5_path):
            os.remove(h5_path)
        vcf_to_hdf5(vcf_path, h5_path, fields=fields, alt_number=2, chunk_length=3,
                    types=types, vlen=False)
        with h5py.File(h5_path, mode='r') as actual:
            for key in expect.keys():
                if expect[key].dtype.kind == 'f':
                    # floats compared approximately
                    assert_array_almost_equal(expect[key], actual[key][:])
                elif expect[key].dtype.kind == 'O':
                    # strings always stored as fixed length if vlen=False
                    assert 'S' == actual[key].dtype.kind
                    assert_array_equal(expect[key].astype('S'), actual[key][:])
                else:
                    assert_array_equal(expect[key], actual[key][:])
def test_vcf_to_hdf5_empty():
    # A VCF with no variants should produce no output file at all.
    out = os.path.join(tempdir, 'empty.h5')
    vcf_to_hdf5(fixture_path('empty.vcf'), out)
    assert not os.path.exists(out)
def to_pandas_expectation(e):
    """Adapt a callset array to what pandas should produce.

    String fields end up as object dtype with NaN substituted for
    missing (empty-string) values. Returns a new array when any
    conversion or masking is needed; the caller's array is never
    modified in place (the original implementation mutated object-dtype
    inputs, silently altering the callset being tested against).
    """
    if e.dtype.kind == 'S':
        # bytes -> text -> object; astype already returns a copy
        e = e.astype('U').astype(object)
    elif e.dtype == object:
        # copy before masking so the caller's array is not mutated
        e = e.copy()
    if e.dtype == object:
        e[e == ''] = np.nan
    return e
def check_dataframe(callset, df):
    # Verify every variants field of the callset appears in the
    # dataframe: 1-D fields as a single column, 2-D fields as one
    # numbered column per value ('NAME_1', 'NAME_2', ...).
    for key in callset:
        if not key.startswith('variants/'):
            continue
        _, name = key.split('/')
        e = to_pandas_expectation(callset[key])
        if e.ndim == 1:
            compare_arrays(e, df[name].values)
        elif e.ndim == 2:
            for col in range(e.shape[1]):
                compare_arrays(e[:, col], df['%s_%s' % (name, col + 1)])
def test_vcf_to_dataframe():
    # Scalar fields keep their name, multi-value fields get numbered
    # columns; string columns become object dtype in the dataframe.
    path = fixture_path('sample.vcf')
    fields = ['CHROM', 'POS', 'REF', 'ALT', 'DP', 'AC', 'GT']
    numbers = {'AC': 3}
    for stype in 'S10', 'object':
        types = {'CHROM': stype, 'ALT': stype}
        callset = read_vcf(path, fields=fields, alt_number=2,
                           numbers=numbers, types=types)
        df = vcf_to_dataframe(path, fields=fields, alt_number=2,
                              numbers=numbers, chunk_length=2, types=types)
        expected_cols = ['CHROM', 'POS', 'REF', 'ALT_1', 'ALT_2', 'DP',
                         'AC_1', 'AC_2', 'AC_3']
        assert df.columns.tolist() == expected_cols
        # always convert strings to object dtype for pandas
        assert df['CHROM'].dtype == np.dtype(object)
        assert df['ALT_1'].dtype == np.dtype(object)
        check_dataframe(callset, df)
def test_vcf_to_dataframe_all():
    # With fields='*' the dataframe contains at least the core columns,
    # with strings converted to object dtype.
    path = fixture_path('sample.vcf')
    numbers = {'AC': 3}
    for stype in 'S10', 'object':
        types = {'CHROM': stype, 'ALT': stype}
        callset = read_vcf(path, fields='*', alt_number=2,
                           numbers=numbers, types=types)
        df = vcf_to_dataframe(path, fields='*', alt_number=2,
                              numbers=numbers, chunk_length=2, types=types)
        cols = df.columns.tolist()
        for expected_col in ['CHROM', 'POS', 'ID', 'REF', 'ALT_1', 'ALT_2',
                             'DP', 'AC_1', 'AC_2', 'AC_3']:
            assert expected_col in cols
        # always convert strings to object dtype for pandas
        assert df['CHROM'].dtype == np.dtype(object)
        assert df['ALT_1'].dtype == np.dtype(object)
        check_dataframe(callset, df)
def test_vcf_to_dataframe_exclude():
    # Excluded fields (and their derived numbered columns) must not
    # appear in the dataframe.
    df = vcf_to_dataframe(fixture_path('sample.vcf'), fields='*',
                          exclude_fields=['ALT', 'ID'])
    cols = df.columns.tolist()
    for present in ['CHROM', 'POS', 'REF', 'DP', 'AC_1', 'AC_2', 'AC_3']:
        assert present in cols
    for absent in ['ALT_1', 'ALT_2', 'ID']:
        assert absent not in cols
def test_vcf_to_dataframe_ann():
    # The ANN transformer expands the ANN field into separate columns
    # (ANN_Allele, ANN_HGVS_c, ANN_AA_pos, ANN_AA_length) in the
    # dataframe; strings are converted to object dtype regardless of
    # the requested storage type.
    vcf_path = fixture_path('ann.vcf')
    fields = ['CHROM', 'POS', 'REF', 'ALT', 'ANN', 'DP', 'AC', 'GT']
    numbers = {'AC': 2, 'ALT': 2}
    for string_type in 'S10', 'object':
        types = {'CHROM': string_type, 'ALT': string_type}
        transformers = [ANNTransformer(fields=['Allele', 'HGVS_c', 'AA'],
                                       types={'Allele': string_type,
                                              'HGVS_c': string_type})]
        callset = read_vcf(vcf_path, fields=fields, numbers=numbers, types=types,
                           transformers=transformers)
        df = vcf_to_dataframe(vcf_path, fields=fields, numbers=numbers, chunk_length=2,
                              types=types, transformers=transformers)
        assert (['CHROM', 'POS', 'REF', 'ALT_1', 'ALT_2', 'ANN_Allele', 'ANN_HGVS_c',
                 'ANN_AA_pos', 'ANN_AA_length', 'DP', 'AC_1', 'AC_2'] ==
                df.columns.tolist())
        # always convert strings to object dtype for pandas
        assert np.dtype(object) == df['CHROM'].dtype
        assert np.dtype(object) == df['ALT_1'].dtype
        check_dataframe(callset, df)
def test_vcf_to_csv():
    # Writing to CSV and reading it back with pandas should reproduce
    # the dataframe built directly from the VCF.
    path = fixture_path('sample.vcf')
    fields = ['CHROM', 'POS', 'REF', 'ALT', 'DP', 'AC', 'GT']
    numbers = {'AC': 3}
    for stype in 'S20', 'object':
        types = {'REF': stype, 'ALT': stype}
        df = vcf_to_dataframe(path, fields=fields, alt_number=2,
                              numbers=numbers, types=types, chunk_length=2)
        csv_path = os.path.join(tempdir, 'test.csv')
        if os.path.exists(csv_path):
            os.remove(csv_path)
        vcf_to_csv(path, csv_path, fields=fields, alt_number=2,
                   numbers=numbers, types=types, chunk_length=2)
        import pandas
        adf = pandas.read_csv(csv_path, na_filter=True)
        assert adf.columns.tolist() == df.columns.tolist()
        for col in df.columns:
            compare_arrays(df[col].values, adf[col].values)
def test_vcf_to_csv_all():
    # Full round trip with all fields: CSV read back by pandas matches
    # the directly-built dataframe column by column.
    path = fixture_path('sample.vcf')
    df = vcf_to_dataframe(path, fields='*')
    csv_path = os.path.join(tempdir, 'test.csv')
    if os.path.exists(csv_path):
        os.remove(csv_path)
    vcf_to_csv(path, csv_path, fields='*')
    import pandas
    adf = pandas.read_csv(csv_path, na_filter=True)
    assert adf.columns.tolist() == df.columns.tolist()
    for col in df.columns:
        compare_arrays(df[col].values, adf[col].values)
def test_vcf_to_csv_exclude():
    # Excluded fields must also be missing from the CSV output; column
    # sets of the direct dataframe and the CSV round trip must agree.
    path = fixture_path('sample.vcf')
    dropped = ['ALT', 'ID']
    df = vcf_to_dataframe(path, fields='*', exclude_fields=dropped)
    csv_path = os.path.join(tempdir, 'test.csv')
    if os.path.exists(csv_path):
        os.remove(csv_path)
    vcf_to_csv(path, csv_path, fields='*', exclude_fields=dropped)
    import pandas
    adf = pandas.read_csv(csv_path, na_filter=True)
    assert adf.columns.tolist() == df.columns.tolist()
def test_vcf_to_csv_ann():
    # Round trip via CSV with the ANN transformer applied; the CSV read
    # back by pandas must match the directly-built dataframe.
    vcf_path = fixture_path('ann.vcf')
    fields = ['CHROM', 'POS', 'REF', 'ALT', 'DP', 'AC', 'ANN', 'GT']
    numbers = {'AC': 2, 'ALT': 2}
    for string_type in 'S20', 'object':
        types = {'CHROM': string_type, 'REF': string_type, 'ALT': string_type}
        transformers = [ANNTransformer(fields=['Allele', 'HGVS_c', 'AA'],
                                       types={'Allele': string_type,
                                              'HGVS_c': string_type})]
        df = vcf_to_dataframe(vcf_path, fields=fields, numbers=numbers, types=types,
                              chunk_length=2, transformers=transformers)
        csv_path = os.path.join(tempdir, 'test.csv')
        if os.path.exists(csv_path):
            os.remove(csv_path)
        vcf_to_csv(vcf_path, csv_path, fields=fields, numbers=numbers, types=types,
                   chunk_length=2, transformers=transformers)
        import pandas
        adf = pandas.read_csv(csv_path, na_filter=True)
        assert df.columns.tolist() == adf.columns.tolist()
        for k in df.columns:
            compare_arrays(df[k].values, adf[k].values)
def test_vcf_to_recarray():
    # Recarray field names mirror the dataframe columns; variants
    # fields round-trip exactly (no object conversion for recarrays).
    path = fixture_path('sample.vcf')
    fields = ['CHROM', 'POS', 'REF', 'ALT', 'DP', 'AC', 'GT']
    numbers = {'AC': 3}
    for stype in 'S20', 'object':
        types = {'CHROM': stype, 'REF': stype, 'ALT': stype}
        callset = read_vcf(path, fields=fields, alt_number=2,
                           numbers=numbers, types=types)
        a = vcf_to_recarray(path, fields=fields, alt_number=2,
                            numbers=numbers, chunk_length=2, types=types)
        assert list(a.dtype.names) == ['CHROM', 'POS', 'REF', 'ALT_1',
                                       'ALT_2', 'DP', 'AC_1', 'AC_2', 'AC_3']
        assert a['CHROM'].dtype == np.dtype(stype)
        for key in callset:
            if not key.startswith('variants/'):
                continue
            _, name = key.split('/')
            e = callset[key]
            if e.ndim == 1:
                assert_array_equal(e, a[name])
            elif e.ndim == 2:
                for col in range(e.shape[1]):
                    assert_array_equal(e[:, col], a['%s_%s' % (name, col + 1)])
            else:
                assert False, (key, e.ndim)
def test_vcf_to_recarray_all():
    # With fields='*' the recarray has at least the core variants
    # columns, and every variants field matches the callset exactly.
    path = fixture_path('sample.vcf')
    numbers = {'AC': 3}
    for stype in 'S20', 'object':
        types = {'CHROM': stype, 'REF': stype, 'ALT': stype}
        callset = read_vcf(path, fields='*', alt_number=2,
                           numbers=numbers, types=types)
        a = vcf_to_recarray(path, fields='*', alt_number=2,
                            numbers=numbers, chunk_length=2, types=types)
        for expected_name in ['CHROM', 'POS', 'ID', 'REF', 'ALT_1', 'ALT_2',
                              'DP', 'AC_1', 'AC_2', 'AC_3']:
            assert expected_name in a.dtype.names
        assert a['CHROM'].dtype == np.dtype(stype)
        for key in callset:
            if not key.startswith('variants/'):
                continue
            _, name = key.split('/')
            e = callset[key]
            if e.ndim == 1:
                assert_array_equal(e, a[name])
            elif e.ndim == 2:
                for col in range(e.shape[1]):
                    assert_array_equal(e[:, col], a['%s_%s' % (name, col + 1)])
            else:
                assert False, (key, e.ndim)
def test_vcf_to_recarray_exclude():
    # Excluded fields and their derived numbered columns are absent
    # from the recarray dtype.
    a = vcf_to_recarray(fixture_path('sample.vcf'), fields='*',
                        exclude_fields=['ALT', 'ID'])
    for present in ['CHROM', 'POS', 'REF', 'DP', 'AC_1', 'AC_2', 'AC_3']:
        assert present in a.dtype.names
    for absent in ['ALT_1', 'ALT_2', 'ALT', 'ID']:
        assert absent not in a.dtype.names
def test_vcf_to_recarray_ann():
    # The ANN transformer expands ANN into separate recarray fields;
    # variants fields must match the callset exactly, and calldata
    # fields must not leak into the (variants-only) recarray.
    vcf_path = fixture_path('ann.vcf')
    fields = ['CHROM', 'POS', 'REF', 'ALT', 'ANN', 'DP', 'AC', 'GT']
    numbers = {'AC': 2, 'ALT': 2}
    for string_type in 'S20', 'object':
        types = {'CHROM': string_type, 'REF': string_type, 'ALT': string_type}
        transformers = [ANNTransformer(fields=['Allele', 'HGVS_c', 'AA'],
                                       types={'Allele': string_type,
                                              'HGVS_c': string_type})]
        callset = read_vcf(vcf_path, fields=fields, numbers=numbers, types=types,
                           transformers=transformers)
        a = vcf_to_recarray(vcf_path, fields=fields, numbers=numbers, chunk_length=2,
                            types=types, transformers=transformers)
        assert (['CHROM', 'POS', 'REF', 'ALT_1', 'ALT_2', 'ANN_Allele', 'ANN_HGVS_c',
                 'ANN_AA_pos', 'ANN_AA_length', 'DP', 'AC_1', 'AC_2'] ==
                list(a.dtype.names))
        assert np.dtype(string_type) == a['CHROM'].dtype
        assert np.dtype(string_type) == a['ALT_1'].dtype
        for k in callset:
            group, name = k.split('/')
            if group == 'variants':
                e = callset[k]
                if e.ndim == 1:
                    assert_array_equal(e, a[name])
                elif e.ndim == 2:
                    # 2-D fields become one numbered column per value
                    for i in range(e.shape[1]):
                        assert_array_equal(e[:, i], a['%s_%s' % (name, i + 1)])
                else:
                    assert False, (k, e.ndim)
            else:
                # calldata fields are not included in the recarray
                assert name not in a.dtype.names
def test_read_vcf_headers():
    # Parse the sample VCF headers and verify filters, INFO/FORMAT
    # declarations, sample names and selected metadata values.
    headers = read_vcf_headers(fixture_path('sample.vcf'))
    for flt in ('q10', 's50'):
        assert flt in headers.filters
    for info in ('AA', 'AC', 'AF', 'AN', 'DB', 'DP', 'H2', 'NS'):
        assert info in headers.infos
    for fmt in ('DP', 'GQ', 'GT', 'HQ'):
        assert fmt in headers.formats
    assert headers.samples == ['NA00001', 'NA00002', 'NA00003']
    # spot-check parsed metadata for one INFO and one FORMAT entry
    aa = headers.infos['AA']
    assert aa['Number'] == '1'
    assert aa['Type'] == 'String'
    assert aa['Description'] == 'Ancestral Allele'
    hq = headers.formats['HQ']
    assert hq['Number'] == '2'
    assert hq['Type'] == 'Integer'
    assert hq['Description'] == 'Haplotype Quality'
| 36.531474
| 91
| 0.569609
| 13,871
| 102,142
| 4.066758
| 0.039146
| 0.0273
| 0.008137
| 0.008225
| 0.873498
| 0.840844
| 0.804343
| 0.766548
| 0.740542
| 0.724233
| 0
| 0.038698
| 0.26379
| 102,142
| 2,795
| 92
| 36.544544
| 0.711442
| 0.030839
| 0
| 0.6751
| 0
| 0.037284
| 0.207808
| 0.069762
| 0
| 0
| 0
| 0
| 0.293387
| 1
| 0.041278
| false
| 0.015979
| 0.008877
| 0.000444
| 0.051043
| 0.000444
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
460583f24fc71192a0d98ebe262aca81c8b40bc9
| 319
|
py
|
Python
|
hubspot/crm/quotes/api/__init__.py
|
fakepop/hubspot-api-python
|
f04103a09f93f5c26c99991b25fa76801074f3d3
|
[
"Apache-2.0"
] | 117
|
2020-04-06T08:22:53.000Z
|
2022-03-18T03:41:29.000Z
|
hubspot/crm/quotes/api/__init__.py
|
fakepop/hubspot-api-python
|
f04103a09f93f5c26c99991b25fa76801074f3d3
|
[
"Apache-2.0"
] | 62
|
2020-04-06T16:21:06.000Z
|
2022-03-17T16:50:44.000Z
|
hubspot/crm/quotes/api/__init__.py
|
fakepop/hubspot-api-python
|
f04103a09f93f5c26c99991b25fa76801074f3d3
|
[
"Apache-2.0"
] | 45
|
2020-04-06T16:13:52.000Z
|
2022-03-30T21:33:17.000Z
|
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from hubspot.crm.quotes.api.associations_api import AssociationsApi
from hubspot.crm.quotes.api.basic_api import BasicApi
from hubspot.crm.quotes.api.batch_api import BatchApi
from hubspot.crm.quotes.api.search_api import SearchApi
| 31.9
| 67
| 0.84326
| 48
| 319
| 5.416667
| 0.4375
| 0.169231
| 0.215385
| 0.307692
| 0.353846
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003472
| 0.097179
| 319
| 9
| 68
| 35.444444
| 0.899306
| 0.128527
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4615ae734f34dbcd82057bf39d21ed059d963f67
| 30,897
|
py
|
Python
|
code/pyto/io/test/test_image_io.py
|
anmartinezs/pyseg_system
|
5bb07c7901062452a34b73f376057cabc15a13c3
|
[
"Apache-2.0"
] | 12
|
2020-01-08T01:33:02.000Z
|
2022-03-16T00:25:34.000Z
|
code/pyto/io/test/test_image_io.py
|
anmartinezs/pyseg_system
|
5bb07c7901062452a34b73f376057cabc15a13c3
|
[
"Apache-2.0"
] | 8
|
2019-12-19T19:34:56.000Z
|
2022-03-10T10:11:28.000Z
|
code/pyto/io/test/test_image_io.py
|
anmartinezs/pyseg_system
|
5bb07c7901062452a34b73f376057cabc15a13c3
|
[
"Apache-2.0"
] | 2
|
2022-03-30T13:12:22.000Z
|
2022-03-30T18:12:10.000Z
|
"""
Tests module image_io
# Author: Vladan Lucic
# $Id:$
"""
from __future__ import unicode_literals
from __future__ import print_function
__version__ = "$Revision:$"
from copy import copy, deepcopy
import pickle
import os.path
import unittest
import numpy
import numpy.testing as np_test
import scipy
from pyto.io.image_io import ImageIO
class TestImageIO(np_test.TestCase):
"""
Tests class ImageIO
"""
    def setUp(self):
        """
        Sets absolute path to this file directory and saves it as self.dir;
        also creates a small raw test file used by the read tests.
        """
        # set absolute path to current dir
        working_dir = os.getcwd()
        file_dir, name = os.path.split(__file__)
        self.dir = os.path.join(working_dir, file_dir)
        # make raw file: 4x3x2 int16 array with values 0..23
        # NOTE(review): written relative to the current working directory,
        # not self.dir — confirm tests are always run from a writable cwd
        self.raw_shape = (4,3,2)
        self.raw_dtype = 'int16'
        self.raw_data = numpy.arange(
            24, dtype=self.raw_dtype).reshape(self.raw_shape)
        raw = ImageIO()
        self.raw_file_name = 'data.raw'
        raw.write(file=self.raw_file_name, data=self.raw_data)
    def testRead(self):
        """
        Tests reading EM, MRC and raw files, each with and without a
        memory map, checking data values, data type, byte order, array
        order and (for MRC) header metadata such as pixel size, labels
        and extended header length.
        """
        # EM tomo
        em = ImageIO()
        em.read(file=os.path.join(self.dir, "bin-2.em"))
        expected = numpy.array([[-0.0242, -0.0250, 0.0883],
                                [0.0640, 0.0071, -0.1300],
                                [-0.0421, -0.0392, -0.0312]])
        np_test.assert_almost_equal(em.data[50:53, 120:123, 40], expected,
                                    decimal=4)
        expected = numpy.array([[-0.0573, 0.0569, 0.0386],
                                [0.1309, 0.1211, -0.0881],
                                [-0.0110, -0.0240, 0.0347]])
        np_test.assert_almost_equal(em.data[150:153, 20:23, 10], expected,
                                    decimal=4)
        np_test.assert_equal(em.byteOrder, '<')
        np_test.assert_equal(em.arrayOrder, 'F')
        np_test.assert_equal(em.dataType, 'float32')
        np_test.assert_equal(em.data.dtype, numpy.dtype('float32'))
        np_test.assert_equal(em.memmap, False)
        # EM tomo with memory map: same values expected as without memmap
        em.read(file=os.path.join(self.dir, "bin-2.em"), memmap=True)
        expected = numpy.array([[-0.0242, -0.0250, 0.0883],
                                [0.0640, 0.0071, -0.1300],
                                [-0.0421, -0.0392, -0.0312]])
        np_test.assert_almost_equal(em.data[50:53, 120:123, 40], expected,
                                    decimal=4)
        expected = numpy.array([[-0.0573, 0.0569, 0.0386],
                                [0.1309, 0.1211, -0.0881],
                                [-0.0110, -0.0240, 0.0347]])
        np_test.assert_almost_equal(em.data[150:153, 20:23, 10], expected,
                                    decimal=4)
        np_test.assert_equal(em.byteOrder, '<')
        np_test.assert_equal(em.arrayOrder, 'F')
        np_test.assert_equal(em.dataType, 'float32')
        np_test.assert_equal(em.data.dtype, numpy.dtype('float32'))
        np_test.assert_equal(em.memmap, True)
        # EM, big-endian
        em = ImageIO()
        em.read(file=os.path.join(self.dir, "mac-file.em"))
        np_test.assert_equal(em.byteOrder, '>')
        # EM, little-endian
        em = ImageIO()
        em.read(file=os.path.join(self.dir, "pc-file.em"))
        np_test.assert_equal(em.byteOrder, '<')
        em.read(file=os.path.join(self.dir, "pc-file.em"), memmap=True)
        np_test.assert_equal(em.byteOrder, '<')
        # MRC tomo: same fixture content as bin-2.em, so same values
        mrc = ImageIO()
        mrc.read(file=os.path.join(self.dir, "bin-2.mrc"))
        expected = numpy.array([[-0.0242, -0.0250, 0.0883],
                                [0.0640, 0.0071, -0.1300],
                                [-0.0421, -0.0392, -0.0312]])
        np_test.assert_almost_equal(mrc.data[50:53, 120:123, 40], expected,
                                    decimal=4)
        expected = numpy.array([[-0.0573, 0.0569, 0.0386],
                                [0.1309, 0.1211, -0.0881],
                                [-0.0110, -0.0240, 0.0347]])
        np_test.assert_almost_equal(mrc.data[150:153, 20:23, 10], expected,
                                    decimal=4)
        np_test.assert_equal(mrc.byteOrder, '<')
        np_test.assert_equal(mrc.arrayOrder, 'F')
        np_test.assert_equal(mrc.dataType, 'float32')
        np_test.assert_equal(mrc.data.dtype, numpy.dtype('float32'))
        np_test.assert_equal(mrc.memmap, False)
        # MRC tomo with memmap
        mrc = ImageIO()
        mrc.read(file=os.path.join(self.dir, "bin-2.mrc"), memmap=True)
        expected = numpy.array([[-0.0242, -0.0250, 0.0883],
                                [0.0640, 0.0071, -0.1300],
                                [-0.0421, -0.0392, -0.0312]])
        np_test.assert_almost_equal(mrc.data[50:53, 120:123, 40], expected,
                                    decimal=4)
        expected = numpy.array([[-0.0573, 0.0569, 0.0386],
                                [0.1309, 0.1211, -0.0881],
                                [-0.0110, -0.0240, 0.0347]])
        np_test.assert_almost_equal(mrc.data[150:153, 20:23, 10], expected,
                                    decimal=4)
        np_test.assert_equal(mrc.byteOrder, '<')
        np_test.assert_equal(mrc.arrayOrder, 'F')
        np_test.assert_equal(mrc.dataType, 'float32')
        np_test.assert_equal(mrc.data.dtype, numpy.dtype('float32'))
        np_test.assert_equal(mrc.memmap, True)
        # MRC tomo with extended header
        mrc = ImageIO()
        mrc.read(file=os.path.join(self.dir, "bin-2_ext.mrc"), memmap=False)
        expected = numpy.array([[-0.0242, -0.0250, 0.0883],
                                [0.0640, 0.0071, -0.1300],
                                [-0.0421, -0.0392, -0.0312]])
        np_test.assert_almost_equal(mrc.data[50:53, 120:123, 40], expected,
                                    decimal=4)
        expected = numpy.array([[-0.0573, 0.0569, 0.0386],
                                [0.1309, 0.1211, -0.0881],
                                [-0.0110, -0.0240, 0.0347]])
        np_test.assert_almost_equal(mrc.data[150:153, 20:23, 10], expected,
                                    decimal=4)
        np_test.assert_equal(mrc.byteOrder, '<')
        np_test.assert_equal(mrc.arrayOrder, 'F')
        np_test.assert_equal(mrc.dataType, 'float32')
        np_test.assert_equal(mrc.data.dtype, numpy.dtype('float32'))
        np_test.assert_equal(mrc.memmap, False)
        # the extended header must be detected and its length reported
        np_test.assert_equal(mrc.extendedHeaderLength, 5120)
        # MRC tomo with extended header and with memmap
        mrc = ImageIO()
        mrc.read(file=os.path.join(self.dir, "bin-2_ext.mrc"), memmap=True)
        expected = numpy.array([[-0.0242, -0.0250, 0.0883],
                                [0.0640, 0.0071, -0.1300],
                                [-0.0421, -0.0392, -0.0312]])
        np_test.assert_almost_equal(mrc.data[50:53, 120:123, 40], expected,
                                    decimal=4)
        expected = numpy.array([[-0.0573, 0.0569, 0.0386],
                                [0.1309, 0.1211, -0.0881],
                                [-0.0110, -0.0240, 0.0347]])
        np_test.assert_almost_equal(mrc.data[150:153, 20:23, 10], expected,
                                    decimal=4)
        np_test.assert_equal(mrc.byteOrder, '<')
        np_test.assert_equal(mrc.arrayOrder, 'F')
        np_test.assert_equal(mrc.dataType, 'float32')
        np_test.assert_equal(mrc.data.dtype, numpy.dtype('float32'))
        np_test.assert_equal(mrc.memmap, True)
        np_test.assert_equal(mrc.extendedHeaderLength, 5120)
        # another MRC tomo (generated by and)
        mrc = ImageIO()
        mrc.read(file=os.path.join(self.dir, "and-tomo.mrc"))
        expected = numpy.array([[-0.0329, -0.0006, -0.0698],
                                [-0.0101, -0.1196, -0.1295],
                                [0.0844, -0.0400, -0.0716]])
        np_test.assert_almost_equal(mrc.data[50:53, 120:123, 40], expected,
                                    decimal=4)
        expected = numpy.array([[-0.0019, -0.0085, 0.0036],
                                [0.0781, 0.0279, -0.0365],
                                [0.0210, -0.0193, -0.0355]])
        np_test.assert_almost_equal(mrc.data[150:153, 20:23, 60], expected,
                                    decimal=4)
        np_test.assert_equal(mrc.dataType, 'float32')
        np_test.assert_equal(mrc.data.dtype, numpy.dtype('float32'))
        np_test.assert_equal(mrc.memmap, False)
        # another MRC tomo (generated by and) with memmap
        mrc = ImageIO()
        mrc.read(file=os.path.join(self.dir, "and-tomo.mrc"), memmap=True)
        expected = numpy.array([[-0.0329, -0.0006, -0.0698],
                                [-0.0101, -0.1196, -0.1295],
                                [0.0844, -0.0400, -0.0716]])
        np_test.assert_almost_equal(mrc.data[50:53, 120:123, 40], expected,
                                    decimal=4)
        expected = numpy.array([[-0.0019, -0.0085, 0.0036],
                                [0.0781, 0.0279, -0.0365],
                                [0.0210, -0.0193, -0.0355]])
        np_test.assert_almost_equal(mrc.data[150:153, 20:23, 60], expected,
                                    decimal=4)
        np_test.assert_equal(mrc.dataType, 'float32')
        np_test.assert_equal(mrc.data.dtype, numpy.dtype('float32'))
        np_test.assert_equal(mrc.memmap, True)
        # mrc with the opposite byte order
        mrc2 = ImageIO()
        mrc2.read(file=os.path.join(self.dir, "swapped_byte_order.mrc"))
        expected = numpy.array(
            [[ 0.000,   0.000],
             [-0.341,  -6.702],
             [0.782, -11.780],
             [0.327, -14.298],
             [-0.691, -17.411],
             [-0.337, -18.076],
             [-0.669, -19.157],
             [-0.799, -20.400],
             [-0.793, -21.286],
             [-1.008, -21.386]])
        np_test.assert_almost_equal(mrc2.data[:,:,0], expected, decimal=3)
        np_test.assert_equal(mrc2.memmap, False)
        # reading a swapped-byte-order file with memmap=True is expected
        # to raise ValueError
        raised = False
        try:
            mrc2.read(
                file=os.path.join(self.dir, "swapped_byte_order.mrc"),
                memmap=True)
        except ValueError:
            raised = True
        np_test.assert_equal(raised, True)
        np_test.assert_equal(mrc2.memmap, True)
        # new style header mrc
        mrc_new = ImageIO()
        mrc_new.read(file=os.path.join(self.dir, 'new-head_int16.mrc'))
        np_test.assert_equal(mrc_new.dataType, 'int16')
        np_test.assert_equal(mrc_new.data.dtype, numpy.dtype('int16'))
        np_test.assert_equal(mrc_new.byteOrder, '<')
        np_test.assert_equal(mrc_new.arrayOrder, 'F')
        np_test.assert_equal(mrc_new.shape, (40,30,20))
        np_test.assert_equal(mrc_new.pixel, [0.4, 0.4, 0.4])
        np_test.assert_equal(mrc_new.pixelsize, 0.4)
        np_test.assert_equal(mrc_new.data[14,8,10], -14)
        np_test.assert_equal(mrc_new.data[15,23,12], 10)
        np_test.assert_equal(mrc_new.data[23,29,16], 2)
        np_test.assert_equal(mrc_new.memmap, False)
        # new style header mrc, memmapped; also checks header labels
        mrc_new = ImageIO()
        mrc_new.read(
            file=os.path.join(self.dir, 'new-head_int16.mrc'), memmap=True)
        np_test.assert_equal(mrc_new.dataType, 'int16')
        np_test.assert_equal(mrc_new.data.dtype, numpy.dtype('int16'))
        np_test.assert_equal(mrc_new.byteOrder, '<')
        np_test.assert_equal(mrc_new.arrayOrder, 'F')
        np_test.assert_equal(mrc_new.shape, (40,30,20))
        np_test.assert_equal(mrc_new.pixel, [0.4, 0.4, 0.4])
        np_test.assert_equal(mrc_new.pixelsize, 0.4)
        np_test.assert_equal(mrc_new.data[14,8,10], -14)
        np_test.assert_equal(mrc_new.data[15,23,12], 10)
        np_test.assert_equal(mrc_new.data[23,29,16], 2)
        np_test.assert_equal(mrc_new.memmap, True)
        np_test.assert_equal(mrc_new.n_labels, 9)
        np_test.assert_equal(len(mrc_new.labels), 9)
        # labels are fixed-width 80-byte fields; compare only the prefix
        desired = (
            b"COMBINEFFT: Combined FFT from two tomograms            "
            + b"07-Oct-13  17:15:24"  )
        np_test.assert_equal(len(mrc_new.labels[3]), 80)
        np_test.assert_equal(mrc_new.labels[3][:len(desired)], desired)
        desired = (
            b"NEWSTACK: Images copied                                 "
            b"10-Oct-13  18:00:03")
        np_test.assert_equal(len(mrc_new.labels[6]), 80)
        np_test.assert_equal(mrc_new.labels[6][:len(desired)], desired)
        # test raw file (written by setUp)
        raw = ImageIO()
        raw.read(
            file=self.raw_file_name, dataType=self.raw_dtype,
            shape=self.raw_shape)
        np_test.assert_equal(raw.data, self.raw_data)
        np_test.assert_equal(raw.memmap, False)
        # test raw file with memmap
        raw = ImageIO()
        raw.read(
            file=self.raw_file_name, dataType=self.raw_dtype,
            shape=self.raw_shape, memmap=True)
        np_test.assert_equal(raw.data, self.raw_data)
        np_test.assert_equal(raw.memmap, True)
def testWrite(self):
"""
Tests write (and implicitly read), for em, mrc and raw format.
"""
# arrays
ar_uint8 = numpy.array([54, 200, 5, 7, 45, 123],
dtype='uint8').reshape((3,1,2))
ar_int8 = numpy.array([54, 2, -5, 7, 45, 123],
dtype='uint8').reshape((3,1,2))
ar_uint16 = numpy.array([1034, 546, 248, 40000, 2345, 365, 4876, 563],
dtype='uint16').reshape((2,2,2))
ar_int16 = numpy.array([1034, 546, -248, 156, 2345, 365, -4876, 563],
dtype='int16').reshape((2,2,2))
ar_int32 = numpy.array([1034, 56546, -223448, 156,
2345, 2**31-10, -884876, 563],
dtype='int32').reshape((2,2,2))
ar_uint32 = numpy.array([1034, 56546, 223448, 156,
2345, 365, 884876, 2**32-10],
dtype='uint32').reshape((2,2,2))
ar_int8_2 = numpy.arange(24, dtype='int8').reshape((4,3,2))
ar_int16_2 = numpy.arange(24, dtype='int16').reshape((4,3,2))
ar2_int16 = numpy.array([1034, 546, -248, 156, 2345, 365, -4876, 563],
dtype='int16').reshape((2,4))
ar_int16_f = numpy.array(
[1034, 546, -248, 156, 2345, 365, -4876, 563],
dtype='int16', order='F').reshape((2,2,2))
ar_int16_c = numpy.array(
[1034, 546, -248, 156, 2345, 365, -4876, 563],
dtype='int16', order='C').reshape((2,2,2))
# em uint8
file_out = ImageIO()
file_out.write(file=os.path.join(self.dir, '_test.em'), data=ar_uint8)
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.em'))
np_test.assert_equal(file_in.dataType, 'uint8')
np_test.assert_equal(file_in.data, ar_uint8)
# em uint16
file_out = ImageIO()
file_out.write(file=os.path.join(self.dir, '_test.em'), data=ar_uint16)
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.em'))
np_test.assert_equal(file_in.dataType, 'uint16')
np_test.assert_equal(file_in.data, ar_uint16)
# em int16 converted to int32, safe casting
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.em'),
data=ar_int16, dataType='int32', casting='safe')
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.em'))
np_test.assert_equal(file_in.dataType, 'int32')
np_test.assert_equal(file_in.data, ar_int16)
# em int16, safe casting
file_out = ImageIO()
np_test.assert_raises(
TypeError,
file_out.write,
**{'file':os.path.join(self.dir, '_test.em'),
'data':ar_int16, 'casting':'safe'})
# em int16 converted to uint16, unsafe casting
file_out = ImageIO()
print("int16 to uint16")
file_out.write(file=os.path.join(self.dir, '_test.em'),
data=ar_int16, dataType='uint16', casting='unsafe')
print("int16 to uint16 end")
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.em'))
np_test.assert_equal(file_in.dataType, 'uint16')
np_test.assert_equal(file_in.data.dtype, numpy.dtype('uint16'))
np_test.assert_equal(file_in.data[0,1,0] == ar_int16[0,1,0], False)
# em int16 to uint16, safe casting
file_out = ImageIO()
np_test.assert_raises(
TypeError,
file_out.write,
**{'file':os.path.join(self.dir, '_test.em'),
'data':ar_int16, 'dataType':'uint16', 'casting':'safe'})
# em uint16 to int16, unsafe casting
file_out = ImageIO()
np_test.assert_raises(
TypeError,
file_out.write,
**{'file':os.path.join(self.dir, '_test.em'),
'data':ar_uint16, 'dataType':'int16', 'casting':'unsafe'})
# em uint32 to int32, safe casting
print("uint32 to int32 safe")
file_out = ImageIO()
np_test.assert_raises(
TypeError,
file_out.write,
**{'file':os.path.join(self.dir, '_test.em'),
'data':ar_uint32, 'dataType':'int32', 'casting':'safe'})
# em uint32 converted to int32, unsafe casting
print("uint32 to int32")
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.em'),
data=ar_uint32, dataType='int32', casting='unsafe')
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.em'))
np_test.assert_equal(file_in.dataType, 'int32')
#np_test.assert_equal(file_in.data, ar_uint32) should fail
np_test.assert_equal(file_in.data[0,0,0] == ar_uint32[0,0,0], True)
np_test.assert_equal(file_in.data[1,1,1] == ar_uint32[1,1,1], False)
# em uint32 to float32, safe casting
file_out = ImageIO()
np_test.assert_raises(
TypeError,
file_out.write,
**{'file':os.path.join(self.dir, '_test.em'),
'data':ar_uint32, 'dataType':'float32', 'casting':'safe'})
# em uint32 to float32, unsafe casting
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.em'),
data=ar_uint32, dataType='float32', casting='unsafe')
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.em'))
np_test.assert_equal(file_in.dataType, 'float32')
#np_test.assert_almost_equal(file_in.data, ar_uint32) should fail
np_test.assert_equal(
file_in.data[0,0,0] == ar_uint32[0,0,0], True)
np_test.assert_equal(
file_in.data[1,1,1] == ar_uint32[1,1,1], False)
# em int32 to float32, unsafe casting
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.em'),
data=ar_int32, dataType='float32', casting='unsafe')
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.em'))
np_test.assert_equal(file_in.dataType, 'float32')
#np_test.assert_almost_equal(file_in.data, ar_int32) should fail
np_test.assert_equal(
file_in.data[0,0,0] == ar_int32[0,0,0], True)
np_test.assert_equal(
file_in.data[1,0,1] == ar_int32[1,0,1], False)
# em int32 to float64, safe casting
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.em'),
data=ar_int32, dataType='float64', casting='safe')
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.em'))
np_test.assert_equal(file_in.dataType, 'float64')
np_test.assert_almost_equal(file_in.data, ar_int32)
# mrc data type and shape from args
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.mrc'),
data=ar_int8_2, shape=(2,3,4), dataType='int16')
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'))
np_test.assert_equal(file_in.dataType, 'int16')
np_test.assert_equal(file_in.shape, (2,3,4))
# mrc data type and shape from previously given data
file_out = ImageIO()
file_out.setData(ar_int16_2)
file_out.write(file=os.path.join(self.dir, '_test.mrc'))
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'))
np_test.assert_equal(file_in.dataType, 'int16')
np_test.assert_equal(file_in.shape, (4,3,2))
# mrc data type and shape from attributes
file_out = ImageIO()
file_out.data = ar_int8_2
file_out.shape = (2,3,4)
file_out.dataType = 'int16'
file_out.write(file=os.path.join(self.dir, '_test.mrc'))
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'))
np_test.assert_equal(file_in.dataType, 'int16')
np_test.assert_equal(file_in.shape, (2,3,4))
# mrc data type and shape from data
file_out = ImageIO()
file_out.write(file=os.path.join(self.dir, '_test.mrc'),
data=ar_int16_2)
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'))
np_test.assert_equal(file_in.dataType, 'int16')
np_test.assert_equal(file_in.shape, (4,3,2))
# mrc uint8, same as ubyte
file_out = ImageIO()
file_out.write(file=os.path.join(self.dir, '_test.mrc'), data=ar_uint8)
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'))
np_test.assert_equal(file_in.dataType, 'ubyte')
np_test.assert_almost_equal(file_in.data, ar_uint8)
# mrc uint16
file_out = ImageIO()
np_test.assert_raises(
(KeyError, TypeError),
file_out.write,
**{'file':os.path.join(self.dir, '_test.mrc'), 'data':ar_uint16})
# mrc uint16 to int16, safe casting
file_out = ImageIO()
np_test.assert_raises(
TypeError,
file_out.write,
**{'file':os.path.join(self.dir, '_test.mrc'),
'data':ar_uint16, 'dataType':'ubyte', 'casting':'safe'})
# mrc uint16 to int16, unsafe casting
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.mrc'),
data=ar_uint16, dataType='int16', casting='unsafe')
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'))
np_test.assert_equal(file_in.dataType, 'int16')
#np_test.assert_almost_equal(file_in.data, ar_uint16) should fail
np_test.assert_equal(file_in.data[0,0,0] == ar_uint16[0,0,0], True)
np_test.assert_equal(file_in.data[0,1,1] == ar_uint16[0,1,1], False)
# mrc int16
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.mrc'), data=ar_int16, pixel=2.3)
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'))
np_test.assert_equal(file_in.dataType, 'int16')
np_test.assert_equal(file_in.data, ar_int16)
np_test.assert_equal(file_in.pixel, [2.3, 2.3, 2.3])
np_test.assert_equal(file_in.pixelsize, 2.3)
# mrc int16 2D
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.mrc'), data=ar2_int16, pixel=3.4)
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'))
np_test.assert_equal(file_in.dataType, 'int16')
np_test.assert_equal(file_in.data[:,:,0], ar2_int16)
np_test.assert_equal(file_in.pixelsize, 3.4)
# mrc int8 to int16
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.mrc'), data=ar_int8,
dataType='int16', casting='safe')
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'))
np_test.assert_equal(file_in.dataType, 'int16')
np_test.assert_equal(file_in.data, ar_int8)
# mrc int32
file_out = ImageIO()
np_test.assert_raises(
(KeyError, TypeError),
file_out.write,
**{'file':os.path.join(self.dir, '_test.mrc'), 'data':ar_int32})
# mrc int32 to int16
file_out = ImageIO()
np_test.assert_raises(
TypeError,
file_out.write,
**{'file':os.path.join(self.dir, '_test.mrc'),
'data':ar_int32, 'dataType':'int16', 'casting':'safe'})
# mrc int32 to float32
file_out = ImageIO()
np_test.assert_raises(
TypeError,
file_out.write,
**{'file':os.path.join(self.dir, '_test.mrc'),
'data':ar_int32, 'dataType':'float32', 'casting':'safe'})
# mrc int32 to complex64
file_out = ImageIO()
np_test.assert_raises(
TypeError,
file_out.write,
**{'file':os.path.join(self.dir, '_test.mrc'),
'data':ar_int32, 'dataType':'complex64', 'casting':'safe'})
# raw int16
file_out = ImageIO()
file_out.write(file=os.path.join(self.dir, '_test.raw'), data=ar_int16)
file_in = ImageIO()
file_in.read(
file=os.path.join(self.dir, '_test.raw'),
dataType='int16', shape=(2,2,2))
np_test.assert_equal(file_in.dataType, 'int16')
np_test.assert_equal(file_in.data, ar_int16)
# raw int8 to int16
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.raw'),
data=ar_int8, dataType='int16')
file_in = ImageIO()
file_in.read(
file=os.path.join(self.dir, '_test.raw'),
dataType='int16', shape=(3,1,2))
np_test.assert_equal(file_in.dataType, 'int16')
np_test.assert_equal(file_in.data, ar_int8)
# raw int16 to int8
file_out = ImageIO()
np_test.assert_raises(
TypeError,
file_out.write,
**{'file':os.path.join(self.dir, '_test.raw'),
'data':ar_int16, 'dataType':'int8', 'casting':'safe'})
# explain error messages printed before
print("It's fine if few error messages were printed just before " +
"this line, because they have been caught.")
# shape param
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.mrc'),
data=ar_int16, dataType='int16')
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'), dataType='int16')
np_test.assert_equal(file_in.data.shape, (2,2,2))
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.mrc'),
data=ar_int16, dataType='int16', shape=(1,4,2))
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'), dataType='int16')
np_test.assert_equal(file_in.data.shape, (1,4,2))
file_out.write(
file=os.path.join(self.dir, '_test.mrc'),
data=ar_int16, dataType='int16', shape=(4,2))
file_in.readHeader(file=os.path.join(self.dir, '_test.mrc'))
file_in.read(file=os.path.join(self.dir, '_test.mrc'), dataType='int16')
np_test.assert_equal(file_in.data.shape, (4,2,1))
file_in.read(
file=os.path.join(self.dir, '_test.mrc'),
dataType='int16', shape=(2,2,2))
np_test.assert_equal(file_in.data.shape, (2,2,2))
# array order C, read write default (F)
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.mrc'), data=ar_int16_c)
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'))
np_test.assert_equal(file_in.data, ar_int16_c)
# array order C, read write C
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.mrc'),
data=ar_int16_c, arrayOrder='C')
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'), arrayOrder='C')
np_test.assert_equal(file_in.data, ar_int16_c)
# array order F, read write default (F)
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.mrc'), data=ar_int16_f)
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'))
np_test.assert_equal(file_in.data, ar_int16_f)
# array order F, read write F
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.mrc'),
data=ar_int16_f, arrayOrder='F')
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'), arrayOrder='F')
np_test.assert_equal(file_in.data, ar_int16_f)
def testPixelSize(self):
    """
    Tests that a pixel size given to write() round-trips through read().
    """
    # int16 test volume, Fortran-style shape (4, 3, 2)
    arr = numpy.arange(24, dtype='int16').reshape((4, 3, 2))
    path = os.path.join(self.dir, '_test.mrc')

    # write with an explicit pixel size, then read the file back
    writer = ImageIO()
    writer.write(file=path, data=arr, pixel=2.1)
    reader = ImageIO()
    reader.read(file=path)

    # the pixel size stored in the header should be recovered
    np_test.assert_almost_equal(reader.pixel, 2.1)
def tearDown(self):
    """
    Remove temporary files created by the tests.

    Missing files are ignored (OSError from os.remove is suppressed),
    because each test creates only a subset of these files.
    """
    # Local import keeps the module's import block unchanged.
    from contextlib import suppress

    for name in ('_test.em', '_test.mrc', '_test.raw', self.raw_file_name):
        with suppress(OSError):
            os.remove(os.path.join(self.dir, name))
if __name__ == '__main__':
    # Run the ImageIO test suite directly, with per-test verbose output.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestImageIO)
    unittest.TextTestRunner(verbosity=2).run(suite)
| 42.094005
| 91
| 0.55329
| 4,188
| 30,897
| 3.888013
| 0.074021
| 0.061168
| 0.120862
| 0.13468
| 0.855616
| 0.829024
| 0.808819
| 0.790948
| 0.773445
| 0.761776
| 0
| 0.090901
| 0.302845
| 30,897
| 733
| 92
| 42.151432
| 0.665042
| 0.066932
| 0
| 0.676522
| 0
| 0
| 0.068811
| 0.001537
| 0
| 0
| 0
| 0
| 0.278261
| 1
| 0.008696
| false
| 0.006957
| 0.017391
| 0
| 0.027826
| 0.010435
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1cd875b375001a6fbd439f63afc553b39c54817a
| 143
|
py
|
Python
|
pyloads/__init__.py
|
GonMazzini/pyloads
|
77e03901667ee4e854f74cf8538b5ffb21418063
|
[
"MIT"
] | 2
|
2021-01-04T06:56:45.000Z
|
2021-01-27T17:27:50.000Z
|
pyloads/__init__.py
|
GonMazzini/pyloads
|
77e03901667ee4e854f74cf8538b5ffb21418063
|
[
"MIT"
] | null | null | null |
pyloads/__init__.py
|
GonMazzini/pyloads
|
77e03901667ee4e854f74cf8538b5ffb21418063
|
[
"MIT"
] | null | null | null |
from pyloads.static_loads import Rotor
from pyloads.aerodynamic_profiles import AeroProfiles
from pyloads.blade_data import BladeFeatures
| 15.888889
| 53
| 0.86014
| 18
| 143
| 6.666667
| 0.666667
| 0.275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118881
| 143
| 8
| 54
| 17.875
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e818b8c4203ee8f10f2fdac847d8d14443a65c16
| 7,488
|
py
|
Python
|
applications/Ma-Net/networks/loss.py
|
Simon-liusheng/PaddleVideo
|
6c35b68bc745c659813d6517eecade9c9508a628
|
[
"Apache-2.0"
] | 1
|
2022-02-19T23:50:49.000Z
|
2022-02-19T23:50:49.000Z
|
applications/Ma-Net/networks/loss.py
|
liutinglong/PaddleVideo
|
6b8a723360ac652ca7aafa1908e6c67a67cf5ea5
|
[
"Apache-2.0"
] | 1
|
2022-01-14T02:33:28.000Z
|
2022-01-14T02:33:28.000Z
|
applications/Ma-Net/networks/loss.py
|
Thinksky5124/PaddleVideo
|
c8e9c5ff53d99bd70bfeb6246a53e668064a9940
|
[
"Apache-2.0"
] | null | null | null |
import paddle
import paddle.nn as nn
import os
class Added_BCEWithLogitsLoss(nn.Layer):
    """Per-sequence binary cross-entropy with logits, summed over sequences.

    If ``top_k_percent_pixels`` is set (must satisfy 0 < value < 1), only
    the hardest pixels contribute (hard example mining); the kept fraction
    is annealed from 1.0 down to ``top_k_percent_pixels`` over
    ``hard_example_mining_step`` optimization steps.
    """

    def __init__(self,
                 top_k_percent_pixels=None,
                 hard_example_mining_step=100000):
        super(Added_BCEWithLogitsLoss, self).__init__()
        self.top_k_percent_pixels = top_k_percent_pixels
        if top_k_percent_pixels is not None:
            assert (top_k_percent_pixels > 0 and top_k_percent_pixels < 1)
        self.hard_example_mining_step = hard_example_mining_step
        # reduction='none' keeps the per-pixel loss shape required for
        # the top-k selection below.  (Idiom fix: was "== None".)
        if self.top_k_percent_pixels is None:
            self.bceloss = nn.BCEWithLogitsLoss(reduction='mean')
        else:
            self.bceloss = nn.BCEWithLogitsLoss(reduction='none')

    def forward(self, dic_tmp, y, step):
        """Sum the loss over sequences.

        dic_tmp -- dict mapping sequence name to predicted logits
        y       -- dict mapping sequence name to ground-truth targets
        step    -- current training step, used to anneal the top-k ratio
        """
        final_loss = 0
        for seq_name in dic_tmp.keys():
            pred_logits = dic_tmp[seq_name]
            gts = y[seq_name]
            if self.top_k_percent_pixels is None:
                final_loss += self.bceloss(pred_logits, gts)
            else:
                # Only compute the loss for top k percent pixels.
                # First, compute the loss for all pixels. Note we do not put the loss
                # to loss_collection and set reduction = None to keep the shape.
                num_pixels = float(pred_logits.shape[2] * pred_logits.shape[3])
                # BUG FIX: paddle.Tensor has no .view(); use reshape(list),
                # as the sibling Added_CrossEntropyLoss already does.
                pred_logits = pred_logits.reshape(
                    [-1, pred_logits.shape[1],
                     pred_logits.shape[2] * pred_logits.shape[3]])
                gts = gts.reshape([-1, gts.shape[1], gts.shape[2] * gts.shape[3]])
                pixel_losses = self.bceloss(pred_logits, gts)
                if self.hard_example_mining_step == 0:
                    top_k_pixels = int(self.top_k_percent_pixels * num_pixels)
                else:
                    # Linearly anneal the kept fraction from 1.0 to top_k_percent_pixels.
                    ratio = min(1.0,
                                step / float(self.hard_example_mining_step))
                    top_k_pixels = int((ratio * self.top_k_percent_pixels +
                                        (1.0 - ratio)) * num_pixels)
                _, top_k_indices = paddle.topk(pixel_losses,
                                               k=top_k_pixels,
                                               axis=2)
                # NOTE(review): `weight=top_k_indices` passes *indices* where
                # BCEWithLogitsLoss expects per-element weights — confirm this
                # against the reference Ma-Net implementation.
                final_loss += nn.BCEWithLogitsLoss(weight=top_k_indices,
                                                   reduction='mean')(
                                                       pred_logits, gts)
        return final_loss
class Added_CrossEntropyLoss(nn.Layer):
    """Per-sequence cross-entropy loss (ignore_index=255), summed over sequences.

    If ``top_k_percent_pixels`` is set (must satisfy 0 < value < 1), only
    the hardest pixels contribute (hard example mining); the kept fraction
    is annealed from 1.0 down to ``top_k_percent_pixels`` over
    ``hard_example_mining_step`` optimization steps.
    """

    def __init__(self,
                 top_k_percent_pixels=None,
                 hard_example_mining_step=100000):
        super(Added_CrossEntropyLoss, self).__init__()
        self.top_k_percent_pixels = top_k_percent_pixels
        if top_k_percent_pixels is not None:
            assert (top_k_percent_pixels > 0 and top_k_percent_pixels < 1)
        self.hard_example_mining_step = hard_example_mining_step
        # reduction='none' keeps the per-pixel loss shape required for
        # the top-k selection below.  (Idiom fix: was "== None".)
        if self.top_k_percent_pixels is None:
            self.celoss = nn.CrossEntropyLoss(ignore_index=255,
                                              reduction='mean')
        else:
            self.celoss = nn.CrossEntropyLoss(ignore_index=255,
                                              reduction='none')

    def forward(self, dic_tmp, y, step):
        """Sum the loss over sequences.

        dic_tmp -- dict mapping sequence name to predicted logits
        y       -- dict mapping sequence name to ground-truth labels
        step    -- current training step, used to anneal the top-k ratio
        """
        final_loss = 0
        for seq_name in dic_tmp.keys():
            pred_logits = dic_tmp[seq_name]
            gts = y[seq_name]
            if self.top_k_percent_pixels is None:
                final_loss += self.celoss(pred_logits, gts)
            else:
                # Only compute the loss for top k percent pixels.
                # First, compute the loss for all pixels. Note we do not put the loss
                # to loss_collection and set reduction = None to keep the shape.
                num_pixels = float(pred_logits.shape[2] * pred_logits.shape[3])
                # Flatten spatial dims and move classes last: (H*W, C).
                pred_logits = pred_logits.reshape([
                    pred_logits.shape[1],
                    pred_logits.shape[2] * pred_logits.shape[3]
                ]).transpose([1, 0])
                gts = gts.reshape([gts.shape[1] * gts.shape[2]])
                pixel_losses = self.celoss(pred_logits, gts).reshape([1, -1])
                if self.hard_example_mining_step == 0:
                    top_k_pixels = int(self.top_k_percent_pixels * num_pixels)
                else:
                    # Linearly anneal the kept fraction from 1.0 to top_k_percent_pixels.
                    ratio = min(1.0,
                                step / float(self.hard_example_mining_step))
                    top_k_pixels = int((ratio * self.top_k_percent_pixels +
                                        (1.0 - ratio)) * num_pixels)
                top_k_loss, top_k_indices = paddle.topk(pixel_losses,
                                                        k=top_k_pixels,
                                                        axis=1)
                final_loss += paddle.mean(top_k_loss)
        return final_loss
class AddedEdge_CrossEntropyLoss(nn.Layer):
    """Edge-prediction loss with class balancing between edge and non-edge pixels.

    Without top-k mining this is a weighted BCE-with-logits term plus an
    overlap term; with ``top_k_percent_pixels`` set it is a weighted
    cross-entropy over the hardest pixels (hard example mining).
    """

    def __init__(self,
                 top_k_percent_pixels=None,
                 hard_example_mining_step=100000):
        super(AddedEdge_CrossEntropyLoss, self).__init__()
        self.top_k_percent_pixels = top_k_percent_pixels
        if top_k_percent_pixels is not None:
            assert (top_k_percent_pixels > 0 and top_k_percent_pixels < 1)
        self.hard_example_mining_step = hard_example_mining_step
        self.celoss = None

    def forward(self, pred_logits, gts, step):
        """Return the edge loss for one batch of logits against binary targets."""
        # Balance classes: weight each class by the other's pixel frequency.
        pos_num = paddle.sum(gts == 1, dtype='float32')
        neg_num = paddle.sum(gts == 0, dtype='float32')
        weight_pos = neg_num / (pos_num + neg_num)
        weight_neg = pos_num / (pos_num + neg_num)
        weights = paddle.to_tensor([weight_neg, weight_pos])
        if self.top_k_percent_pixels is None:  # idiom fix: was "== None"
            sig_pred_logits = paddle.nn.functional.sigmoid(pred_logits)
            self.bceloss = nn.BCEWithLogitsLoss(pos_weight=weight_pos,
                                                reduction='mean')
            if paddle.sum(gts) == 0:
                # No edge pixels in the target: skip the overlap term.
                dcloss = 0
            else:
                # NOTE(review): this is (|p|^2 + |g|^2) / (2*p.g + eps) — the
                # reciprocal of the usual dice coefficient; confirm the
                # intended formula against the reference implementation.
                dcloss = (paddle.sum(sig_pred_logits * sig_pred_logits) +
                          paddle.sum(gts * gts)) / (
                              paddle.sum(2 * sig_pred_logits * gts) + 1e-5)
            final_loss = 0.1 * self.bceloss(pred_logits, gts) + dcloss
        else:
            self.celoss = nn.CrossEntropyLoss(weight=weights,
                                              ignore_index=255,
                                              reduction='none')
            num_pixels = float(pred_logits.shape[2] * pred_logits.shape[3])
            # BUG FIX: paddle.Tensor has no .view(); use reshape(list), as
            # the sibling loss classes do.
            pred_logits = pred_logits.reshape(
                [-1, pred_logits.shape[1],
                 pred_logits.shape[2] * pred_logits.shape[3]])
            gts = gts.reshape([-1, gts.shape[2] * gts.shape[3]])
            pixel_losses = self.celoss(pred_logits, gts)
            if self.hard_example_mining_step == 0:
                top_k_pixels = int(self.top_k_percent_pixels * num_pixels)
            else:
                # Linearly anneal the kept fraction from 1.0 to top_k_percent_pixels.
                ratio = min(1.0, step / float(self.hard_example_mining_step))
                top_k_pixels = int((ratio * self.top_k_percent_pixels +
                                    (1.0 - ratio)) * num_pixels)
            top_k_loss, top_k_indices = paddle.topk(pixel_losses,
                                                    k=top_k_pixels,
                                                    axis=1)
            final_loss = paddle.mean(top_k_loss)
        return final_loss
| 48.623377
| 85
| 0.538061
| 879
| 7,488
| 4.248009
| 0.11149
| 0.051419
| 0.091323
| 0.141136
| 0.819497
| 0.765399
| 0.760578
| 0.742635
| 0.7188
| 0.7188
| 0
| 0.020694
| 0.380475
| 7,488
| 153
| 86
| 48.941176
| 0.784221
| 0.047676
| 0
| 0.641791
| 0
| 0
| 0.005896
| 0
| 0
| 0
| 0
| 0
| 0.022388
| 1
| 0.044776
| false
| 0
| 0.022388
| 0
| 0.11194
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1c14b01699893af15c95d36878d94ede3b2b8cc7
| 49
|
py
|
Python
|
prototype/test/pythonvm_book/test_if.py
|
zoloypzuo/ZeloPy
|
43d9242a509737fe1bb66deba73aa9e749b53c62
|
[
"MIT"
] | null | null | null |
prototype/test/pythonvm_book/test_if.py
|
zoloypzuo/ZeloPy
|
43d9242a509737fe1bb66deba73aa9e749b53c62
|
[
"MIT"
] | null | null | null |
prototype/test/pythonvm_book/test_if.py
|
zoloypzuo/ZeloPy
|
43d9242a509737fe1bb66deba73aa9e749b53c62
|
[
"MIT"
] | null | null | null |
# Python 2 fixture for the bytecode VM: exercises compare, conditional
# jump, and the py2 print statement.  The condition is constant-true,
# so the expected output is "2" then "3".
if 2 > 1:
    print 2
else:
    print 1
print 3
| 7
| 11
| 0.530612
| 10
| 49
| 2.6
| 0.6
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 0.408163
| 49
| 6
| 12
| 8.166667
| 0.724138
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.6
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
1c15e75e09765002801a510caa2b7f76dde6c19e
| 182
|
py
|
Python
|
src/lib/trains/train_factory.py
|
EvelynYihuiYang/MCMOT
|
8ea20b57d836cc8f8efe1b13dead3e5d8511c16d
|
[
"MIT"
] | 306
|
2020-05-29T06:59:37.000Z
|
2022-03-23T06:00:55.000Z
|
src/lib/trains/train_factory.py
|
EvelynYihuiYang/MCMOT
|
8ea20b57d836cc8f8efe1b13dead3e5d8511c16d
|
[
"MIT"
] | 92
|
2020-06-26T10:15:25.000Z
|
2022-03-27T11:46:31.000Z
|
src/lib/trains/train_factory.py
|
EvelynYihuiYang/MCMOT
|
8ea20b57d836cc8f8efe1b13dead3e5d8511c16d
|
[
"MIT"
] | 79
|
2020-06-22T03:14:34.000Z
|
2022-03-17T08:09:13.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .mot import MotTrainer
# Registry mapping a task identifier to its Trainer class.
# 'mot' is currently the only registered task.
train_factory = {
    'mot': MotTrainer,
}
| 18.2
| 38
| 0.802198
| 22
| 182
| 5.954545
| 0.5
| 0.229008
| 0.366412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 182
| 9
| 39
| 20.222222
| 0.850649
| 0
| 0
| 0
| 0
| 0
| 0.016484
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.571429
| 0
| 0.571429
| 0.142857
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
98bd5c789b123af45bb3be41cc82a7e27ed568f1
| 27
|
py
|
Python
|
build/lib/rowingdata/__init__.py
|
sanderroosendaal/rowingdata
|
efd8aa1566a926f11fb3f6b5b340665bc26028c4
|
[
"MIT"
] | 4
|
2017-04-24T15:20:46.000Z
|
2021-02-12T23:03:29.000Z
|
rowingdata/__init__.py
|
sanderroosendaal/rowingdata
|
efd8aa1566a926f11fb3f6b5b340665bc26028c4
|
[
"MIT"
] | 38
|
2016-11-02T07:57:50.000Z
|
2022-01-22T13:25:14.000Z
|
build/lib/rowingdata/__init__.py
|
sanderroosendaal/rowingdata
|
efd8aa1566a926f11fb3f6b5b340665bc26028c4
|
[
"MIT"
] | 6
|
2017-01-19T21:39:46.000Z
|
2021-11-16T14:48:58.000Z
|
from .rowingdata import *
| 9
| 25
| 0.740741
| 3
| 27
| 6.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185185
| 27
| 2
| 26
| 13.5
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
98fda314abac492789d82fa79773a04e1a203504
| 36
|
py
|
Python
|
tests/unit/cli/test_archives.py
|
tehlingchu/anchore-cli
|
b0df36337f443749991a49263227c1d40989debb
|
[
"Apache-2.0"
] | 110
|
2017-09-14T02:15:15.000Z
|
2022-03-30T20:14:21.000Z
|
tests/unit/cli/test_archives.py
|
tehlingchu/anchore-cli
|
b0df36337f443749991a49263227c1d40989debb
|
[
"Apache-2.0"
] | 115
|
2017-09-22T12:15:30.000Z
|
2022-01-17T12:31:21.000Z
|
tests/unit/cli/test_archives.py
|
tehlingchu/anchore-cli
|
b0df36337f443749991a49263227c1d40989debb
|
[
"Apache-2.0"
] | 56
|
2017-09-22T11:26:25.000Z
|
2022-03-03T14:14:58.000Z
|
from anchorecli.cli import archives
| 18
| 35
| 0.861111
| 5
| 36
| 6.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.96875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c718beea4e0b0d05c44c0091939d68a37dbbedb3
| 27
|
py
|
Python
|
discordtextsanitizer/__init__.py
|
mikeshardmind/discord-text-sanitizer
|
3a842f622abe29c1d9a2bb41b5782a178272f166
|
[
"MIT"
] | null | null | null |
discordtextsanitizer/__init__.py
|
mikeshardmind/discord-text-sanitizer
|
3a842f622abe29c1d9a2bb41b5782a178272f166
|
[
"MIT"
] | null | null | null |
discordtextsanitizer/__init__.py
|
mikeshardmind/discord-text-sanitizer
|
3a842f622abe29c1d9a2bb41b5782a178272f166
|
[
"MIT"
] | null | null | null |
from ._sanitizers import *
| 13.5
| 26
| 0.777778
| 3
| 27
| 6.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c72aba9f4de44d7ea8672629b92cccf91c5614db
| 7,801
|
py
|
Python
|
Lv1_data_bin.py
|
masonng-astro/nicerpy_xrayanalysis
|
c21c7c9bc5570c63c986197fb363ae80691515d5
|
[
"MIT"
] | 3
|
2020-01-13T20:13:14.000Z
|
2021-06-03T21:58:08.000Z
|
Lv1_data_bin.py
|
masonng-astro/nicerpy_xrayanalysis
|
c21c7c9bc5570c63c986197fb363ae80691515d5
|
[
"MIT"
] | null | null | null |
Lv1_data_bin.py
|
masonng-astro/nicerpy_xrayanalysis
|
c21c7c9bc5570c63c986197fb363ae80691515d5
|
[
"MIT"
] | 2
|
2020-01-15T15:08:40.000Z
|
2021-07-09T11:49:30.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tues Jan 8 2:11pm 2019
Extracting the GTIs from the FITS files. Use the event_cl files.
"""
from __future__ import division, print_function
from astropy.io import fits
import numpy as np
import Lv0_dirs,Lv1_data_filter
from scipy import stats
import matplotlib.pyplot as plt
def binning_t(eventfile,par_list,tbin_size,t1,t2):
    """
    Binning routine for when I truncate the data by JUST time interval.
    Got to make sure I have TIME and PI called!

    eventfile - path to the event file. Will extract ObsID from this for the NICER files.
    par_list - A list of parameters we'd like to extract from the FITS file
    (e.g., from eventcl, PI_FAST, TIME, PI,)
    tbin_size - the size of the time bins (in seconds!)
    >> e.g., tbin_size = 2 means bin by 2s
    >> e.g., tbin_size = 0.05 means bin by 0.05s!
    t1 - lower time boundary
    t2 - upper time boundary

    Returns (t_bins, summed_data): the time bin edges and the photon
    counts summed in each bin.
    """
    if type(eventfile) != str:
        raise TypeError("eventfile should be a string!")
    # np.float was removed in NumPy >= 1.24; isinstance with the builtin
    # float accepts the same values (and NumPy float scalars too).
    if not isinstance(tbin_size, (int, float)):
        raise TypeError("tbin_size should be a float or integer!")
    # BUG FIX: the original "'PI' and 'TIME' not in par_list" only ever
    # tested 'TIME' (the literal 'PI' is truthy); check both explicitly.
    if 'PI' not in par_list or 'TIME' not in par_list:
        raise ValueError("You should have BOTH 'PI' and 'TIME' in the parameter list!")
    if type(par_list) != list and type(par_list) != np.ndarray:
        raise TypeError("par_list should either be a list or an array!")

    truncated_t = Lv1_data_filter.filter_time(eventfile,par_list,t1,t2)
    counts = np.ones(len(truncated_t))

    startt = int(t1)
    endt = int(t2)

    t_bins = np.linspace(startt,endt,int((endt-startt)*1/tbin_size+1)) #getting an array of time values for the bins
    summed_data, bin_edges, binnumber = stats.binned_statistic(truncated_t,counts,statistic='sum',bins=t_bins) #binning the counts in the data

    print("The data is binned by " + str(tbin_size) + 's')

    return t_bins, summed_data
def binning_E(eventfile,par_list,tbin_size,Ebin_size,E1,E2):
    """
    Binning routine for when I truncate the data by JUST energy range.
    Got to make sure I have TIME and PI called!

    eventfile - path to the event file. Will extract ObsID from this for the NICER files.
    par_list - A list of parameters we'd like to extract from the FITS file
    (e.g., from eventcl, PI_FAST, TIME, PI,)
    tbin_size - the size of the time bins (in seconds!)
    >> e.g., tbin_size = 2 means bin by 2s
    >> e.g., tbin_size = 0.05 means bin by 0.05s!
    Ebin_size - the size of the energy bins (in keV!)
    >> e.g., Ebin_size = 0.1 means bin by 0.1keV
    >> e.g., Ebin_size = 0.05 means bin by 0.05keV
    E1 - lower energy boundary
    E2 - upper energy boundary

    Returns (t_bins, summed_data_t, E_bins, summed_data_E): time and
    energy bin edges with the counts summed in each.
    """
    if type(eventfile) != str:
        raise TypeError("eventfile should be a string!")
    # np.float was removed in NumPy >= 1.24; use the builtin float instead.
    if not isinstance(tbin_size, (int, float)):
        raise TypeError("tbin_size should be a float or integer!")
    if not isinstance(Ebin_size, (int, float)):
        raise TypeError("Ebin_size should be a float or integer!")
    # BUG FIX: the original "'PI' and 'TIME' not in par_list" only ever
    # tested 'TIME' (the literal 'PI' is truthy); check both explicitly.
    if 'PI' not in par_list or 'TIME' not in par_list:
        raise ValueError("You should have BOTH 'PI' and 'TIME' in the parameter list!")
    if type(par_list) != list and type(par_list) != np.ndarray:
        raise TypeError("par_list should either be a list or an array!")

    truncated_t, truncated_E = Lv1_data_filter.filter_energy(eventfile,par_list,E1,E2)
    counts = np.ones(len(truncated_t))

    startt = int(truncated_t[0])
    endt = np.ceil(truncated_t[-1])

    t_bins = np.linspace(startt,endt,int((endt-startt)*1/tbin_size+1)) #getting an array of time values for the bins
    summed_data_t, bin_edges, binnumber = stats.binned_statistic(truncated_t,counts,statistic='sum',bins=t_bins) #binning the time values in the data

    if E1 < 1: #if less than 1keV, the binning for 0.3-1keV is slightly different.
        E_bins = np.linspace(E1,E2,int((E2-E1)*1/Ebin_size+2)) #getting an array of energy values for the bins
    else:
        E_bins = np.linspace(E1,E2,int((E2-E1)*1/Ebin_size+1)) #getting an array of energy values for the bins
    summed_data_E, bin_edges, binnumber = stats.binned_statistic(truncated_E,counts,statistic='sum',bins=E_bins) #binning the energy values in the data

    print("The data is binned by " + str(tbin_size) + 's, and ' + str(Ebin_size) + 'keV')

    return t_bins, summed_data_t, E_bins, summed_data_E
def binning_tE(eventfile,par_list,tbin_size,Ebin_size,t1,t2,E1,E2):
    """
    Binning routine for when I truncated the data by BOTH time interval AND energy range.
    Got to make sure I have TIME and PI called!

    eventfile - path to the event file. Will extract ObsID from this for the NICER files.
    par_list - A list of parameters we'd like to extract from the FITS file
    (e.g., from eventcl, PI_FAST, TIME, PI,)
    tbin_size - the size of the time bins (in seconds!)
    >> e.g., tbin_size = 2 means bin by 2s
    >> e.g., tbin_size = 0.05 means bin by 0.05s!
    Ebin_size - the size of the energy bins (in keV!)
    >> e.g., Ebin_size = 0.1 means bin by 0.1keV
    >> e.g., Ebin_size = 0.05 means bin by 0.05keV
    t1 - lower time boundary
    t2 - upper time boundary
    E1 - lower energy boundary
    E2 - upper energy boundary

    Returns (t_bins, summed_data_t, E_bins, summed_data_E): time and
    energy bin edges with the counts summed in each.
    """
    if type(eventfile) != str:
        raise TypeError("eventfile should be a string!")
    # np.float was removed in NumPy >= 1.24; use the builtin float instead.
    if not isinstance(tbin_size, (int, float)):
        raise TypeError("tbin_size should be a float or integer!")
    if not isinstance(Ebin_size, (int, float)):
        raise TypeError("Ebin_size should be a float or integer!")
    # BUG FIX: the original "'PI' and 'TIME' not in par_list" only ever
    # tested 'TIME' (the literal 'PI' is truthy); check both explicitly.
    if 'PI' not in par_list or 'TIME' not in par_list:
        raise ValueError("You should have BOTH 'PI' and 'TIME' in the parameter list!")
    if type(par_list) != list and type(par_list) != np.ndarray:
        raise TypeError("par_list should either be a list or an array!")
    if t2<t1:
        raise ValueError("t2 should be greater than t1!")
    if E2<E1:
        raise ValueError("E2 should be greater than E1!")

    truncated_t, truncated_E = Lv1_data_filter.filter_data(eventfile,par_list,t1,t2,E1,E2)
    counts = np.ones(len(truncated_t))

    startt = int(t1)
    endt = int(t2)

    # BUG FIX: np.linspace requires an integer `num`; the float expression
    # raises TypeError on modern NumPy.  Cast as binning_t/binning_E do.
    t_bins = np.linspace(startt,endt,int((endt-startt)*1/tbin_size+1)) #getting an array of time values for the bins
    summed_data_t, bin_edges, binnumber = stats.binned_statistic(truncated_t,counts,statistic='sum',bins=t_bins) #binning the time values in the data

    if E1 < 1: #if less than 1keV, the binning for 0.3-1keV is slightly different.
        E_bins = np.linspace(E1,E2,int((E2-E1)*1/Ebin_size+2)) #getting an array of energy values for the bins
    else:
        E_bins = np.linspace(E1,E2,int((E2-E1)*1/Ebin_size+1)) #getting an array of energy values for the bins
    summed_data_E, bin_edges, binnumber = stats.binned_statistic(truncated_E,counts,statistic='sum',bins=E_bins) #binning the energy values in the data

    print("The data is binned by " + str(tbin_size) + 's, and ' + str(Ebin_size) + 'keV')

    return t_bins, summed_data_t, E_bins, summed_data_E
if __name__ == "__main__":
    # Manual smoke test: run all three binning routines on one NICER
    # observation.  Expects the barycentered event file to exist under
    # Lv0_dirs.NICER_DATADIR.
    obsid = '1034070101'
    eventfile = Lv0_dirs.NICER_DATADIR + obsid + '/xti/event_cl/ni' + obsid + '_0mpu7_cl_bary.evt'
    par_list = ['TIME','PI','PI_RATIO']
    t1 = 0            # lower time boundary (s)
    t2 = 300          # upper time boundary (s)
    E1 = 0.3          # lower energy boundary (keV)
    E2 = 6            # upper energy boundary (keV)
    tbin_size = 1     # time bin width (s)
    Ebin_size = 0.05  # energy bin width (keV)
    tbins,summed_data = binning_t(eventfile,par_list,tbin_size,t1,t2)
    #print(len(tbins),len(summed_data))
    tbins,summed_t_data,Ebins,summed_E_data = binning_E(eventfile,par_list,tbin_size,Ebin_size,E1,E2)
    #print(len(tbins),len(summed_t_data),len(Ebins),len(summed_E_data))
    tbins,summed_t_data,Ebins,summed_E_data = binning_tE(eventfile,par_list,tbin_size,Ebin_size,t1,t2,E1,E2)
    #print(len(tbins),len(summed_t_data),len(Ebins),len(summed_E_data))
| 46.712575
| 151
| 0.689399
| 1,333
| 7,801
| 3.885221
| 0.126782
| 0.047886
| 0.019309
| 0.014868
| 0.876038
| 0.860012
| 0.860012
| 0.855764
| 0.829504
| 0.815601
| 0
| 0.027979
| 0.202795
| 7,801
| 166
| 152
| 46.993976
| 0.804792
| 0.357134
| 0
| 0.556818
| 0
| 0
| 0.174474
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034091
| false
| 0
| 0.068182
| 0
| 0.136364
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c72dd5e6413fc77787b5ef6104241242682c1a54
| 2,482
|
py
|
Python
|
apps/accounts/tests/test_models.py
|
Intellia-SME/OptiPLANT
|
1d40b62f00b3fff940499fa27d0c2d59e7e6dd4c
|
[
"Apache-2.0"
] | 1
|
2022-01-26T18:07:22.000Z
|
2022-01-26T18:07:22.000Z
|
apps/accounts/tests/test_models.py
|
Intellia-SME/OptiPLANT
|
1d40b62f00b3fff940499fa27d0c2d59e7e6dd4c
|
[
"Apache-2.0"
] | null | null | null |
apps/accounts/tests/test_models.py
|
Intellia-SME/OptiPLANT
|
1d40b62f00b3fff940499fa27d0c2d59e7e6dd4c
|
[
"Apache-2.0"
] | 1
|
2022-01-26T18:07:26.000Z
|
2022-01-26T18:07:26.000Z
|
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.test import TestCase
UserModel = get_user_model()
class CustomUserTests(TestCase):
    """Validation behaviour of the custom user model's username and email fields."""

    @classmethod
    def setUpTestData(cls):
        # One shared user that the uniqueness tests collide against.
        cls.user = UserModel.objects.create_user(username='guest', email="guest@guest.gr")

    def _failing_create(self, **fields):
        """Create a user that is expected to fail validation; return its error messages."""
        with self.assertRaises(ValidationError) as ctx:
            UserModel.objects.create(**fields)
        return ctx.exception.messages

    def test_username_is_mandatory(self):
        messages = self._failing_create(email="guest2@guest.gr", password=self.user.password)
        self.assertEqual(messages[0], 'This field cannot be blank.')

    def test_username_is_unique(self):
        messages = self._failing_create(
            username=self.user.username, email="guest2@guest.gr", password=self.user.password
        )
        self.assertEqual(messages[0], 'A user with that username already exists.')

    def test_username_is_case_insensitive(self):
        messages = self._failing_create(
            username=self.user.username.upper(), email="guest2@guest.gr", password=self.user.password
        )
        self.assertEqual(messages[0], 'A user with that username already exists.')

    def test_username_is_not_unicode_based(self):
        messages = self._failing_create(
            username=self.user.username + "¬", email="guest2@guest.gr", password=self.user.password
        )
        self.assertTrue('Enter a valid username.' in messages[0])

    def test_email_is_mandatory(self):
        messages = self._failing_create(username="guest1", password=self.user.password)
        self.assertEqual(messages[0], 'This field cannot be blank.')

    def test_email_is_unique(self):
        messages = self._failing_create(
            username="guest2", email=self.user.email, password=self.user.password
        )
        self.assertEqual(messages[0], 'A user with that email address already exists.')

    def test_email_is_case_insensitive(self):
        messages = self._failing_create(
            username="guest2", email=self.user.email.upper(), password=self.user.password
        )
        self.assertEqual(messages[0], 'A user with that email address already exists.')
| 48.666667
| 119
| 0.710314
| 311
| 2,482
| 5.575563
| 0.199357
| 0.096886
| 0.101499
| 0.096886
| 0.77624
| 0.77624
| 0.77624
| 0.77624
| 0.77624
| 0.749712
| 0
| 0.006886
| 0.180903
| 2,482
| 50
| 120
| 49.64
| 0.845548
| 0
| 0
| 0.375
| 0
| 0
| 0.140612
| 0
| 0
| 0
| 0
| 0
| 0.35
| 1
| 0.2
| false
| 0.175
| 0.075
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
c74d105b2c83507858c856fc2580011a228f63dd
| 2,062
|
py
|
Python
|
app.py
|
codesydney/censusplus_nsw_employmentrate
|
882e6f0986c6456c16240e5af6f23c3025042710
|
[
"CC-BY-4.0"
] | null | null | null |
app.py
|
codesydney/censusplus_nsw_employmentrate
|
882e6f0986c6456c16240e5af6f23c3025042710
|
[
"CC-BY-4.0"
] | null | null | null |
app.py
|
codesydney/censusplus_nsw_employmentrate
|
882e6f0986c6456c16240e5af6f23c3025042710
|
[
"CC-BY-4.0"
] | null | null | null |
###########################################################################
# Modified the table for Employment Rate- Albert Molina 13-03-2018 #
###########################################################################
from flask import Flask, g, request, jsonify
from database import get_db
app = Flask(__name__)
# Columns selected by every employment-rate query, in result order.
_COLUMNS = ('YEAR', 'LOCALITY', 'SUBURB', 'STATE', 'POSTCODE', 'EMPLOYED', 'UNEMPLOYED')


def _rows_to_dicts(rows):
    """Convert fetched DB rows into plain dicts keyed by _COLUMNS.

    Extracted because both routes previously duplicated this marshalling
    loop field-by-field.
    """
    return [{col: row[col] for col in _COLUMNS} for row in rows]


@app.route('/details', methods=['GET'])
def get_details():
    """Return every NSW employment-rate record as JSON under 'details'."""
    db = get_db()
    details_cur = db.execute('select YEAR, LOCALITY, SUBURB, STATE, POSTCODE, EMPLOYED, UNEMPLOYED from NSW_EMPLOYMENT_RATE')
    return jsonify({'details' : _rows_to_dicts(details_cur.fetchall())})


@app.route('/details/<string:SUBURB>', methods=['GET'])
def get_detail(SUBURB):
    """Return the records for one suburb as JSON under 'details'.

    The suburb value is bound as a query parameter, so user input is not
    interpolated into the SQL string.
    """
    db = get_db()
    details_cur = db.execute('select YEAR, LOCALITY, SUBURB, STATE, POSTCODE, EMPLOYED, UNEMPLOYED from NSW_EMPLOYMENT_RATE where SUBURB = ?', [SUBURB])
    return jsonify({'details' : _rows_to_dicts(details_cur.fetchall())})
if __name__ == '__main__':
app.run(debug=True)
| 36.821429
| 153
| 0.572745
| 207
| 2,062
| 5.468599
| 0.246377
| 0.159011
| 0.026502
| 0.028269
| 0.772085
| 0.772085
| 0.772085
| 0.772085
| 0.772085
| 0.772085
| 0
| 0.005031
| 0.228904
| 2,062
| 56
| 154
| 36.821429
| 0.706918
| 0.031038
| 0
| 0.717949
| 0
| 0
| 0.257431
| 0.01346
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051282
| false
| 0
| 0.051282
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c766fa0664a0fe1a17c4d8004a13b1ebf1a62a44
| 194
|
py
|
Python
|
tests/test_data.py
|
jstaf/gapminder
|
54606882845ecebc3523c9602d17c78a968a2700
|
[
"BSD-3-Clause"
] | 3
|
2018-09-27T02:09:10.000Z
|
2021-07-29T02:13:48.000Z
|
tests/test_data.py
|
jstaf/gapminder
|
54606882845ecebc3523c9602d17c78a968a2700
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_data.py
|
jstaf/gapminder
|
54606882845ecebc3523c9602d17c78a968a2700
|
[
"BSD-3-Clause"
] | null | null | null |
'''
Just make sure the data can be loaded on all supported Python versions.
'''
def test_load_gapminder():
    """Smoke-test that the gapminder dataset imports and its first cell is sane."""
    from gapminder import gapminder as data
    first_cell = data.iloc[0, 0]
    assert first_cell == 'Afghanistan'
| 21.555556
| 71
| 0.721649
| 27
| 194
| 5.111111
| 0.851852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012658
| 0.185567
| 194
| 8
| 72
| 24.25
| 0.860759
| 0.365979
| 0
| 0
| 0
| 0
| 0.096491
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c78928ef494f86c9bcba298be7663c1e71b4ad1f
| 32,292
|
py
|
Python
|
skyline_apiserver/policy/manager/trove.py
|
openstack/skyline-apiserver
|
60144767cd5513bd581fbb8eac7791887d5b276f
|
[
"Apache-2.0"
] | null | null | null |
skyline_apiserver/policy/manager/trove.py
|
openstack/skyline-apiserver
|
60144767cd5513bd581fbb8eac7791887d5b276f
|
[
"Apache-2.0"
] | null | null | null |
skyline_apiserver/policy/manager/trove.py
|
openstack/skyline-apiserver
|
60144767cd5513bd581fbb8eac7791887d5b276f
|
[
"Apache-2.0"
] | null | null | null |
from . import base
# Shared check strings. Most rules allow an administrator OR a member of the
# owning project; a few are admin-only, one is admin-role only, and the
# datastore/flavor read rules are wide open (empty check string).
_ADMIN = "(role:admin or is_admin:True)"
_PROJECT = "((role:admin or is_admin:True) or project_id:%(project_id)s)"

# One entry per API rule: (name, check_str, description, ((method, path), ...)).
# Declaration order is preserved -- it matches the original explicit tuple.
# Descriptions are kept byte-for-byte (including original typos) because they
# are emitted into generated policy documentation.
_API_RULES = (
    ("trove:instance:create", _PROJECT, "Create a database instance.",
     (("POST", "/v1.0/{account_id}/instances"),)),
    ("trove:instance:delete", _PROJECT, "Delete a database instance.",
     (("DELETE", "/v1.0/{account_id}/instances/{instance_id}"),)),
    ("trove:instance:force_delete", _PROJECT, "Forcibly delete a database instance.",
     (("DELETE", "/v1.0/{account_id}/instances/{instance_id}"),)),
    ("trove:instance:index", _PROJECT, "List database instances.",
     (("GET", "/v1.0/{account_id}/instances"),)),
    ("trove:instance:detail", _PROJECT, "List database instances with details.",
     (("GET", "/v1.0/{account_id}/instances/detail"),)),
    ("trove:instance:show", _PROJECT, "Get details of a specific database instance.",
     (("GET", "/v1.0/{account_id}/instances/{instance_id}"),)),
    ("trove:instance:update", _PROJECT,
     "Update a database instance to attach/detach configuration",
     (("PUT", "/v1.0/{account_id}/instances/{instance_id}"),
      ("POST", "/v1.0/{account_id}/instances"))),
    ("trove:instance:edit", _PROJECT,
     "Updates the instance to set or unset one or more attributes.",
     (("PATCH", "/v1.0/{account_id}/instances/{instance_id}"),)),
    ("trove:instance:restart", _PROJECT, "Restart a database instance.",
     (("POST", "/v1.0/{account_id}/instances/{instance_id}/action (restart)"),)),
    ("trove:instance:resize_volume", _PROJECT, "Resize a database instance volume.",
     (("POST", "/v1.0/{account_id}/instances/{instance_id}/action (resize)"),)),
    ("trove:instance:resize_flavor", _PROJECT, "Resize a database instance flavor.",
     (("POST", "/v1.0/{account_id}/instances/{instance_id}/action (resize)"),)),
    ("trove:instance:reset_status", _ADMIN,
     "Reset the status of a database instance to ERROR.",
     (("POST", "/v1.0/{account_id}/instances/{instance_id}/action (reset_status)"),)),
    ("trove:instance:promote_to_replica_source", _PROJECT,
     "Promote instance to replica source.",
     (("POST", "/v1.0/{account_id}/instances/{instance_id}/action (promote_to_replica_source)"),)),  # noqa
    ("trove:instance:eject_replica_source", _PROJECT,
     "Eject the replica source from its replica set.",
     (("POST", "/v1.0/{account_id}/instances/{instance_id}/action (eject_replica_source)"),)),
    ("trove:instance:configuration", _PROJECT,
     "Get the default configuration template applied to the instance.",
     (("GET", "/v1.0/{account_id}/instances/{instance_id}/configuration"),)),
    ("trove:instance:guest_log_list", _PROJECT,
     "Get all informations about all logs of a database instance.",
     (("GET", "/v1.0/{account_id}/instances/{instance_id}/log"),)),
    ("trove:instance:backups", _PROJECT,
     "Get all backups of a database instance.",
     (("GET", "/v1.0/{account_id}/instances/{instance_id}/backups"),)),
    ("trove:instance:module_list", _PROJECT,
     "Get informations about modules on a database instance.",
     (("GET", "/v1.0/{account_id}/instances/{instance_id}/modules"),)),
    ("trove:instance:module_apply", _PROJECT,
     "Apply modules to a database instance.",
     (("POST", "/v1.0/{account_id}/instances/{instance_id}/modules"),
      ("POST", "/v1.0/{account_id}/instances"))),
    ("trove:instance:module_remove", _PROJECT,
     "Remove a module from a database instance.",
     (("DELETE", "/v1.0/{account_id}/instances/{instance_id}/modules/{module_id}"),)),
    ("trove:instance:extension:root:create", _PROJECT,
     "Enable the root user of a database instance.",
     (("POST", "/v1.0/{account_id}/instances/{instance_id}/root"),)),
    ("trove:instance:extension:root:delete", _PROJECT,
     "Disable the root user of a database instance.",
     (("DELETE", "/v1.0/{account_id}/instances/{instance_id}/root"),)),
    ("trove:instance:extension:root:index", _PROJECT,
     "Show whether the root user of a database instance has been ever enabled.",
     (("GET", "/v1.0/{account_id}/instances/{instance_id}/root"),)),
    ("trove:cluster:extension:root:create", _PROJECT,
     "Enable the root user of the instances in a cluster.",
     (("POST", "/v1.0/{account_id}/clusters/{cluster}/root"),)),
    ("trove:cluster:extension:root:delete", _PROJECT,
     "Enable the root user of the instances in a cluster.",
     (("DELETE", "/v1.0/{account_id}/clusters/{cluster}/root"),)),
    ("trove:cluster:extension:root:index", _PROJECT,
     "Disable the root of the instances in a cluster.",
     (("GET", "/v1.0/{account_id}/clusters/{cluster}/root"),)),
    ("trove:instance:extension:user:create", _PROJECT,
     "Create users for a database instance.",
     (("POST", "/v1.0/{account_id}/instances/{instance_id}/users"),
      ("POST", "/v1.0/{account_id}/instances"))),
    ("trove:instance:extension:user:delete", _PROJECT,
     "Delete a user from a database instance.",
     (("DELETE", "/v1.0/{account_id}/instances/{instance_id}/users/{user}"),)),
    ("trove:instance:extension:user:index", _PROJECT,
     "Get all users of a database instance.",
     (("GET", "/v1.0/{account_id}/instances/{instance_id}/users"),)),
    ("trove:instance:extension:user:show", _PROJECT,
     "Get the information of a single user of a database instance.",
     (("GET", "/v1.0/{account_id}/instances/{instance_id}/users/{user}"),)),
    ("trove:instance:extension:user:update", _PROJECT,
     "Update attributes for a user of a database instance.",
     (("PUT", "/v1.0/{account_id}/instances/{instance_id}/users/{user}"),)),
    ("trove:instance:extension:user:update_all", _PROJECT,
     "Update the password for one or more users a database instance.",
     (("PUT", "/v1.0/{account_id}/instances/{instance_id}/users"),)),
    ("trove:instance:extension:user_access:update", _PROJECT,
     "Grant access for a user to one or more databases.",
     (("PUT", "/v1.0/{account_id}/instances/{instance_id}/users/{user}/databases"),)),
    ("trove:instance:extension:user_access:delete", _PROJECT,
     "Revoke access for a user to a databases.",
     (("DELETE", "/v1.0/{account_id}/instances/{instance_id}/users/{user}/databases/{database}"),)),  # noqa
    ("trove:instance:extension:user_access:index", _PROJECT,
     "Get permissions of a user",
     (("GET", "/v1.0/{account_id}/instances/{instance_id}/users/{user}/databases"),)),
    ("trove:instance:extension:database:create", _PROJECT,
     "Create a set of Schemas",
     (("POST", "/v1.0/{account_id}/instances/{instance_id}/databases"),
      ("POST", "/v1.0/{account_id}/instances"))),
    ("trove:instance:extension:database:delete", _PROJECT,
     "Delete a schema from a database.",
     (("DELETE", "/v1.0/{account_id}/instances/{instance_id}/databases/{database}"),)),
    ("trove:instance:extension:database:index", _PROJECT,
     "List all schemas from a database.",
     (("GET", "/v1.0/{account_id}/instances/{instance_id}/databases"),)),
    ("trove:instance:extension:database:show", _PROJECT,
     "Get informations of a schema(Currently Not Implemented).",
     (("GET", "/v1.0/{account_id}/instances/{instance_id}/databases/{database}"),)),
    ("trove:cluster:create", _PROJECT, "Create a cluster.",
     (("POST", "/v1.0/{account_id}/clusters"),)),
    ("trove:cluster:delete", _PROJECT, "Delete a cluster.",
     (("DELETE", "/v1.0/{account_id}/clusters/{cluster}"),)),
    ("trove:cluster:force_delete", _PROJECT, "Forcibly delete a cluster.",
     (("POST", "/v1.0/{account_id}/clusters/{cluster} (reset-status)"),)),
    ("trove:cluster:index", _PROJECT, "List all clusters",
     (("GET", "/v1.0/{account_id}/clusters"),)),
    ("trove:cluster:show", _PROJECT, "Get informations of a cluster.",
     (("GET", "/v1.0/{account_id}/clusters/{cluster}"),)),
    ("trove:cluster:show_instance", _PROJECT,
     "Get informations of a instance in a cluster.",
     (("GET", "/v1.0/{account_id}/clusters/{cluster}/instances/{instance}"),)),
    ("trove:cluster:action", _PROJECT, "Commit an action against a cluster",
     (("POST", "/v1.0/{account_id}/clusters/{cluster}"),)),
    ("trove:cluster:reset-status", _ADMIN,
     "Reset the status of a cluster to NONE.",
     (("POST", "/v1.0/{account_id}/clusters/{cluster} (reset-status)"),)),
    ("trove:backup:create", _PROJECT, "Create a backup of a database instance.",
     (("POST", "/v1.0/{account_id}/backups"),)),
    ("trove:backup:delete", _PROJECT, "Delete a backup of a database instance.",
     (("DELETE", "/v1.0/{account_id}/backups/{backup}"),)),
    ("trove:backup:index", _PROJECT, "List all backups.",
     (("GET", "/v1.0/{account_id}/backups"),)),
    ("trove:backup:index:all_projects", "role:admin",
     "List backups for all the projects.",
     (("GET", "/v1.0/{account_id}/backups"),)),
    ("trove:backup:show", _PROJECT, "Get informations of a backup.",
     (("GET", "/v1.0/{account_id}/backups/{backup}"),)),
    ("trove:backup_strategy:create", _PROJECT, "Create a backup strategy.",
     (("POST", "/v1.0/{account_id}/backup_strategies"),)),
    ("trove:backup_strategy:index", _PROJECT, "List all backup strategies.",
     (("GET", "/v1.0/{account_id}/backup_strategies"),)),
    ("trove:backup_strategy:delete", _PROJECT, "Delete backup strategies.",
     (("DELETE", "/v1.0/{account_id}/backup_strategies"),)),
    ("trove:configuration:create", _PROJECT, "Create a configuration group.",
     (("POST", "/v1.0/{account_id}/configurations"),)),
    ("trove:configuration:delete", _PROJECT, "Delete a configuration group.",
     (("DELETE", "/v1.0/{account_id}/configurations/{config}"),)),
    ("trove:configuration:index", _PROJECT, "List all configuration groups.",
     (("GET", "/v1.0/{account_id}/configurations"),)),
    ("trove:configuration:show", _PROJECT,
     "Get informations of a configuration group.",
     (("GET", "/v1.0/{account_id}/configurations/{config}"),)),
    ("trove:configuration:instances", _PROJECT,
     "List all instances which a configuration group has be assigned to.",
     (("GET", "/v1.0/{account_id}/configurations/{config}/instances"),)),
    ("trove:configuration:update", _PROJECT,
     "Update a configuration group(the configuration group will be replaced completely).",  # noqa
     (("PUT", "/v1.0/{account_id}/configurations/{config}"),)),
    ("trove:configuration:edit", _PROJECT, "Patch a configuration group.",
     (("PATCH", "/v1.0/{account_id}/configurations/{config}"),)),
    ("trove:configuration-parameter:index", _PROJECT,
     "List all parameters bind to a datastore version.",
     (("GET", "/v1.0/{account_id}/datastores/{datastore}/versions/{version}/parameters"),)),
    ("trove:configuration-parameter:show", _PROJECT,
     "Get a paramter of a datastore version.",
     (("GET", "/v1.0/{account_id}/datastores/{datastore}/versions/{version}/parameters/{param}"),)),  # noqa
    ("trove:configuration-parameter:index_by_version", _PROJECT,
     "List all paramters bind to a datastore version by the id of the version(datastore is not provided).",  # noqa
     (("GET", "/v1.0/{account_id}/datastores/versions/{version}/paramters"),)),
    ("trove:configuration-parameter:show_by_version", _PROJECT,
     "Get a paramter of a datastore version by it names and the id of the version(datastore is not provided).",  # noqa
     (("GET", "/v1.0/{account_id}/datastores/versions/{version}/paramters/{param}"),)),
    ("trove:datastore:index", "", "List all datastores.",
     (("GET", "/v1.0/{account_id}/datastores"),)),
    ("trove:datastore:show", "", "Get informations of a datastore.",
     (("GET", "/v1.0/{account_id}/datastores/{datastore}"),)),
    ("trove:datastore:delete", _ADMIN, "Delete a datastore.",
     (("DELETE", "/v1.0/{account_id}/datastores/{datastore}"),)),
    ("trove:datastore:version_show", "",
     "Get a version of a datastore by the version id.",
     (("GET", "/v1.0/{account_id}/datastores/{datastore}/versions/{version}"),)),
    ("trove:datastore:version_show_by_uuid", "",
     "Get a version of a datastore by the version id(without providing the datastore id).",  # noqa
     (("GET", "/v1.0/{account_id}/datastores/versions/{version}"),)),
    ("trove:datastore:version_index", "", "Get all versions of a datastore.",
     (("GET", "/v1.0/{account_id}/datastores/{datastore}/versions"),)),
    ("trove:datastore:list_associated_flavors", "",
     "List all flavors associated with a datastore version.",
     (("GET", "/v1.0/{account_id}/datastores/{datastore}/versions/{version}/flavors"),)),
    ("trove:datastore:list_associated_volume_types", "",
     "List all volume-types associated with a datastore version.",
     (("GET", "/v1.0/{account_id}/datastores/{datastore}/versions/{version}/volume-types"),)),  # noqa
    ("trove:flavor:index", "", "List all flavors.",
     (("GET", "/v1.0/{account_id}/flavors"),)),
    ("trove:flavor:show", "", "Get information of a flavor.",
     (("GET", "/v1.0/{account_id}/flavors/{flavor}"),)),
    ("trove:limits:index", _PROJECT,
     "List all absolute and rate limit informations.",
     (("GET", "/v1.0/{account_id}/limits"),)),
    ("trove:module:create", _PROJECT, "Create a module.",
     (("POST", "/v1.0/{account_id}/modules"),)),
    ("trove:module:delete", _PROJECT, "Delete a module.",
     (("DELETE", "/v1.0/{account_id}/modules/{module}"),)),
    ("trove:module:index", _PROJECT, "List all modules.",
     (("GET", "/v1.0/{account_id}/modules"),)),
    ("trove:module:show", _PROJECT, "Get informations of a module.",
     (("GET", "/v1.0/{account_id}/modules/{module}"),)),
    ("trove:module:instances", _PROJECT,
     "List all instances to which a module is applied.",
     (("GET", "/v1.0/{account_id}/modules/{module}/instances"),)),
    ("trove:module:update", _PROJECT, "Update a module.",
     (("PUT", "/v1.0/{account_id}/modules/{module}"),)),
    ("trove:module:reapply", _PROJECT, "Reapply a module to all instances.",
     (("PUT", "/v1.0/{account_id}/modules/{module}/instances"),)),
)

# The three generic rules referenced by the API rules, followed by one
# base.APIRule per table entry above. All APIRules are project-scoped; the
# resulting tuple has the same contents and order as the original literal.
list_rules = (
    base.Rule(
        name="admin",
        check_str="role:admin or is_admin:True",
        description="Must be an administrator.",
    ),
    base.Rule(
        name="admin_or_owner",
        check_str="rule:admin or project_id:%(tenant)s",
        description="Must be an administrator or owner of the object.",
    ),
    base.Rule(
        name="default",
        check_str="rule:admin_or_owner",
        description="Must be an administrator or owner of the object.",
    ),
) + tuple(
    base.APIRule(
        name=name,
        check_str=check_str,
        description=description,
        scope_types=["project"],
        operations=[{"method": method, "path": path} for method, path in ops],
    )
    for name, check_str, description, ops in _API_RULES
)

__all__ = ("list_rules",)
| 42.65786
| 135
| 0.566208
| 3,589
| 32,292
| 4.936194
| 0.045974
| 0.072646
| 0.034771
| 0.069542
| 0.919734
| 0.884003
| 0.864134
| 0.840032
| 0.82242
| 0.815478
| 0
| 0.007393
| 0.262759
| 32,292
| 756
| 136
| 42.714286
| 0.736758
| 0.001208
| 0
| 0.611406
| 0
| 0.001326
| 0.511623
| 0.245903
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.001326
| 0.001326
| 0
| 0.001326
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c7cfe43fce7595bc77bd26082d55fa1170fea575
| 88
|
py
|
Python
|
examples/plugins/workbench/AcmeLab/acme/workbench/perspective/api.py
|
janvonrickenbach/Envisage_wxPhoenix_py3
|
cf79e5b2a0c3b46898a60b5fe5a2fb580604808b
|
[
"BSD-3-Clause"
] | null | null | null |
examples/plugins/workbench/AcmeLab/acme/workbench/perspective/api.py
|
janvonrickenbach/Envisage_wxPhoenix_py3
|
cf79e5b2a0c3b46898a60b5fe5a2fb580604808b
|
[
"BSD-3-Clause"
] | 1
|
2017-05-22T21:15:22.000Z
|
2017-05-22T21:15:22.000Z
|
examples/plugins/workbench/AcmeLab/acme/workbench/perspective/api.py
|
janvonrickenbach/Envisage_wxPhoenix_py3
|
cf79e5b2a0c3b46898a60b5fe5a2fb580604808b
|
[
"BSD-3-Clause"
] | 1
|
2019-10-01T07:03:58.000Z
|
2019-10-01T07:03:58.000Z
|
from .bar_perspective import BarPerspective
from .foo_perspective import FooPerspective
| 29.333333
| 43
| 0.886364
| 10
| 88
| 7.6
| 0.7
| 0.447368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 88
| 2
| 44
| 44
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c7e5d34715ab642e0c6d911a1c1e53f70ac3f30e
| 38
|
py
|
Python
|
tests/form/__init__.py
|
warownia1/Slivca
|
5491afec63c8cd41d6f1389a5dd0ba9877b888a1
|
[
"Apache-2.0"
] | 5
|
2016-09-01T15:30:46.000Z
|
2019-07-15T12:26:46.000Z
|
tests/form/__init__.py
|
warownia1/Slivca
|
5491afec63c8cd41d6f1389a5dd0ba9877b888a1
|
[
"Apache-2.0"
] | 75
|
2016-08-31T11:32:49.000Z
|
2021-05-12T14:33:17.000Z
|
tests/form/__init__.py
|
warownia1/Slivca
|
5491afec63c8cd41d6f1389a5dd0ba9877b888a1
|
[
"Apache-2.0"
] | 3
|
2017-06-01T10:21:04.000Z
|
2020-06-12T10:32:49.000Z
|
from .custom_field import CustomField
| 19
| 37
| 0.868421
| 5
| 38
| 6.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 1
| 38
| 38
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4009d0663db2ae5a73ba8a95e75bec1ce83e8b09
| 70
|
py
|
Python
|
FoodMarket/1.py
|
Starrynighty0917/FoodMarket
|
7978cc7671d52f1ba421d8db8c463870a4866328
|
[
"MIT"
] | null | null | null |
FoodMarket/1.py
|
Starrynighty0917/FoodMarket
|
7978cc7671d52f1ba421d8db8c463870a4866328
|
[
"MIT"
] | null | null | null |
FoodMarket/1.py
|
Starrynighty0917/FoodMarket
|
7978cc7671d52f1ba421d8db8c463870a4866328
|
[
"MIT"
] | null | null | null |
from FoodMarket.settings import BASE_DIR
# Debug print of the project base directory. str() guards against BASE_DIR
# being a pathlib.Path (the Django >= 3.1 default), which cannot be
# concatenated to a str with "+".
print(":::::::::" + str(BASE_DIR))
| 17.5
| 40
| 0.7
| 9
| 70
| 5.222222
| 0.777778
| 0.297872
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 70
| 4
| 41
| 17.5
| 0.734375
| 0
| 0
| 0
| 0
| 0
| 0.126761
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
401b57739e52107ddcd28ee48fea336718c3c96b
| 1,371
|
py
|
Python
|
RecoMET/METProducers/python/METSignificanceParams_cfi.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
RecoMET/METProducers/python/METSignificanceParams_cfi.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
RecoMET/METProducers/python/METSignificanceParams_cfi.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
import FWCore.ParameterSet.Config as cms
# Parameter set used for MET significance on simulated events (Run II MC tuning).
METSignificanceParams = cms.PSet(
    # jet resolutions
    jetThreshold = cms.double(15),
    #jet-lepton matching dR
    dRMatch = cms.double(0.4),
    # eta bins for jet resolution tuning
    jeta = cms.vdouble(0.8, 1.3, 1.9, 2.5),
    # tuning parameters
    #Run I, based on 53X / JME-13-003
    #jpar = cms.vdouble(1.20,1.13,1.03,0.96,1.08),
    #pjpar = cms.vdouble(-1.9,0.6383)
    #Run II MC, based on 76X
    #https://indico.cern.ch/event/527789/contributions/2160488/attachments/1271716/1884792/nmirman_20160511.pdf
    jpar = cms.vdouble(1.29,1.19,1.07,1.13,1.12),
    pjpar = cms.vdouble(-0.04,0.6504),
)

# Same parameter set but with the tuning constants measured on Run II data;
# only jpar/pjpar differ from the MC version above.
METSignificanceParams_Data=cms.PSet(
    # jet resolutions
    jetThreshold = cms.double(15),
    #jet-lepton matching dR
    dRMatch = cms.double(0.4),
    # eta bins for jet resolution tuning
    jeta = cms.vdouble(0.8, 1.3, 1.9, 2.5),
    # tuning parameters
    #Run I, based on 53X / JME-13-003
    #jpar = cms.vdouble(1.20,1.13,1.03,0.96,1.08),
    #pjpar = cms.vdouble(-1.9,0.6383)
    #Run II data, based on 76X
    #https://indico.cern.ch/event/527789/contributions/2160488/attachments/1271716/1884792/nmirman_20160511.pdf
    jpar = cms.vdouble(1.26,1.14,1.13,1.13,1.06),
    pjpar = cms.vdouble(-3.3,0.5961),
)
| 31.159091
| 113
| 0.617797
| 217
| 1,371
| 3.889401
| 0.35023
| 0.118483
| 0.078199
| 0.07109
| 0.810427
| 0.810427
| 0.810427
| 0.810427
| 0.810427
| 0.810427
| 0
| 0.191246
| 0.233406
| 1,371
| 43
| 114
| 31.883721
| 0.611798
| 0.480671
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
401e81bb2bdd923f8c901b49121472e38efa1cc1
| 35,636
|
py
|
Python
|
irlmethods/general_deep_maxent.py
|
ranok92/deepirl
|
88c7e76986243cf0b988d8d7dc0eef6b58e07864
|
[
"MIT"
] | 2
|
2019-01-04T22:03:15.000Z
|
2019-04-03T00:16:11.000Z
|
irlmethods/general_deep_maxent.py
|
ranok92/deepirl
|
88c7e76986243cf0b988d8d7dc0eef6b58e07864
|
[
"MIT"
] | null | null | null |
irlmethods/general_deep_maxent.py
|
ranok92/deepirl
|
88c7e76986243cf0b988d8d7dc0eef6b58e07864
|
[
"MIT"
] | null | null | null |
"""
Implements deep maxent IRL (Wulfmeier et. all) in a general, feature-type
agnostic way.
"""
import sys
import random
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam
from tensorboardX import SummaryWriter
sys.path.insert(0, "..")
from neural_nets.base_network import BaseNN
from irlmethods.irlUtils import play_features as play
from irlmethods.irlUtils import lcr_regularizer, monotonic_regularizer
from rlmethods.rlutils import play_complete
import utils
# Device used for every tensor/module in this module: GPU when available.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
class RewardNet(BaseNN):
    """Fully connected reward network mapping a state-feature vector to a
    scalar reward squashed into (-1, 1) by tanh."""

    def __init__(self, state_dims, hidden_dims=128):
        super(RewardNet, self).__init__()
        # Three stacked linear layers followed by a scalar head.
        # Attribute names are part of the saved state dict; keep them stable.
        self.input = nn.Linear(state_dims, hidden_dims)
        self.linear1 = nn.Linear(hidden_dims, hidden_dims)
        self.linear2 = nn.Linear(hidden_dims, hidden_dims)
        self.head = nn.Linear(hidden_dims, 1)

    def forward(self, x):
        """ReLU through the hidden stack, tanh on the head output."""
        hidden = x
        for layer in (self.input, self.linear1, self.linear2):
            hidden = F.relu(layer(hidden))
        return torch.tanh(self.head(hidden))
class GeneralDeepMaxent:
    """
    Implements deep maxent IRL (Wulfmeier et. al) in a state-type agnostic way.
    """

    def __init__(
        self,
        rl,
        env,
        expert_trajectories,
        learning_rate=1e-3,
        l2_regularization=1e-5,
        save_folder="./",
        saving_interval=10,
    ):
        """
        :param rl: RL method used to optimize a policy under the learned
            reward; must expose .feature_extractor, .policy, .train(),
            .reset_training() and a writable .tbx_writer attribute.
        :param env: environment whose reset() output is used to size the
            reward network input.
        :param expert_trajectories: list of expert state-feature tensors.
        :param learning_rate: Adam learning rate for the reward network.
        :param l2_regularization: Adam weight decay for the reward network.
        :param save_folder: folder where policy/reward checkpoints are saved.
        :param saving_interval: checkpoint interval, in IRL iterations.
        """
        # RL related
        self.rl = rl
        self.feature_extractor = self.rl.feature_extractor

        # environment attributes
        self.env = env
        # the feature-vector length fixes the reward network's input width
        state_size = self.feature_extractor.extract_features(
            env.reset()
        ).shape[0]

        # reward net
        self.reward_net = RewardNet(state_size, hidden_dims=256)
        self.reward_net = self.reward_net.to(DEVICE)
        self.reward_optim = Adam(
            self.reward_net.parameters(),
            lr=learning_rate,
            weight_decay=l2_regularization,
        )

        # expert info (cast once to float and move to the training device)
        self.expert_trajectories = [
            traj.to(torch.float).to(DEVICE) for traj in expert_trajectories
        ]

        # logging and saving
        self.save_path = Path(save_folder)
        self.tbx_writer = SummaryWriter(
            str(self.save_path / "tensorboard_logs")
        )

        # highjack RL method's tbx_writer so RL and IRL share one log
        self.rl.tbx_writer = self.tbx_writer

        self.data_table = utils.DataTable()

        # training meta
        self.training_i = 0
        # NOTE(review): stored but not consulted by this class's
        # train_episode(), which saves every iteration; subclasses use it.
        self.saving_interval = saving_interval

    def save_models(self, filename=None):
        """Save the RL policy and the reward network under save_path."""
        self.rl.policy.save(str(self.save_path / "policy"), filename=filename)
        self.reward_net.save(
            str(self.save_path / "reward_net"), filename=filename
        )

    def generate_trajectories(
        self, num_trajectories, max_env_steps, stochastic,
    ):
        """
        Generate trajectories in environemnt using leanred RL policy.
        :param num_trajectories: number of trajectories to generate.
        :type num_trajectories: int
        :param max_env_steps: max steps to take in environment (rollout length.)
        :type max_env_steps: int
        :return: list of features encountered in playthrough.
        :rtype: list of tensors of shape (num_states x feature_length)
        """
        states = []
        for _ in range(num_trajectories):
            generated_states = play(
                self.env,
                self.rl.policy,
                self.feature_extractor,
                max_env_steps,
                stochastic,
            )
            states.append(generated_states)
        return states

    def discounted_rewards(self, rewards, gamma, account_for_terminal_state):
        """
        Return the discounted sum of a 1-D sequence of rewards.

        :param rewards: per-step rewards; the final element is treated
            specially depending on account_for_terminal_state.
        :param gamma: discount factor in (0, 1).
        :param account_for_terminal_state: when True, the last reward is
            weighted by (gamma / (1 - gamma)) * gamma**(t + 1), i.e. as if
            the terminal state repeated forever.
        """
        discounted_sum = 0
        t = 0
        gamma_t = 1
        # accumulate all but the last reward with the running discount
        for t, reward in enumerate(rewards[:-1]):
            discounted_sum += gamma_t * reward
            gamma_t *= gamma
        if account_for_terminal_state:
            # geometric-series weight for an absorbing terminal state
            discounted_sum += (
                (gamma / (1 - gamma)) * gamma ** (t + 1) * rewards[-1]
            )
        else:
            discounted_sum += gamma_t * rewards[-1]
        return discounted_sum

    def train_episode(
        self,
        num_rl_episodes,
        max_rl_episode_length,
        num_trajectory_samples,
        max_env_steps,
        reset_training,
        account_for_terminal_state,
        gamma,
        stochastic_sampling,
    ):
        """
        perform IRL training.
        :param num_rl_episodes: Number of RL iterations for this IRL iteration.
        :type num_rl_episodes: int.
        :param max_rl_episode_length: maximum number of environment steps to
        take when doing rollouts using learned RL agent.
        :type max_rl_episode_length: int
        :param num_trajectory_samples: Number of trajectories to sample using
        learned RL agent.
        :type num_trajectory_samples: int
        :param max_env_steps: maximum number of environment steps to take,
        both when training RL agent and when generating rollouts.
        :type max_env_steps: int
        :param reset_training: Whether to reset RL training every iteration
        or not.
        :type reset_training: Boolean.
        :param account_for_terminal_state: Whether to account for a state
        being terminal or not. If true, (gamma/1-gamma)*R will be immitated
        by padding the trajectory with its ending state until max_env_steps
        length is reached. e.g. if max_env_steps is 5, the trajectory [s_0,
        s_1, s_2] will be padded to [s_0, s_1, s_2, s_2, s_2].
        :type account_for_terminal_state: Boolean.
        :param gamma: The discounting factor.
        :type gamma: float.
        :param stochastic_sampling: Sample trajectories using stochastic
        policy instead of deterministic 'best action policy'
        :type stochastic_sampling: Boolean.
        """
        # expert loss: total discounted reward the net assigns to all experts
        expert_loss = 0
        for traj in self.expert_trajectories:
            expert_rewards = self.reward_net(traj)
            expert_loss += self.discounted_rewards(
                expert_rewards, gamma, account_for_terminal_state
            )

        # policy loss: discounted reward of fresh rollouts under the policy
        trajectories = self.generate_trajectories(
            num_trajectory_samples, max_env_steps, stochastic_sampling
        )
        policy_loss = 0
        for traj in trajectories:
            policy_rewards = self.reward_net(traj)
            policy_loss += self.discounted_rewards(
                policy_rewards, gamma, account_for_terminal_state
            )
        # rescale so expert and policy terms are comparable in count
        policy_loss = (
            len(self.expert_trajectories) / num_trajectory_samples
        ) * policy_loss

        # Backpropagate IRL loss
        loss = policy_loss - expert_loss
        self.reward_optim.zero_grad()
        loss.backward()
        self.reward_optim.step()

        # train RL agent against the updated reward network
        if reset_training:
            self.rl.reset_training()
        self.rl.train(
            num_rl_episodes,
            max_rl_episode_length,
            reward_network=self.reward_net,
        )

        # logging
        self.tbx_writer.add_scalar(
            "IRL/policy_loss", policy_loss, self.training_i
        )
        self.tbx_writer.add_scalar(
            "IRL/expert_loss", expert_loss, self.training_i
        )
        self.tbx_writer.add_scalar("IRL/total_loss", loss, self.training_i)
        self.data_table.add_row(
            {
                "IRL/policy_loss": policy_loss.item(),
                "IRL/expert_loss": expert_loss.item(),
                "IRL/total_loss": loss.item(),
            },
            self.training_i,
        )

        # save policy and reward network
        # TODO: make a uniform dumping function for all agents.
        self.save_models(filename="{}.pt".format(self.training_i))

        # increment training counter
        self.training_i += 1

    def train(
        self,
        num_irl_episodes,
        num_rl_episodes,
        max_rl_episode_length,
        num_trajectory_samples,
        max_env_steps,
        reset_training=False,
        account_for_terminal_state=False,
        gamma=0.99,
        stochastic_sampling=False,
    ):
        """
        Runs the train_episode() function for 'num_irl_episodes' times. Other
        parameters are identical to the aforementioned function, with the same
        description and requirements.
        """
        for _ in range(num_irl_episodes):
            print("IRL episode {}".format(self.training_i), end="\r")
            self.train_episode(
                num_rl_episodes,
                max_rl_episode_length,
                num_trajectory_samples,
                max_env_steps,
                reset_training,
                account_for_terminal_state,
                gamma,
                stochastic_sampling,
            )
class MixingDeepMaxent(GeneralDeepMaxent):
    """Deep maxent IRL variant that mixes expert trajectories into the policy
    sample set and supports an expert-only pre-training phase."""

    def __init__(
        self,
        rl,
        env,
        expert_trajectories,
        learning_rate=0.001,
        l2_regularization=1e-05,
        save_folder="./",
        saving_interval=25,
    ):
        """
        Same parameters as GeneralDeepMaxent; additionally shuffles the
        expert set and splits it in half into 'label' and 'train'
        trajectories used by pre_train_episode().
        """
        super().__init__(
            rl,
            env,
            expert_trajectories,
            learning_rate=learning_rate,
            l2_regularization=l2_regularization,
            save_folder=save_folder,
            saving_interval=saving_interval,
        )

        # expert and training datasets
        # random.sample with len(...) produces a shuffled copy
        self.all_trajectories = random.sample(
            expert_trajectories, len(expert_trajectories)
        )
        # first half: plays the "expert" role during pre-training
        self.expert_label_trajectories = [
            traj.to(torch.float).to(DEVICE)
            for traj in self.all_trajectories[
                : len(self.all_trajectories) // 2
            ]
        ]
        # second half: plays the "generator" role during pre-training
        self.expert_train_trajectories = [
            traj.to(torch.float).to(DEVICE)
            for traj in self.all_trajectories[
                len(self.all_trajectories) // 2 :
            ]
        ]

        self.pre_data_table = utils.DataTable()

        # initial model save
        self.save_models(filename="initial_save.pt")

    def train_episode(
        self,
        num_rl_episodes,
        max_rl_episode_length,
        max_env_steps,
        reset_training,
        account_for_terminal_state,
        gamma,
        stochastic_sampling,
        num_expert_samples,
        num_policy_samples,
    ):
        """
        perform IRL with mix-in of expert samples.
        :param num_rl_episodes: Number of RL iterations for this IRL iteration.
        :type num_rl_episodes: int.
        :param max_rl_episode_length: maximum number of environment steps to
        take when doing rollouts using learned RL agent.
        :type max_rl_episode_length: int
        :param num_trajectory_samples: Number of trajectories to sample using
        learned RL agent.
        :type num_trajectory_samples: int
        :param max_env_steps: maximum number of environment steps to take,
        both when training RL agent and when generating rollouts.
        :type max_env_steps: int
        :param reset_training: Whether to reset RL training every iteration
        or not.
        :type reset_training: Boolean.
        :param account_for_terminal_state: Whether to account for a state
        being terminal or not. If true, (gamma/1-gamma)*R will be immitated
        by padding the trajectory with its ending state until max_env_steps
        length is reached. e.g. if max_env_steps is 5, the trajectory [s_0,
        s_1, s_2] will be padded to [s_0, s_1, s_2, s_2, s_2].
        :type account_for_terminal_state: Boolean.
        :param gamma: The discounting factor.
        :type gamma: float.
        :param stochastic_sampling: Sample trajectories using stochastic
        policy instead of deterministic 'best action policy'
        :type stochastic_sampling: Boolean.
        """
        # expert loss over a random subset of the expert set
        expert_loss = 0
        expert_samples = random.sample(
            self.expert_trajectories, num_expert_samples
        )
        for traj in expert_samples:
            expert_rewards = self.reward_net(traj)
            expert_loss += self.discounted_rewards(
                expert_rewards, gamma, account_for_terminal_state
            )

        # policy loss
        # NOTE(review): rolls out num_expert_samples // 2 policy trajectories
        # but mixes in num_policy_samples // 2 expert ones — confirm this
        # asymmetry is intended.
        trajectories = self.generate_trajectories(
            num_expert_samples // 2, max_env_steps, stochastic_sampling
        )
        # mix in expert samples.
        trajectories.extend(
            random.sample(self.expert_trajectories, num_policy_samples // 2)
        )
        policy_loss = 0
        for traj in trajectories:
            policy_rewards = self.reward_net(traj)
            policy_loss += self.discounted_rewards(
                policy_rewards, gamma, account_for_terminal_state
            )
        # rescale policy term relative to the expert term
        policy_loss = (num_expert_samples / num_policy_samples) * policy_loss

        # Backpropagate IRL loss
        loss = policy_loss - expert_loss
        self.reward_optim.zero_grad()
        loss.backward()
        self.reward_optim.step()

        # train RL agent against the updated reward network
        if reset_training:
            self.rl.reset_training()
        self.rl.train(
            num_rl_episodes,
            max_rl_episode_length,
            reward_network=self.reward_net,
        )

        # logging
        self.tbx_writer.add_scalar(
            "IRL/policy_loss", policy_loss, self.training_i
        )
        self.tbx_writer.add_scalar(
            "IRL/expert_loss", expert_loss, self.training_i
        )
        self.tbx_writer.add_scalar("IRL/total_loss", loss, self.training_i)
        self.data_table.add_row(
            {
                "IRL/policy_loss": policy_loss.item(),
                "IRL/expert_loss": expert_loss.item(),
                "IRL/total_loss": loss.item(),
            },
            self.training_i,
        )

        # save policy and reward network, honoring saving_interval
        # TODO: make a uniform dumping function for all agents.
        if (self.training_i + 1) % self.saving_interval == 0:
            self.save_models(filename="{}.pt".format(self.training_i))

        # increment training counter
        self.training_i += 1

    def pre_train_episode(
        self, num_trajectory_samples, account_for_terminal_state, gamma,
    ):
        """
        perform IRL pre-training by using only expert samples.
        :param num_trajectory_samples: Number of trajectories to sample using
        learned RL agent.
        :type num_trajectory_samples: int
        :param account_for_terminal_state: Whether to account for a state
        being terminal or not. If true, (gamma/1-gamma)*R will be immitated
        by padding the trajectory with its ending state until max_env_steps
        length is reached. e.g. if max_env_steps is 5, the trajectory [s_0,
        s_1, s_2] will be padded to [s_0, s_1, s_2, s_2, s_2].
        :type account_for_terminal_state: Boolean.
        :param gamma: The discounting factor.
        :type gamma: float.
        """
        # expert loss from the "label" half of the expert split
        expert_loss = 0
        expert_sample = random.sample(
            self.expert_label_trajectories, num_trajectory_samples
        )
        for traj in expert_sample:
            expert_rewards = self.reward_net(traj)
            expert_loss += self.discounted_rewards(
                expert_rewards, gamma, account_for_terminal_state
            )

        # policy loss — here the "train" half of the expert split stands in
        # for generator rollouts (no environment interaction)
        trajectories = random.sample(
            self.expert_train_trajectories, num_trajectory_samples
        )
        generator_loss = 0
        for traj in trajectories:
            policy_rewards = self.reward_net(traj)
            generator_loss += self.discounted_rewards(
                policy_rewards, gamma, account_for_terminal_state
            )
        generator_loss = (
            len(self.expert_trajectories) / num_trajectory_samples
        ) * generator_loss

        # Backpropagate IRL loss
        loss = generator_loss - expert_loss
        self.reward_optim.zero_grad()
        loss.backward()
        self.reward_optim.step()

        # logging (separate 'pre_IRL' tags and table from the main phase)
        self.tbx_writer.add_scalar(
            "pre_IRL/generator_loss", generator_loss, self.training_i
        )
        self.tbx_writer.add_scalar(
            "pre_IRL/expert_loss", expert_loss, self.training_i
        )
        self.tbx_writer.add_scalar("pre_IRL/total_loss", loss, self.training_i)
        self.pre_data_table.add_row(
            {
                "pre_IRL/policy_loss": generator_loss.item(),
                "pre_IRL/expert_loss": expert_loss.item(),
                "pre_IRL/total_loss": loss.item(),
            },
            self.training_i,
        )

        # save policy and reward network
        self.reward_net.save(
            str(self.save_path / "reward_net"),
            filename="pre_{}.pt".format(self.training_i),
        )

        # increment training counter
        self.training_i += 1

    def pre_train(
        self,
        num_pretrain_episodes,
        num_trajectory_samples,
        account_for_terminal_state=False,
        gamma=0.99,
    ):
        """
        Runs the train_episode() function for 'num_irl_episodes' times. Other
        parameters are identical to the aforementioned function, with the same
        description and requirements.
        """
        for _ in range(num_pretrain_episodes):
            print(
                "IRL pre-training episode {}".format(self.training_i), end="\r"
            )
            self.pre_train_episode(
                num_trajectory_samples, account_for_terminal_state, gamma
            )

    def train(
        self,
        num_irl_episodes,
        num_rl_episodes,
        max_rl_episode_length,
        max_env_steps,
        reset_training=False,
        account_for_terminal_state=False,
        gamma=0.99,
        stochastic_sampling=False,
        num_expert_samples=64,
        num_policy_samples=64,
    ):
        """
        Runs the train_episode() function for 'num_irl_episodes' times. Other
        parameters are identical to the aforementioned function, with the same
        description and requirements.
        """
        for _ in range(num_irl_episodes):
            print("IRL episode {}".format(self.training_i), end="\r")
            self.train_episode(
                num_rl_episodes,
                max_rl_episode_length,
                max_env_steps,
                reset_training,
                account_for_terminal_state,
                gamma,
                stochastic_sampling,
                num_expert_samples,
                num_policy_samples,
            )

        # final model save
        self.save_models(filename="final.pt")
class GCL(MixingDeepMaxent):
    """Guided-cost-learning style variant: policy samples are importance
    weighted via their action log-probabilities (log-sum-exp partition
    estimate) and the reward is regularized with LCR/monotonic terms."""

    def generate_trajectories(self, num_trajectories, max_env_steps, ped_id=None):
        """
        Generate trajectories in environemnt using leanred RL policy.
        :param num_trajectories: number of trajectories to generate.
        :type num_trajectories: int
        :param max_env_steps: max steps to take in environment (rollout length.)
        :type max_env_steps: int
        :return: list of features encountered in playthrough.
        :rtype: list of tensors of shape (num_states x feature_length)
        """
        # Unlike the parent class, returns full transition buffers (with
        # actions and log-probs) rather than bare state features.
        buffers = []
        for _ in range(num_trajectories):
            generated_buffer = play_complete(
                self.rl.policy,
                self.env,
                self.feature_extractor,
                max_env_steps,
                ped_id=ped_id
            )
            buffers.append(generated_buffer)
        return buffers

    def train_episode(
        self,
        num_rl_episodes,
        max_rl_episode_length,
        max_env_steps,
        reset_training,
        account_for_terminal_state,
        gamma,
        stochastic_sampling,
        num_expert_samples,
        num_policy_samples,
    ):
        """
        perform IRL with mix-in of expert samples.
        :param num_rl_episodes: Number of RL iterations for this IRL iteration.
        :type num_rl_episodes: int.
        :param max_rl_episode_length: maximum number of environment steps to
        take when doing rollouts using learned RL agent.
        :type max_rl_episode_length: int
        :param num_trajectory_samples: Number of trajectories to sample using
        learned RL agent.
        :type num_trajectory_samples: int
        :param max_env_steps: maximum number of environment steps to take,
        both when training RL agent and when generating rollouts.
        :type max_env_steps: int
        :param reset_training: Whether to reset RL training every iteration
        or not.
        :type reset_training: Boolean.
        :param account_for_terminal_state: Whether to account for a state
        being terminal or not. If true, (gamma/1-gamma)*R will be immitated
        by padding the trajectory with its ending state until max_env_steps
        length is reached. e.g. if max_env_steps is 5, the trajectory [s_0,
        s_1, s_2] will be padded to [s_0, s_1, s_2, s_2, s_2].
        :type account_for_terminal_state: Boolean.
        :param gamma: The discounting factor.
        :type gamma: float.
        :param stochastic_sampling: Sample trajectories using stochastic
        policy instead of deterministic 'best action policy'
        :type stochastic_sampling: Boolean.
        """
        # regularizers (accumulated over both expert and policy rewards)
        g_lcr = 0
        g_mono = 0

        # expert loss
        expert_loss = 0
        expert_samples = random.sample(
            self.expert_trajectories, num_expert_samples
        )
        for traj in expert_samples:
            expert_rewards = self.reward_net(traj)

            # update regularizers
            g_lcr += lcr_regularizer(expert_rewards)
            g_mono += monotonic_regularizer(expert_rewards)

            expert_loss += self.discounted_rewards(
                expert_rewards, gamma, account_for_terminal_state
            )

        # policy loss
        trajectories = self.generate_trajectories(
            num_expert_samples, max_env_steps
        )
        rewards = []
        log_pis = []
        for traj in trajectories:
            # stack the visited states (plus the final next_state) into one
            # (T+1, feature_length) tensor for a single reward-net pass
            states = [
                torch.from_numpy(tran.state).to(torch.float).to(DEVICE)
                for tran in traj
            ]
            states.append(
                torch.from_numpy(traj[-1].next_state)
                .to(torch.float)
                .to(DEVICE)
            )
            states = torch.stack(states)
            reward = self.reward_net(states)

            # update regularizers
            g_lcr += lcr_regularizer(reward)
            # BUGFIX: was lcr_regularizer(reward) — a copy-paste error; the
            # monotonic accumulator must use monotonic_regularizer, matching
            # the expert loop above and PerTrajGCL.train_episode().
            g_mono += monotonic_regularizer(reward)

            reward_sum = self.discounted_rewards(reward, gamma, traj[-1].done)
            rewards.append(reward_sum)

            # total log-probability of the trajectory under the policy
            log_pi = [
                torch.from_numpy(tran.action_log_prob)
                .to(torch.float)
                .to(DEVICE)
                for tran in traj
            ]
            log_pis.append(torch.tensor(log_pi).sum())

        # log sum exp trick: numerically stable estimate of log Z from the
        # importance-weighted exponents (reward - log_pi)
        exponents = torch.cat(rewards) - torch.tensor(log_pis).to(DEVICE)
        max_exponent = torch.max(exponents)
        log_Z = max_exponent + torch.log(
            torch.exp(exponents - max_exponent).sum()
        )
        policy_loss = log_Z
        policy_loss = (num_expert_samples) * policy_loss

        # Backpropagate IRL loss
        loss = policy_loss - expert_loss + g_mono + g_lcr
        self.reward_optim.zero_grad()
        loss.backward()
        self.reward_optim.step()

        # train RL agent against the updated reward network
        if reset_training:
            self.rl.reset_training()
        self.rl.train(
            num_rl_episodes,
            max_rl_episode_length,
            reward_network=self.reward_net,
        )

        # logging
        self.tbx_writer.add_scalar(
            "IRL/policy_loss", policy_loss, self.training_i
        )
        self.tbx_writer.add_scalar(
            "IRL/expert_loss", expert_loss, self.training_i
        )
        self.tbx_writer.add_scalar("IRL/total_loss", loss, self.training_i)
        self.tbx_writer.add_scalar("IRL/log_Z", log_Z.item(), self.training_i)
        self.data_table.add_row(
            {
                "IRL/policy_loss": policy_loss.item(),
                "IRL/expert_loss": expert_loss.item(),
                "IRL/total_loss": loss.item(),
                "IRL/log_Z": log_Z.item(),
            },
            self.training_i,
        )

        # save policy and reward network, honoring saving_interval
        # TODO: make a uniform dumping function for all agents.
        if (self.training_i + 1) % self.saving_interval == 0:
            self.save_models(filename="{}.pt".format(self.training_i))

        # increment training counter
        self.training_i += 1
class PerTrajGCL(GCL):
    """GCL variant that samples policy rollouts per expert trajectory index
    (ped_id = expert index + 1) instead of one shared batch."""

    def train_episode(
        self,
        num_rl_episodes,
        max_rl_episode_length,
        max_env_steps,
        reset_training,
        account_for_terminal_state,
        gamma,
        stochastic_sampling,
        num_expert_samples,
        num_policy_samples,
    ):
        """
        perform IRL with mix-in of expert samples.
        :param num_rl_episodes: Number of RL iterations for this IRL iteration.
        :type num_rl_episodes: int.
        :param max_rl_episode_length: maximum number of environment steps to
        take when doing rollouts using learned RL agent.
        :type max_rl_episode_length: int
        :param num_trajectory_samples: Number of trajectories to sample using
        learned RL agent.
        :type num_trajectory_samples: int
        :param max_env_steps: maximum number of environment steps to take,
        both when training RL agent and when generating rollouts.
        :type max_env_steps: int
        :param reset_training: Whether to reset RL training every iteration
        or not.
        :type reset_training: Boolean.
        :param account_for_terminal_state: Whether to account for a state
        being terminal or not. If true, (gamma/1-gamma)*R will be immitated
        by padding the trajectory with its ending state until max_env_steps
        length is reached. e.g. if max_env_steps is 5, the trajectory [s_0,
        s_1, s_2] will be padded to [s_0, s_1, s_2, s_2, s_2].
        :type account_for_terminal_state: Boolean.
        :param gamma: The discounting factor.
        :type gamma: float.
        :param stochastic_sampling: Sample trajectories using stochastic
        policy instead of deterministic 'best action policy'
        :type stochastic_sampling: Boolean.
        """
        # regularizers (accumulated over both expert and policy rewards)
        g_lcr = 0
        g_mono = 0

        # expert loss — sample (index, trajectory) pairs so the index can
        # seed per-trajectory policy rollouts below
        expert_loss = 0
        expert_samples = random.sample(
            list(enumerate(self.expert_trajectories)), num_expert_samples
        )
        for _, traj in expert_samples:
            expert_rewards = self.reward_net(traj)
            expert_loss += self.discounted_rewards(
                expert_rewards, gamma, account_for_terminal_state
            )

            # update regularizers
            g_lcr += lcr_regularizer(expert_rewards)
            g_mono += monotonic_regularizer(expert_rewards)

        # policy loss — num_policy_samples rollouts per sampled expert index
        trajectories = []
        for idx, _ in expert_samples:
            trajectories.extend(
                self.generate_trajectories(
                    num_policy_samples, max_env_steps, idx + 1
                )
            )
        policy_loss = 0

        # mix in expert samples.
        # NOTE(review): expert_mixin_samples is never used afterwards — the
        # mix-in appears to be dead code here; confirm whether trajectories
        # was meant to be extended with it (as in MixingDeepMaxent).
        expert_mixin_samples = random.sample(
            self.expert_trajectories, num_policy_samples // 2
        )

        rewards = []
        log_pis = []
        for traj in trajectories:
            # stack visited states (plus the final next_state) for a single
            # reward-net forward pass
            states = [
                torch.from_numpy(tran.state).to(torch.float).to(DEVICE)
                for tran in traj
            ]
            states.append(
                torch.from_numpy(traj[-1].next_state)
                .to(torch.float)
                .to(DEVICE)
            )
            states = torch.stack(states)
            reward = self.reward_net(states)

            # update regularizers
            g_lcr += lcr_regularizer(reward)
            g_mono += monotonic_regularizer(reward)

            reward_sum = self.discounted_rewards(reward, gamma, traj[-1].done)
            rewards.append(reward_sum)

            # total log-probability of the trajectory under the policy
            log_pi = [
                torch.from_numpy(tran.action_log_prob)
                .to(torch.float)
                .to(DEVICE)
                for tran in traj
            ]
            log_pis.append(torch.tensor(log_pi).sum())

        # log sum exp trick: numerically stable log Z estimate
        exponents = torch.cat(rewards) - torch.tensor(log_pis).to(DEVICE)
        max_exponent = torch.max(exponents)
        log_Z = max_exponent + torch.log(
            torch.exp(exponents - max_exponent).sum()
        )
        policy_loss += log_Z
        policy_loss = (num_expert_samples) * policy_loss

        # Backpropagate IRL loss
        loss = policy_loss - expert_loss + g_mono + g_lcr
        self.reward_optim.zero_grad()
        loss.backward()
        self.reward_optim.step()

        # train RL agent against the updated reward network
        if reset_training:
            self.rl.reset_training()
        self.rl.train(
            num_rl_episodes,
            max_rl_episode_length,
            reward_network=self.reward_net,
        )

        # logging
        self.tbx_writer.add_scalar(
            "IRL/policy_loss", policy_loss, self.training_i
        )
        self.tbx_writer.add_scalar(
            "IRL/expert_loss", expert_loss, self.training_i
        )
        self.tbx_writer.add_scalar("IRL/total_loss", loss, self.training_i)
        self.tbx_writer.add_scalar("IRL/log_Z", log_Z.item(), self.training_i)
        self.data_table.add_row(
            {
                "IRL/policy_loss": policy_loss.item(),
                "IRL/expert_loss": expert_loss.item(),
                "IRL/total_loss": loss.item(),
                "IRL/log_Z": log_Z.item(),
            },
            self.training_i,
        )

        # save policy and reward network, honoring saving_interval
        # TODO: make a uniform dumping function for all agents.
        if (self.training_i + 1) % self.saving_interval == 0:
            self.save_models(filename="{}.pt".format(self.training_i))

        # increment training counter
        self.training_i += 1
class ExpertOnlyMaxent:
    """
    Implements expert only deep maxent, using only expert demonstrations and
    no environment interaction.
    """

    def __init__(
        self,
        state_size,
        expert_trajectories,
        learning_rate=1e-3,
        l2_regularization=1e-5,
        save_folder="./",
    ):
        """
        :param state_size: length of a state feature vector (reward net input).
        :param expert_trajectories: list of expert state-feature tensors;
            shuffled and split in half into "expert" and "training" sets.
        :param learning_rate: Adam learning rate for the reward network.
        :param l2_regularization: Adam weight decay for the reward network.
        :param save_folder: folder where reward checkpoints and logs go.
        """
        # reward net
        self.reward_net = RewardNet(state_size, hidden_dims=256)
        self.reward_net = self.reward_net.to(DEVICE)
        self.reward_optim = Adam(
            self.reward_net.parameters(),
            lr=learning_rate,
            weight_decay=l2_regularization,
        )

        # expert and training datasets
        # random.sample with len(...) produces a shuffled copy
        self.all_trajectories = random.sample(
            expert_trajectories, len(expert_trajectories)
        )
        # first half: plays the "expert" role
        self.expert_trajectories = [
            traj.to(torch.float).to(DEVICE)
            for traj in self.all_trajectories[
                : len(self.all_trajectories) // 2
            ]
        ]
        # second half: stands in for generator rollouts
        self.training_trajectories = [
            traj.to(torch.float).to(DEVICE)
            for traj in self.all_trajectories[
                len(self.all_trajectories) // 2 :
            ]
        ]

        # logging and saving
        self.save_path = Path(save_folder)
        self.tbx_writer = SummaryWriter(
            str(self.save_path / "tensorboard_logs")
        )
        self.data_table = utils.DataTable()

        # training meta
        self.training_i = 0

    def discounted_rewards(self, rewards, gamma, account_for_terminal_state):
        """
        Return the discounted sum of a 1-D sequence of rewards.

        Identical to GeneralDeepMaxent.discounted_rewards: the last reward
        is weighted by (gamma / (1 - gamma)) * gamma**(t + 1) when
        account_for_terminal_state is True (absorbing terminal state).
        """
        discounted_sum = 0
        t = 0
        gamma_t = 1
        for t, reward in enumerate(rewards[:-1]):
            discounted_sum += gamma_t * reward
            gamma_t *= gamma
        if account_for_terminal_state:
            discounted_sum += (
                (gamma / (1 - gamma)) * gamma ** (t + 1) * rewards[-1]
            )
        else:
            discounted_sum += gamma_t * rewards[-1]
        return discounted_sum

    def train_episode(
        self, num_trajectory_samples, account_for_terminal_state, gamma,
    ):
        """
        perform IRL pre-training by using only expert samples.
        :param num_trajectory_samples: Number of trajectories to sample using
        learned RL agent.
        :type num_trajectory_samples: int
        :param account_for_terminal_state: Whether to account for a state
        being terminal or not. If true, (gamma/1-gamma)*R will be immitated
        by padding the trajectory with its ending state until max_env_steps
        length is reached. e.g. if max_env_steps is 5, the trajectory [s_0,
        s_1, s_2] will be padded to [s_0, s_1, s_2, s_2, s_2].
        :type account_for_terminal_state: Boolean.
        :param gamma: The discounting factor.
        :type gamma: float.
        """
        # expert loss from the "expert" half of the split
        expert_loss = 0
        expert_sample = random.sample(
            self.expert_trajectories, num_trajectory_samples
        )
        for traj in expert_sample:
            expert_rewards = self.reward_net(traj)
            expert_loss += self.discounted_rewards(
                expert_rewards, gamma, account_for_terminal_state
            )

        # policy loss — the "training" half stands in for policy rollouts
        trajectories = random.sample(
            self.training_trajectories, num_trajectory_samples
        )
        generator_loss = 0
        for traj in trajectories:
            policy_rewards = self.reward_net(traj)
            generator_loss += self.discounted_rewards(
                policy_rewards, gamma, account_for_terminal_state
            )
        generator_loss = (
            len(self.expert_trajectories) / num_trajectory_samples
        ) * generator_loss

        # Backpropagate IRL loss
        loss = generator_loss - expert_loss
        self.reward_optim.zero_grad()
        loss.backward()
        self.reward_optim.step()

        # logging
        self.tbx_writer.add_scalar(
            "IRL/generator_loss", generator_loss, self.training_i
        )
        self.tbx_writer.add_scalar(
            "IRL/expert_loss", expert_loss, self.training_i
        )
        self.tbx_writer.add_scalar("IRL/total_loss", loss, self.training_i)
        self.data_table.add_row(
            {
                "IRL/policy_loss": generator_loss.item(),
                "IRL/expert_loss": expert_loss.item(),
                "IRL/total_loss": loss.item(),
            },
            self.training_i,
        )

        # save policy and reward network (overwrites the same checkpoint)
        self.reward_net.save(str(self.save_path / "reward_net"))

        # increment training counter
        self.training_i += 1

    def train(
        self,
        num_episodes,
        num_trajectory_samples,
        account_for_terminal_state=False,
        gamma=0.99,
    ):
        """
        Runs the train_episode() function for 'num_irl_episodes' times. Other
        parameters are identical to the aforementioned function, with the same
        description and requirements.
        """
        for _ in range(num_episodes):
            print(
                "IRL pre-training episode {}".format(self.training_i), end="\r"
            )
            self.train_episode(
                num_trajectory_samples, account_for_terminal_state, gamma
            )
| 31.425044
| 82
| 0.599282
| 4,132
| 35,636
| 4.897144
| 0.071394
| 0.028466
| 0.029553
| 0.045466
| 0.902001
| 0.89276
| 0.872152
| 0.868495
| 0.865135
| 0.851791
| 0
| 0.007283
| 0.32571
| 35,636
| 1,133
| 83
| 31.45278
| 0.834825
| 0.262038
| 0
| 0.686765
| 0
| 0
| 0.033062
| 0.000888
| 0
| 0
| 0
| 0.00353
| 0
| 1
| 0.029412
| false
| 0
| 0.019118
| 0
| 0.064706
| 0.005882
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4022aeed50e201cee8d2db6f51544e9857935e63
| 89
|
py
|
Python
|
src/superlists/lists/views.py
|
ryuji0123/tdd_with_python
|
500f96c8aff6d01a9f2dbb7470cd341019304748
|
[
"MIT"
] | null | null | null |
src/superlists/lists/views.py
|
ryuji0123/tdd_with_python
|
500f96c8aff6d01a9f2dbb7470cd341019304748
|
[
"MIT"
] | null | null | null |
src/superlists/lists/views.py
|
ryuji0123/tdd_with_python
|
500f96c8aff6d01a9f2dbb7470cd341019304748
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
def home_page():
    """Stub view for the home page; no behaviour implemented yet (TDD)."""
    return None
| 14.833333
| 35
| 0.741573
| 13
| 89
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.191011
| 89
| 5
| 36
| 17.8
| 0.902778
| 0.258427
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
40434ba961e770c5310c1a405296f60272acd88b
| 116
|
py
|
Python
|
bolo/src/bolo/auth/__init__.py
|
KrazyKahunaGuy/Bolo-Backend-Basic-Authentication-
|
a0c893674ecf3c73d6c5267298334167dd400670
|
[
"BSD-3-Clause"
] | null | null | null |
bolo/src/bolo/auth/__init__.py
|
KrazyKahunaGuy/Bolo-Backend-Basic-Authentication-
|
a0c893674ecf3c73d6c5267298334167dd400670
|
[
"BSD-3-Clause"
] | null | null | null |
bolo/src/bolo/auth/__init__.py
|
KrazyKahunaGuy/Bolo-Backend-Basic-Authentication-
|
a0c893674ecf3c73d6c5267298334167dd400670
|
[
"BSD-3-Clause"
] | null | null | null |
from flask import Blueprint

# Blueprint grouping all authentication routes under the /user URL prefix.
auth = Blueprint("auth", __name__, url_prefix="/user")

# Imported only after ``auth`` exists: the endpoints module presumably
# attaches its view functions to this blueprint (e.g. via @auth.route),
# so importing it earlier would cause a circular import — confirm in
# bolo/auth/endpoints.py.
from bolo.auth import endpoints
| 23.2
| 54
| 0.775862
| 16
| 116
| 5.3125
| 0.6875
| 0.305882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12069
| 116
| 5
| 55
| 23.2
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
405d00a57fe20265bb2a4768a85b12127a254203
| 49
|
py
|
Python
|
carvajal/__init__.py
|
coalfire/carvajal
|
d1f36f840629835ce52b3005ca7d38093c6abead
|
[
"MIT"
] | null | null | null |
carvajal/__init__.py
|
coalfire/carvajal
|
d1f36f840629835ce52b3005ca7d38093c6abead
|
[
"MIT"
] | null | null | null |
carvajal/__init__.py
|
coalfire/carvajal
|
d1f36f840629835ce52b3005ca7d38093c6abead
|
[
"MIT"
] | null | null | null |
"""
Carvajal
"""
from carvajal import __about__
| 8.166667
| 30
| 0.714286
| 5
| 49
| 6.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163265
| 49
| 5
| 31
| 9.8
| 0.756098
| 0.163265
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
405fdfb4c2669bec4514074455b72c6c2d5a3d1e
| 912
|
py
|
Python
|
selenium-examples/pytest/vdc/tests/test_add_to_cart.py
|
sauceaaron/demo-python
|
cd7e0a8a9860771000a231371e64d7728f930d0c
|
[
"MIT"
] | null | null | null |
selenium-examples/pytest/vdc/tests/test_add_to_cart.py
|
sauceaaron/demo-python
|
cd7e0a8a9860771000a231371e64d7728f930d0c
|
[
"MIT"
] | null | null | null |
selenium-examples/pytest/vdc/tests/test_add_to_cart.py
|
sauceaaron/demo-python
|
cd7e0a8a9860771000a231371e64d7728f930d0c
|
[
"MIT"
] | 1
|
2021-12-07T16:18:36.000Z
|
2021-12-07T16:18:36.000Z
|
import pytest
def test_add_to_cart(vdc_driver):
vdc_driver.get('https://www.saucedemo.com/inventory.html')
vdc_driver.find_element_by_class_name('btn_primary').click()
assert vdc_driver.find_element_by_class_name('shopping_cart_badge').text == '1'
vdc_driver.get('https://www.saucedemo.com/cart.html')
expected = vdc_driver.find_elements_by_class_name('inventory_item_name')
assert len(expected) == 1
def test_add_two_to_cart(vdc_driver):
vdc_driver.get('https://www.saucedemo.com/inventory.html')
vdc_driver.find_element_by_class_name('btn_primary').click()
vdc_driver.find_element_by_class_name('btn_primary').click()
assert vdc_driver.find_element_by_class_name('shopping_cart_badge').text == '2'
vdc_driver.get('https://www.saucedemo.com/cart.html')
expected = vdc_driver.find_elements_by_class_name('inventory_item_name')
assert len(expected) == 2
| 38
| 83
| 0.767544
| 139
| 912
| 4.611511
| 0.251799
| 0.182527
| 0.141966
| 0.156006
| 0.939158
| 0.939158
| 0.939158
| 0.939158
| 0.939158
| 0.939158
| 0
| 0.00489
| 0.10307
| 912
| 23
| 84
| 39.652174
| 0.778729
| 0
| 0
| 0.5625
| 0
| 0
| 0.286184
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.125
| false
| 0
| 0.0625
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
409f9d9fbe9d37260565c8ac3ce63b5fda2b679f
| 38
|
py
|
Python
|
ipwebcam/__init__.py
|
Michael-Jalloh/IPWebcam
|
dbc41f76f112bb4071758d6d72c1c93acaec7304
|
[
"MIT"
] | 9
|
2017-11-16T06:15:56.000Z
|
2020-02-05T16:36:28.000Z
|
ipwebcam/__init__.py
|
Michael-Jalloh/ipwebcam
|
dbc41f76f112bb4071758d6d72c1c93acaec7304
|
[
"MIT"
] | null | null | null |
ipwebcam/__init__.py
|
Michael-Jalloh/ipwebcam
|
dbc41f76f112bb4071758d6d72c1c93acaec7304
|
[
"MIT"
] | 1
|
2020-01-20T14:52:45.000Z
|
2020-01-20T14:52:45.000Z
|
from ipwebcam.ipwebcam import IPWEBCAM
| 38
| 38
| 0.894737
| 5
| 38
| 6.8
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 38
| 1
| 38
| 38
| 0.971429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
40d5e4a581eb2b3c663cbb74e41ce166a29b89c5
| 30
|
py
|
Python
|
supervenn/tests/__init__.py
|
srcoulombe/supervenn
|
63c8e0636b465c1e26c224044d3397e4768d5d68
|
[
"MIT"
] | 1
|
2021-03-15T20:15:31.000Z
|
2021-03-15T20:15:31.000Z
|
supervenn/tests/__init__.py
|
srcoulombe/supervenn
|
63c8e0636b465c1e26c224044d3397e4768d5d68
|
[
"MIT"
] | null | null | null |
supervenn/tests/__init__.py
|
srcoulombe/supervenn
|
63c8e0636b465c1e26c224044d3397e4768d5d68
|
[
"MIT"
] | null | null | null |
from . import algorithms_test
| 15
| 29
| 0.833333
| 4
| 30
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 30
| 1
| 30
| 30
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
40ffc31f4b93a8de8e98ef9adc123e692d210080
| 13,980
|
py
|
Python
|
torch/nn/grad.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 60,067
|
2017-01-18T17:21:31.000Z
|
2022-03-31T21:37:45.000Z
|
torch/nn/grad.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 66,955
|
2017-01-18T17:21:38.000Z
|
2022-03-31T23:56:11.000Z
|
torch/nn/grad.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 19,210
|
2017-01-18T17:45:04.000Z
|
2022-03-31T23:51:56.000Z
|
"""Gradient interface"""
import torch
from .modules.utils import _single, _pair, _triple
import warnings
def _grad_input_padding(grad_output, input_size, stride, padding, kernel_size, dilation=None):
if dilation is None:
# For backward compatibility
warnings.warn("_grad_input_padding 'dilation' argument not provided. Default of 1 is used.")
dilation = [1] * len(stride)
input_size = list(input_size)
k = grad_output.dim() - 2
if len(input_size) == k + 2:
input_size = input_size[-k:]
if len(input_size) != k:
raise ValueError("input_size must have {} elements (got {})"
.format(k + 2, len(input_size)))
def dim_size(d):
return ((grad_output.size(d + 2) - 1) * stride[d] - 2 * padding[d] + 1
+ dilation[d] * (kernel_size[d] - 1))
min_sizes = [dim_size(d) for d in range(k)]
max_sizes = [min_sizes[d] + stride[d] - 1 for d in range(k)]
for size, min_size, max_size in zip(input_size, min_sizes, max_sizes):
if size < min_size or size > max_size:
raise ValueError(
("requested an input grad size of {}, but valid sizes range "
"from {} to {} (for a grad_output of {})").format(
input_size, min_sizes, max_sizes,
grad_output.size()[2:]))
return tuple(input_size[d] - min_sizes[d] for d in range(k))
def conv1d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):
    r"""Gradient of conv1d with respect to its input.

    Implemented as the 1D transposed convolution of ``grad_output`` with
    ``weight``. ``input_size`` must be given explicitly because, for
    stride > 1, several input lengths map to the same output length.

    Args:
        input_size: shape of the input gradient tensor to produce
        weight: weight tensor (out_channels x in_channels/groups x kW)
        grad_output: output gradient tensor (minibatch x out_channels x oW)
        stride (int or tuple, optional): stride of the convolution. Default: 1
        padding (int or tuple, optional): zero-padding added to both sides of the input. Default: 0
        dilation (int or tuple, optional): spacing between kernel elements. Default: 1
        groups (int, optional): number of blocked connections from input channels to output channels. Default: 1

    Raises:
        ValueError: if ``input_size`` is None or incompatible with the
            other parameters.
    """
    if input_size is None:
        raise ValueError("grad.conv1d_input requires specifying an input_size")

    stride = _single(stride)
    padding = _single(padding)
    dilation = _single(dilation)
    kernel_size = [weight.shape[2]]

    # Extra output padding the transposed conv needs to hit input_size exactly.
    output_padding = _grad_input_padding(grad_output, input_size, stride,
                                         padding, kernel_size, dilation)
    return torch.conv_transpose1d(grad_output, weight, None, stride, padding,
                                  output_padding, groups, dilation)
def conv1d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
    r"""Gradient of conv1d with respect to its weight.

    Computed by correlating ``input`` with ``grad_output``: each
    (sample, out-channel) slice of ``grad_output`` is treated as a
    one-channel kernel, a grouped conv1d produces per-sample weight
    gradients, and they are summed over the batch.

    Args:
        input: input tensor of shape (minibatch x in_channels x iW)
        weight_size: shape of the weight gradient tensor
        grad_output: output gradient tensor (minibatch x out_channels x oW)
        stride (int or tuple, optional): stride of the convolution. Default: 1
        padding (int or tuple, optional): zero-padding added to both sides of the input. Default: 0
        dilation (int or tuple, optional): spacing between kernel elements. Default: 1
        groups (int, optional): number of blocked connections from input channels to output channels. Default: 1
    """
    stride = _single(stride)
    padding = _single(padding)
    dilation = _single(dilation)

    batch = input.shape[0]
    n_in = input.shape[1]
    n_out = grad_output.shape[1]

    # One single-channel "kernel" per (sample, out-channel, in-channel) triple.
    repeated = grad_output.contiguous().repeat(1, n_in // groups, 1)
    kernels = repeated.contiguous().view(
        repeated.shape[0] * repeated.shape[1], 1, repeated.shape[2])

    # Fold the batch into the channel dim so one grouped conv does all samples.
    flat_input = input.contiguous().view(1, batch * n_in, input.shape[2])

    # Note the argument swap: dilation acts as the conv stride and stride as
    # the dilation — correlating input with grad_output this way yields dW.
    raw = torch.conv1d(flat_input, kernels, None, dilation, padding,
                       stride, n_in * batch)

    per_sample = raw.contiguous().view(batch, raw.shape[1] // batch,
                                       raw.shape[2])
    summed = per_sample.sum(dim=0)
    return summed.view(n_in // groups, n_out,
                       per_sample.shape[2]).transpose(0, 1).narrow(
                           2, 0, weight_size[2])
def conv2d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):
    r"""Gradient of conv2d with respect to its input.

    Implemented as the 2D transposed convolution of ``grad_output`` with
    ``weight``. ``input_size`` must be given explicitly because, for
    stride > 1, several input shapes map to the same output shape.

    Args:
        input_size: shape of the input gradient tensor to produce
        weight: weight tensor (out_channels x in_channels/groups x kH x kW)
        grad_output: output gradient tensor (minibatch x out_channels x oH x oW)
        stride (int or tuple, optional): stride of the convolution. Default: 1
        padding (int or tuple, optional): zero-padding added to both sides of the input. Default: 0
        dilation (int or tuple, optional): spacing between kernel elements. Default: 1
        groups (int, optional): number of blocked connections from input channels to output channels. Default: 1

    Raises:
        ValueError: if ``input_size`` is None or incompatible with the
            other parameters.
    """
    if input_size is None:
        raise ValueError("grad.conv2d_input requires specifying an input_size")

    stride = _pair(stride)
    padding = _pair(padding)
    dilation = _pair(dilation)
    kernel_size = (weight.shape[2], weight.shape[3])

    # Extra output padding the transposed conv needs to hit input_size exactly.
    output_padding = _grad_input_padding(grad_output, input_size, stride,
                                         padding, kernel_size, dilation)
    return torch.conv_transpose2d(grad_output, weight, None, stride, padding,
                                  output_padding, groups, dilation)
def conv2d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
    r"""Gradient of conv2d with respect to its weight.

    Computed by correlating ``input`` with ``grad_output``: each
    (sample, out-channel) slice of ``grad_output`` acts as a one-channel
    kernel, a grouped conv2d produces per-sample weight gradients, and
    they are summed over the batch.

    Args:
        input: input tensor of shape (minibatch x in_channels x iH x iW)
        weight_size: shape of the weight gradient tensor
        grad_output: output gradient tensor (minibatch x out_channels x oH x oW)
        stride (int or tuple, optional): stride of the convolution. Default: 1
        padding (int or tuple, optional): zero-padding added to both sides of the input. Default: 0
        dilation (int or tuple, optional): spacing between kernel elements. Default: 1
        groups (int, optional): number of blocked connections from input channels to output channels. Default: 1
    """
    stride = _pair(stride)
    padding = _pair(padding)
    dilation = _pair(dilation)

    batch = input.shape[0]
    n_in = input.shape[1]
    n_out = grad_output.shape[1]

    # One single-channel "kernel" per (sample, out-channel, in-channel) triple.
    repeated = grad_output.contiguous().repeat(1, n_in // groups, 1, 1)
    kernels = repeated.contiguous().view(
        repeated.shape[0] * repeated.shape[1], 1, repeated.shape[2],
        repeated.shape[3])

    # Fold the batch into the channel dim so one grouped conv does all samples.
    flat_input = input.contiguous().view(1, batch * n_in, input.shape[2],
                                         input.shape[3])

    # Note the argument swap: dilation acts as the conv stride and stride as
    # the dilation — correlating input with grad_output this way yields dW.
    raw = torch.conv2d(flat_input, kernels, None, dilation, padding,
                       stride, n_in * batch)

    per_sample = raw.contiguous().view(batch, raw.shape[1] // batch,
                                       raw.shape[2], raw.shape[3])
    summed = per_sample.sum(dim=0)
    full = summed.view(n_in // groups, n_out, per_sample.shape[2],
                       per_sample.shape[3]).transpose(0, 1)
    return full.narrow(2, 0, weight_size[2]).narrow(3, 0, weight_size[3])
def conv3d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):
    r"""Gradient of conv3d with respect to its input.

    Implemented as the 3D transposed convolution of ``grad_output`` with
    ``weight``. ``input_size`` must be given explicitly because, for
    stride > 1, several input shapes map to the same output shape.

    Args:
        input_size: shape of the input gradient tensor to produce
        weight: weights tensor (out_channels x in_channels/groups x kT x kH x kW)
        grad_output: output gradient tensor (minibatch x out_channels x oT x oH x oW)
        stride (int or tuple, optional): stride of the convolution. Default: 1
        padding (int or tuple, optional): zero-padding added to both sides of the input. Default: 0
        dilation (int or tuple, optional): spacing between kernel elements. Default: 1
        groups (int, optional): number of blocked connections from input channels to output channels. Default: 1

    Raises:
        ValueError: if ``input_size`` is None or incompatible with the
            other parameters.
    """
    if input_size is None:
        raise ValueError("grad.conv3d_input requires specifying an input_size")

    stride = _triple(stride)
    padding = _triple(padding)
    dilation = _triple(dilation)
    kernel_size = (weight.shape[2], weight.shape[3], weight.shape[4])

    # Extra output padding the transposed conv needs to hit input_size exactly.
    output_padding = _grad_input_padding(grad_output, input_size, stride,
                                         padding, kernel_size, dilation)
    return torch.conv_transpose3d(grad_output, weight, None, stride, padding,
                                  output_padding, groups, dilation)
def conv3d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
    r"""Gradient of conv3d with respect to its weight.

    Computed by correlating ``input`` with ``grad_output``: each
    (sample, out-channel) slice of ``grad_output`` acts as a one-channel
    kernel, a grouped conv3d produces per-sample weight gradients, and
    they are summed over the batch.

    Args:
        input: input tensor of shape (minibatch x in_channels x iT x iH x iW)
        weight_size: shape of the weight gradient tensor
        grad_output: output gradient tensor (minibatch x out_channels x oT x oH x oW)
        stride (int or tuple, optional): stride of the convolution. Default: 1
        padding (int or tuple, optional): zero-padding added to both sides of the input. Default: 0
        dilation (int or tuple, optional): spacing between kernel elements. Default: 1
        groups (int, optional): number of blocked connections from input channels to output channels. Default: 1
    """
    stride = _triple(stride)
    padding = _triple(padding)
    dilation = _triple(dilation)

    batch = input.shape[0]
    n_in = input.shape[1]
    n_out = grad_output.shape[1]

    # One single-channel "kernel" per (sample, out-channel, in-channel) triple.
    repeated = grad_output.repeat(1, n_in // groups, 1, 1, 1)
    kernels = repeated.contiguous().view(
        repeated.shape[0] * repeated.shape[1], 1, repeated.shape[2],
        repeated.shape[3], repeated.shape[4])

    # Fold the batch into the channel dim so one grouped conv does all samples.
    flat_input = input.contiguous().view(1, batch * n_in, input.shape[2],
                                         input.shape[3], input.shape[4])

    # Note the argument swap: dilation acts as the conv stride and stride as
    # the dilation — correlating input with grad_output this way yields dW.
    raw = torch.conv3d(flat_input, kernels, None, dilation, padding,
                       stride, n_in * batch)

    per_sample = raw.contiguous().view(batch, raw.shape[1] // batch,
                                       raw.shape[2], raw.shape[3],
                                       raw.shape[4])
    summed = per_sample.sum(dim=0)
    full = summed.view(n_in // groups, n_out, per_sample.shape[2],
                       per_sample.shape[3], per_sample.shape[4]).transpose(0, 1)
    return full.narrow(2, 0, weight_size[2]).narrow(
        3, 0, weight_size[3]).narrow(4, 0, weight_size[4])
| 44.522293
| 112
| 0.65422
| 1,924
| 13,980
| 4.60395
| 0.073285
| 0.086927
| 0.020321
| 0.036577
| 0.901671
| 0.891172
| 0.880108
| 0.862836
| 0.846692
| 0.835629
| 0
| 0.023225
| 0.242346
| 13,980
| 313
| 113
| 44.664537
| 0.813066
| 0.466094
| 0
| 0.451852
| 0
| 0
| 0.052451
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059259
| false
| 0
| 0.022222
| 0.007407
| 0.140741
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
dc02095b9990e8bc366e15ff2e33e30246529b41
| 108
|
py
|
Python
|
koko_gym/__init__.py
|
berkeleyopenrobotics/blue_mujoco
|
aa73db621f22dac4b76af8748ea6c179d5cb1715
|
[
"MIT"
] | 7
|
2019-04-17T12:50:38.000Z
|
2021-02-11T08:27:17.000Z
|
koko_gym/__init__.py
|
berkeleyopenrobotics/blue_mujoco
|
aa73db621f22dac4b76af8748ea6c179d5cb1715
|
[
"MIT"
] | 2
|
2019-04-16T21:10:08.000Z
|
2019-10-07T00:48:11.000Z
|
koko_gym/envs/__init__.py
|
berkeleyopenarms/blue_mujoco_v1
|
aa73db621f22dac4b76af8748ea6c179d5cb1715
|
[
"MIT"
] | 4
|
2019-04-17T09:06:32.000Z
|
2022-01-26T19:44:24.000Z
|
from koko_gym.envs.koko_reacher import KokoReacherEnv
# from koko_gym.envs.koko_pusher import KokoPusherEnv
| 36
| 53
| 0.87037
| 16
| 108
| 5.625
| 0.5625
| 0.177778
| 0.244444
| 0.333333
| 0.422222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 108
| 2
| 54
| 54
| 0.909091
| 0.472222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
9053d3b1dbebc426557bc7b0a7dc2c53a3e1ec45
| 1,186
|
py
|
Python
|
tests/test_provider_vmware_vmc.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
tests/test_provider_vmware_vmc.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
tests/test_provider_vmware_vmc.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# tests/test_provider_vmware_vmc.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:30:35 UTC)
def test_provider_import():
    """Smoke test: the generated vmware/vmc provider module imports cleanly."""
    import terrascript.provider.vmware.vmc
def test_resource_import():
    """Smoke test: every generated vmware/vmc resource class is importable."""
    from terrascript.resource.vmware.vmc import vmc_cluster
    from terrascript.resource.vmware.vmc import vmc_public_ip
    from terrascript.resource.vmware.vmc import vmc_sddc
    from terrascript.resource.vmware.vmc import vmc_site_recovery
    from terrascript.resource.vmware.vmc import vmc_srm_node
def test_datasource_import():
    """Smoke test: every generated vmware/vmc data source is importable."""
    from terrascript.data.vmware.vmc import vmc_connected_accounts
    from terrascript.data.vmware.vmc import vmc_customer_subnets
    from terrascript.data.vmware.vmc import vmc_org
    from terrascript.data.vmware.vmc import vmc_sddc
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.vmware.vmc
#
# t = terrascript.provider.vmware.vmc.vmc()
# s = str(t)
#
# assert 'https://github.com/vmware/terraform-provider-vmc' in s
# assert '1.7.0' in s
| 27.581395
| 80
| 0.763912
| 168
| 1,186
| 5.238095
| 0.422619
| 0.132955
| 0.153409
| 0.184091
| 0.4875
| 0.401136
| 0.401136
| 0
| 0
| 0
| 0
| 0.01497
| 0.155143
| 1,186
| 42
| 81
| 28.238095
| 0.863273
| 0.397133
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02381
| 0
| 1
| 0.230769
| true
| 0
| 1
| 0
| 1.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
9056b75ca3f36db1495d0549e7a10b0eecff7981
| 92
|
py
|
Python
|
corehq/apps/domain/tests/__init__.py
|
dslowikowski/commcare-hq
|
ad8885cf8dab69dc85cb64f37aeaf06106124797
|
[
"BSD-3-Clause"
] | 1
|
2015-02-10T23:26:39.000Z
|
2015-02-10T23:26:39.000Z
|
corehq/apps/domain/tests/__init__.py
|
SEL-Columbia/commcare-hq
|
992ee34a679c37f063f86200e6df5a197d5e3ff6
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/domain/tests/__init__.py
|
SEL-Columbia/commcare-hq
|
992ee34a679c37f063f86200e6df5a197d5e3ff6
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from .test_views import *
from .test_utils import *
| 18.4
| 38
| 0.815217
| 13
| 92
| 5.230769
| 0.538462
| 0.294118
| 0.411765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141304
| 92
| 4
| 39
| 23
| 0.860759
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
906e5df00973a5b4a0c6c19d336178dfe3f3f02a
| 54,572
|
py
|
Python
|
tests/test_stereo.py
|
dumasl/Pandora
|
6bae22926e59bcd02d7f6f9485bd5715ffceb450
|
[
"Apache-2.0"
] | null | null | null |
tests/test_stereo.py
|
dumasl/Pandora
|
6bae22926e59bcd02d7f6f9485bd5715ffceb450
|
[
"Apache-2.0"
] | 1
|
2020-09-29T10:57:08.000Z
|
2020-09-29T12:21:17.000Z
|
tests/test_stereo.py
|
dumasl/Pandora
|
6bae22926e59bcd02d7f6f9485bd5715ffceb450
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2020 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of PANDORA
#
# https://github.com/CNES/Pandora_pandora
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains functions to test the cost volume measure step.
"""
import unittest
import logging
import logging.config
import os
import json
import numpy as np
import xarray as xr
import pandora.stereo as stereo
class TestStereo(unittest.TestCase):
"""
TestStereo class allows to test all the methods in the class Stereo,
and the plugins pixel_wise, zncc
"""
def setUp(self):
    """
    Method called to prepare the test fixture

    Builds two small 5 x 6 xarray Datasets, ``self.ref`` (left/reference
    image) and ``self.sec`` (right/secondary image), each exposing the
    image under the 'im' variable with 'row'/'col' coordinates.
    """
    # Create a stereo object
    # Reference image: flat background of 1s with a few distinctive pixels
    # (2, 4, 3) that give the similarity measures structure to match.
    data = np.array(([1, 1, 1, 1, 1, 1],
                     [1, 1, 1, 1, 2, 1],
                     [1, 1, 1, 4, 3, 1],
                     [1, 1, 1, 1, 1, 1],
                     [1, 1, 1, 1, 1, 1]), dtype=np.float64)
    self.ref = xr.Dataset({'im': (['row', 'col'], data)},
                          coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    # Secondary image: same size, with the distinctive values placed so
    # matches exist at small disparities.
    data = np.array(([1, 1, 1, 2, 2, 2],
                     [1, 1, 1, 4, 2, 4],
                     [1, 1, 1, 4, 4, 1],
                     [1, 1, 1, 1, 1, 1],
                     [1, 1, 1, 1, 1, 1]), dtype=np.float64)
    self.sec = xr.Dataset({'im': (['row', 'col'], data)},
                          coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
def test_ssd_cost(self):
    """
    Test the sum of squared difference method

    Checks the 'ssd' plugin against hand-computed costs at disparity 0,
    for window sizes 1 (pixel-wise squared difference) and 5.
    """
    # Squared difference pixel-wise ground truth for the images self.ref, self.sec, with window_size = 1
    sd_ground_truth = np.array(([0, 0, 0, 1, 1, 1],
                                [0, 0, 0, (1-4)**2, 0, (1-4)**2],
                                [0, 0, 0, 0, (3-4)**2, 0],
                                [0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0]))
    # Computes the sd cost for the whole images
    # NOTE(review): AbstractStereo appears to be pandora's plugin factory —
    # 'stereo_method' selects the measure; confirm in pandora.stereo.
    stereo_matcher = stereo.AbstractStereo(**{'stereo_method': 'ssd', 'window_size': 1, 'subpix': 1})
    ssd = stereo_matcher.compute_cost_volume(img_ref=self.ref, img_sec=self.sec, disp_min=-1, disp_max=1,
                                             **{'valid_pixels': 0, 'no_data': 1})
    # Check if the calculated sd cost is equal to the ground truth (same shape and all elements equals)
    np.testing.assert_array_equal(ssd['cost_volume'].sel(disp=0), sd_ground_truth)
    # Sum of squared difference pixel-wise ground truth for the images self.ref, self.sec, with window_size = 5
    ssd_ground_truth = np.array(([[12., 22.]]))
    # Computes the sd cost for the whole images
    stereo_matcher = stereo.AbstractStereo(**{'stereo_method': 'ssd', 'window_size': 5, 'subpix': 1})
    ssd = stereo_matcher.compute_cost_volume(img_ref=self.ref, img_sec=self.sec, disp_min=-1, disp_max=1,
                                             **{'valid_pixels': 0, 'no_data': 1})
    # Check if the calculated sd cost is equal to the ground truth (same shape and all elements equals)
    np.testing.assert_array_equal(ssd['cost_volume'].sel(disp=0), ssd_ground_truth)
def test_sad_cost(self):
    """
    Test the absolute difference method

    Checks the 'sad' plugin against hand-computed costs at disparity 0,
    for window sizes 1 (pixel-wise absolute difference) and 5.
    """
    # Absolute difference pixel-wise ground truth for the images self.ref, self.sec
    ad_ground_truth = np.array(([0, 0, 0, 1, 1, 1],
                                [0, 0, 0, abs(1-4), 0, abs(1-4)],
                                [0, 0, 0, 0, abs(3-4), 0],
                                [0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0]))
    # Computes the ad cost for the whole images
    stereo_matcher = stereo.AbstractStereo(**{'stereo_method': 'sad', 'window_size': 1, 'subpix': 1})
    sad = stereo_matcher.compute_cost_volume(img_ref=self.ref, img_sec=self.sec, disp_min=-1, disp_max=1,
                                             **{'valid_pixels': 0, 'no_data': 1})
    # Check if the calculated ad cost is equal to the ground truth (same shape and all elements equals)
    np.testing.assert_array_equal(sad['cost_volume'].sel(disp=0), ad_ground_truth)
    # Sum of absolute difference pixel-wise ground truth for the images self.ref, self.sec with window size 5
    sad_ground_truth = np.array(([[6., 10.]]))
    # Computes the ad cost for the whole images
    stereo_matcher = stereo.AbstractStereo(**{'stereo_method': 'sad', 'window_size': 5, 'subpix': 1})
    sad = stereo_matcher.compute_cost_volume(img_ref=self.ref, img_sec=self.sec, disp_min=-1, disp_max=1,
                                             **{'valid_pixels': 0, 'no_data': 1})
    # Check if the calculated ad cost is equal to the ground truth (same shape and all elements equals)
    np.testing.assert_array_equal(sad['cost_volume'].sel(disp=0), sad_ground_truth)
def test_census_cost(self):
    """
    Test the census method

    Builds dedicated 4 x 4 images (not the setUp fixtures) and checks the
    'census' plugin's Hamming-style costs at disparities -1, 0 and 1
    against hand-computed ground truths; out-of-range columns are NaN.
    """
    data = np.array(([1, 1, 1, 3],
                     [1, 2, 1, 0],
                     [2, 1, 0, 1],
                     [1, 1, 1, 1]), dtype=np.float64)
    ref = xr.Dataset({'im': (['row', 'col'], data)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    data = np.array(([5, 1, 2, 3],
                     [1, 2, 1, 0],
                     [2, 2, 0, 1],
                     [1, 1, 1, 1]), dtype=np.float64)
    sec = xr.Dataset({'im': (['row', 'col'], data)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    # census ground truth for the images ref, sec, window size = 3 and disp = -1
    census_ground_truth_d1 = np.array(([np.nan, 3],
                                       [np.nan, 7]))
    # census ground truth for the images ref, sec, window size = 3 and disp = 0
    census_ground_truth_d2 = np.array(([1, 2],
                                       [2, 0]))
    # census ground truth for the images ref, sec, window size = 3 and disp = 1
    census_ground_truth_d3 = np.array(([4, np.nan],
                                       [5, np.nan]))
    # Computes the census transform for the images with window size = 3
    stereo_matcher = stereo.AbstractStereo(**{'stereo_method': 'census', 'window_size': 3, 'subpix': 1})
    census = stereo_matcher.compute_cost_volume(img_ref=ref, img_sec=sec, disp_min=-1, disp_max=1,
                                                **{'valid_pixels': 0, 'no_data': 1})
    # Check if the calculated census cost is equal to the ground truth (same shape and all elements equals)
    np.testing.assert_array_equal(census['cost_volume'].sel(disp=-1), census_ground_truth_d1)
    np.testing.assert_array_equal(census['cost_volume'].sel(disp=0), census_ground_truth_d2)
    np.testing.assert_array_equal(census['cost_volume'].sel(disp=1), census_ground_truth_d3)
def test_point_interval(self):
    """Check point_interval against hand-computed column ranges.

    For disparity 0 the measure covers the whole image width; for
    disparity -2 reference columns [2, 6) line up with secondary
    columns [0, 4).
    """
    matcher = stereo.AbstractStereo(**{'stereo_method': 'census', 'window_size': 3, 'subpix': 1})
    # (disparity, expected ref interval, expected sec interval)
    cases = [
        (0, (0, self.ref['im'].shape[1]), (0, self.sec['im'].shape[1])),
        (-2, (2, 6), (0, 4)),
    ]
    for disp, expected_p, expected_q in cases:
        interval = matcher.point_interval(self.ref, self.sec, disp)
        # Check if the calculated range is equal to the ground truth
        np.testing.assert_array_equal(interval[0], expected_p)
        np.testing.assert_array_equal(interval[1], expected_q)
def test_cost_volume(self):
    """
    Test the cost volume method

    Builds dedicated 3 x 4 images and checks the full 'sad' cost volume
    (window 3, disparities -2..1) against a hand-computed ground truth;
    NaN marks disparities that fall outside the image.
    """
    # Create simple images
    data = np.array(([1, 2, 1, 4],
                     [6, 2, 7, 4],
                     [1, 1, 3, 6]), dtype=np.float64)
    ref = xr.Dataset({'im': (['row', 'col'], data)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    data = np.array(([6, 7, 8, 10],
                     [2, 4, 1, 6],
                     [9, 10, 1, 2]), dtype=np.float64)
    sec = xr.Dataset({'im': (['row', 'col'], data)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    # Cost Volume ground truth for the stereo image simple_stereo_imgs,
    # with disp_min = -2, disp_max = 1, sad measure and subpixel_offset = 0
    ground_truth = np.array([[[np.nan, np.nan, 48, 35],
                              [np.nan, 40, 43, np.nan]]])
    # Computes the Cost Volume for the stereo image simple_stereo_imgs,
    # with disp_min = -2, disp_max = 1, sad measure, window_size = 3 and subpix = 1
    stereo_matcher = stereo.AbstractStereo(**{'stereo_method': 'sad', 'window_size': 3, 'subpix': 1})
    cv = stereo_matcher.compute_cost_volume(ref, sec, disp_min=-2, disp_max=1,
                                            **{'valid_pixels': 0, 'no_data': 1})
    # Check if the calculated mean is equal to the ground truth (same shape and all elements equals)
    np.testing.assert_array_equal(cv['cost_volume'].data, ground_truth)
def test_confidence_measure(self):
    """
    Test the confidence measure at the matching cost computation step
    """
    # load plugins
    matcher = stereo.AbstractStereo(**{'stereo_method': 'sad', 'window_size': 3, 'subpix': 1})
    # Brightness standard deviation inside a window of size 3, reshaped as a
    # (row, col, indicator) confidence measure
    expected_std = np.array([[0., np.sqrt(8/9), np.sqrt(10/9), np.sqrt(10/9)],
                             [0., np.sqrt(8/9), np.sqrt(10/9), np.sqrt(10/9)],
                             [0., np.sqrt(8/9), np.sqrt(92/81), np.sqrt(92/81)]],
                            dtype=np.float32).reshape(3, 4, 1)
    # compute with compute_cost_volume
    cv = matcher.compute_cost_volume(self.ref, self.sec, disp_min=-2, disp_max=1,
                                     **{'valid_pixels': 0, 'no_data': 1})
    # Check if the calculated confidence_measure is equal to the ground truth (same shape and all elements equals)
    np.testing.assert_array_equal(cv['confidence_measure'].data, expected_std)
def test_popcount32b(self):
    """
    Test the popcount32b method
    """
    matcher = stereo.AbstractStereo(**{'stereo_method': 'census', 'window_size': 3, 'subpix': 1})
    # Each case is (bit pattern, expected number of bits set to one)
    cases = ((0b0001000101000, 3),
             (0b0000000000000000000, 0))
    for pattern, expected in cases:
        # Count the number of symbols that are different from the zero
        self.assertEqual(matcher.popcount32b(pattern), expected)
def test_zncc_cost(self):
    """
    Test the zncc_cost method
    """
    def zncc(x, y):
        # Reference formula of the zero-mean normalized cross correlation
        return (np.mean(x * y) - (np.mean(x) * np.mean(y))) / (np.std(x) * np.std(y))

    # Compute the cost volume for the images self.ref, self.sec,
    # with zncc measure, disp = -1, 1 window size = 5 and subpix = 1
    matcher = stereo.AbstractStereo(**{'stereo_method': 'zncc', 'window_size': 5, 'subpix': 1})
    cv_zncc = matcher.compute_cost_volume(self.ref, self.sec, disp_min=-1, disp_max=1,
                                          **{'valid_pixels': 0, 'no_data': 1})
    # Ground truth zncc cost for the disparity -1
    gt_disp_minus_one = np.array([[np.nan, zncc(self.ref['im'].data[:, 1:], self.sec['im'].data[:, :5])]])
    # Check if the calculated cost volume for the disparity -1 is equal to the ground truth
    np.testing.assert_allclose(cv_zncc['cost_volume'][:, :, 0], gt_disp_minus_one, rtol=1e-05)
    # Ground truth zncc cost for the disparity 1
    gt_disp_plus_one = np.array([[zncc(self.ref['im'].data[:, :5], self.sec['im'].data[:, 1:]), np.nan]])
    # Check if the calculated cost volume for the disparity 1 is equal to the ground truth
    np.testing.assert_allclose(cv_zncc['cost_volume'][:, :, 2], gt_disp_plus_one, rtol=1e-05)
def test_subpixel_offset(self):
    """
    Test the cost volume method with 2 subpixel disparity
    """
    # Create a stereo object with simple images
    ref_pixels = np.array(([7, 8, 1, 0, 2],
                           [4, 5, 2, 1, 0],
                           [8, 9, 10, 0, 0]), dtype=np.float64)
    ref = xr.Dataset({'im': (['row', 'col'], ref_pixels)},
                     coords={'row': np.arange(ref_pixels.shape[0]), 'col': np.arange(ref_pixels.shape[1])})
    sec_pixels = np.array(([1, 5, 6, 3, 4],
                           [2, 5, 10, 6, 9],
                           [0, 7, 5, 3, 1]), dtype=np.float64)
    sec = xr.Dataset({'im': (['row', 'col'], sec_pixels)},
                     coords={'row': np.arange(sec_pixels.shape[0]), 'col': np.arange(sec_pixels.shape[1])})
    # Computes the cost volume for disp min -2 disp max 2 and subpix = 2
    matcher = stereo.AbstractStereo(**{'stereo_method': 'sad', 'window_size': 3, 'subpix': 2})
    cv_subpixel = matcher.compute_cost_volume(ref, sec, disp_min=-2, disp_max=2,
                                              **{'valid_pixels': 0, 'no_data': 1})
    # With subpix = 2 the disparity axis must be sampled every 0.5 pixel
    expected_disp_range = [-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2]
    # Check if the calculated disparity range is equal to the ground truth (same shape and all elements equals)
    np.testing.assert_array_equal(cv_subpixel.coords['disp'].data, expected_disp_range)
    # Cost volume ground truth with subpixel precision 0.5
    expected_cv = np.array([[[np.nan, np.nan, np.nan, np.nan, 39, 32.5, 28, 34.5, 41],
                             [np.nan, np.nan, 49, 41.5, 34, 35.5, 37, np.nan, np.nan],
                             [45, 42.5, 40, 40.5, 41, np.nan, np.nan, np.nan, np.nan]]])
    # Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
    np.testing.assert_array_equal(cv_subpixel['cost_volume'].data, expected_cv)
def test_masks_invalid_pixels(self):
    """
    Test the method masks_invalid_pixels

    Checks, for several mask configurations, window sizes and similarity
    measures, that cost volume entries computed over masked pixels
    (no data or invalid) are invalidated (set to NaN).
    """
    # ------------ Test the method with a reference mask ( secondary mask contains valid pixels ) ------------
    # Mask convention
    # cfg['image']['valid_pixels'] = 0
    # cfg['image']['no_data'] = 1
    # invalid_pixels all other values
    data = np.array(([1, 1, 1, 3, 4],
                     [1, 2, 1, 0, 2],
                     [2, 1, 0, 1, 2],
                     [1, 1, 1, 1, 4]), dtype=np.float64)
    mask = np.array(([0, 0, 2, 0, 1],
                     [0, 2, 0, 0, 0],
                     [0, 0, 0, 0, 0],
                     [1, 0, 0, 0, 2]), dtype=np.int16)
    ref = xr.Dataset({'im': (['row', 'col'], data),
                      'msk': (['row', 'col'], mask)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    data = np.array(([5, 1, 2, 3, 4],
                     [1, 2, 1, 0, 2],
                     [2, 2, 0, 1, 4],
                     [1, 1, 1, 1, 2]), dtype=np.float64)
    # Secondary mask contains valid pixels
    mask = np.zeros((4, 5), dtype=np.int16)
    sec = xr.Dataset({'im': (['row', 'col'], data),
                      'msk': (['row', 'col'], mask)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    stereo_ = stereo.AbstractStereo(**{'stereo_method': 'sad', 'window_size': 3, 'subpix': 1})
    # Compute the cost volume and invalidate pixels if need
    cv = stereo_.compute_cost_volume(img_ref=ref, img_sec=sec, disp_min=-1, disp_max=1,
                                     **{'valid_pixels': 0, 'no_data': 1})
    # Cost volume before invalidation
    #          disp -1   0   1
    # Row 1
    # col 1 [[[nan,  6.,  8.],
    # col 2   [12.,  2., 13.],
    # col 3   [10.,  3., nan]],
    #
    # Row 2
    # col 1  [[nan,  1.,  5.],
    # col 2   [7.,   1., 10.],
    # col 3   [11.,  4., nan]]], dtype=float32)
    # Cost volume ground truth after invalidation
    cv_ground_truth = np.array([[[np.nan, np.nan, np.nan],
                                 [12, 2., 13.],
                                 [np.nan, np.nan, np.nan]],
                                [[np.nan, np.nan, np.nan],
                                 [7., 1., 10.],
                                 [11., 4., np.nan]]], dtype=np.float32)
    # Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
    np.testing.assert_array_equal(cv['cost_volume'], cv_ground_truth)
    # ------------ Test the method with a secondary mask ( reference mask contains valid pixels ) ------------
    # Mask convention
    # cfg['image']['valid_pixels'] = 0
    # cfg['image']['no_data'] = 1
    # invalid_pixels all other values
    data = np.array(([1, 1, 1, 3, 4],
                     [1, 2, 1, 0, 2],
                     [2, 1, 0, 1, 2],
                     [1, 1, 1, 1, 4]), dtype=np.float64)
    # Reference mask contains valid pixels
    mask = np.zeros((4, 5), dtype=np.int16)
    ref = xr.Dataset({'im': (['row', 'col'], data),
                      'msk': (['row', 'col'], mask)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    data = np.array(([5, 1, 2, 3, 4],
                     [1, 2, 1, 0, 2],
                     [2, 2, 0, 1, 4],
                     [1, 1, 1, 1, 2]), dtype=np.float64)
    mask = np.array(([0, 0, 0, 0, 2],
                     [0, 1, 0, 0, 0],
                     [0, 2, 0, 2, 0],
                     [1, 0, 0, 0, 0]), dtype=np.int16)
    sec = xr.Dataset({'im': (['row', 'col'], data),
                      'msk': (['row', 'col'], mask)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    stereo_ = stereo.AbstractStereo(**{'stereo_method': 'sad', 'window_size': 3, 'subpix': 1})
    # Compute the cost volume and invalidate pixels if need
    cv = stereo_.compute_cost_volume(img_ref=ref, img_sec=sec, disp_min=-1, disp_max=1,
                                     **{'valid_pixels': 0, 'no_data': 1})
    # Cost volume before invalidation
    #          disp -1   0   1
    # Row 1
    # col 1 [[[nan,  6.,  8.],
    # col 2   [12.,  2., 13.],
    # col 3   [10.,  3., nan]],
    #
    # Row 2
    # col 1  [[nan,  1.,  5.],
    # col 2   [7.,   1., 10.],
    # col 3   [11.,  4., nan]]], dtype=float32)
    # Cost volume ground truth after invalidation
    cv_ground_truth = np.array([[[np.nan, np.nan, np.nan],
                                 [np.nan, np.nan, 13.],
                                 [np.nan, 3., np.nan]],
                                [[np.nan, np.nan, np.nan],
                                 [np.nan, np.nan, np.nan],
                                 [np.nan, np.nan, np.nan]]], dtype=np.float32)
    # Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
    np.testing.assert_array_equal(cv['cost_volume'], cv_ground_truth)
    # ------------ Test the method with a reference and secondary mask ------------
    # Mask convention
    # cfg['image']['valid_pixels'] = 0
    # cfg['image']['no_data'] = 1
    # invalid_pixels all other values
    data = np.array(([1, 1, 1, 3, 4],
                     [1, 2, 1, 0, 2],
                     [2, 1, 0, 1, 2],
                     [1, 1, 1, 1, 4]), dtype=np.float64)
    # Reference mask contains valid pixels
    mask = np.array(([1, 0, 0, 2, 0],
                     [0, 0, 0, 0, 0],
                     [0, 0, 2, 0, 0],
                     [2, 0, 0, 0, 1]), dtype=np.int16)
    ref = xr.Dataset({'im': (['row', 'col'], data),
                      'msk': (['row', 'col'], mask)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    data = np.array(([5, 1, 2, 3, 4],
                     [1, 2, 1, 0, 2],
                     [2, 2, 0, 1, 4],
                     [1, 1, 1, 1, 2]), dtype=np.float64)
    mask = np.array(([0, 2, 0, 0, 1],
                     [0, 0, 0, 0, 0],
                     [0, 0, 0, 2, 0],
                     [1, 0, 2, 0, 0]), dtype=np.int16)
    sec = xr.Dataset({'im': (['row', 'col'], data),
                      'msk': (['row', 'col'], mask)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    stereo_ = stereo.AbstractStereo(**{'stereo_method': 'sad', 'window_size': 3, 'subpix': 1})
    # Compute the cost volume and invalidate pixels if need
    cv = stereo_.compute_cost_volume(img_ref=ref, img_sec=sec, disp_min=-1, disp_max=1,
                                     **{'valid_pixels': 0, 'no_data': 1})
    # Cost volume before invalidation
    #          disp -1   0   1
    # Row 1
    # col 1 [[[nan,  6.,  8.],
    # col 2   [12.,  2., 13.],
    # col 3   [10.,  3., nan]],
    #
    # Row 2
    # col 1  [[nan,  1.,  5.],
    # col 2   [7.,   1., 10.],
    # col 3   [11.,  4., nan]]], dtype=float32)
    # Cost volume ground truth after invalidation
    cv_ground_truth = np.array([[[np.nan, np.nan, np.nan],
                                 [12, 2, np.nan],
                                 [10, np.nan, np.nan]],
                                [[np.nan, np.nan, 5],
                                 [np.nan, np.nan, np.nan],
                                 [np.nan, np.nan, np.nan]]], dtype=np.float32)
    # Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
    np.testing.assert_array_equal(cv['cost_volume'], cv_ground_truth)
    # ------------ Test the method with a reference and secondary mask and window size 5 ------------
    # Mask convention
    # cfg['image']['valid_pixels'] = 0
    # cfg['image']['no_data'] = 1
    # invalid_pixels all other values
    data = np.array(([0, 0, 0, 0, 0, 0, 0],
                     [0, 1, 1, 1, 3, 4, 0],
                     [0, 1, 2, 1, 0, 2, 0],
                     [0, 2, 1, 0, 1, 2, 0],
                     [0, 1, 1, 1, 1, 4, 0],
                     [0, 0, 0, 0, 0, 0, 0]), dtype=np.float64)
    mask = np.array(([2, 0, 0, 0, 0, 0, 1],
                     [0, 0, 0, 0, 0, 0, 0],
                     [0, 2, 0, 0, 0, 0, 0],
                     [0, 0, 0, 2, 0, 0, 0],
                     [0, 0, 0, 0, 0, 2, 0],
                     [1, 0, 0, 0, 0, 0, 2]), dtype=np.int16)
    ref = xr.Dataset({'im': (['row', 'col'], data),
                      'msk': (['row', 'col'], mask)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    data = np.array(([0, 0, 0, 0, 0, 0, 0],
                     [0, 5, 1, 2, 3, 4, 0],
                     [0, 1, 2, 1, 0, 2, 0],
                     [0, 2, 2, 0, 1, 4, 0],
                     [0, 1, 1, 1, 1, 2, 0],
                     [0, 0, 0, 0, 0, 0, 0]), dtype=np.float64)
    mask = np.array(([1, 0, 0, 0, 0, 0, 2],
                     [0, 0, 0, 0, 0, 0, 0],
                     [2, 0, 2, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0, 2],
                     [0, 0, 0, 0, 0, 0, 0],
                     [2, 0, 0, 0, 0, 0, 1]), dtype=np.int16)
    sec = xr.Dataset({'im': (['row', 'col'], data),
                      'msk': (['row', 'col'], mask)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    stereo_ = stereo.AbstractStereo(**{'stereo_method': 'sad', 'window_size': 5, 'subpix': 1})
    # Compute the cost volume and invalidate pixels if need
    cv = stereo_.compute_cost_volume(img_ref=ref, img_sec=sec, disp_min=-1, disp_max=1,
                                     **{'valid_pixels': 0, 'no_data': 1})
    # Cost volume ground truth after invalidation
    cv_ground_truth = np.array([[[np.nan, np.nan, 24.],
                                 [np.nan, 10., 27.],
                                 [np.nan, np.nan, np.nan]],
                                [[np.nan, np.nan, np.nan],
                                 [np.nan, np.nan, np.nan],
                                 [31., np.nan, np.nan]]], dtype=np.float32)
    # Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
    np.testing.assert_array_equal(cv['cost_volume'], cv_ground_truth)
    # ------------ Test the method with a reference and secondary mask with window size 1------------
    # Mask convention
    # cfg['image']['valid_pixels'] = 0
    # cfg['image']['no_data'] = 1
    # invalid_pixels all other values
    data = np.array(([1, 1, 1, 3, 4],
                     [1, 1, 1, 1, 4]), dtype=np.float64)
    # Reference mask contains valid pixels
    mask = np.array(([1, 0, 0, 2, 0],
                     [2, 0, 0, 0, 1]), dtype=np.int16)
    ref = xr.Dataset({'im': (['row', 'col'], data),
                      'msk': (['row', 'col'], mask)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    data = np.array(([5, 1, 2, 3, 4],
                     [1, 1, 1, 1, 2]), dtype=np.float64)
    mask = np.array(([0, 2, 0, 0, 1],
                     [1, 0, 2, 0, 0]), dtype=np.int16)
    sec = xr.Dataset({'im': (['row', 'col'], data),
                      'msk': (['row', 'col'], mask)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    stereo_ = stereo.AbstractStereo(**{'stereo_method': 'sad', 'window_size': 1, 'subpix': 1})
    # Compute the cost volume and invalidate pixels if need
    cv = stereo_.compute_cost_volume(img_ref=ref, img_sec=sec, disp_min=-1, disp_max=1,
                                     **{'valid_pixels': 0, 'no_data': 1})
    # Cost volume ground truth after invalidation
    cv_ground_truth = np.array([[[np.nan, np.nan, np.nan],
                                 [4, np.nan, 1],
                                 [np.nan, 1, 2],
                                 [np.nan, np.nan, np.nan],
                                 [1, np.nan, np.nan]],
                                [[np.nan, np.nan, np.nan],
                                 [np.nan, 0, np.nan],
                                 [0, np.nan, 0],
                                 [np.nan, 0, 1],
                                 [np.nan, np.nan, np.nan]]], dtype=np.float32)
    # Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
    np.testing.assert_array_equal(cv['cost_volume'], cv_ground_truth)
    # ------------ Test the method with a reference and secondary mask with window size 3 and ZNCC ------------
    # Mask convention
    # cfg['image']['valid_pixels'] = 0
    # cfg['image']['no_data'] = 1
    # invalid_pixels all other values
    data = np.array(([1, 1, 1, 3, 4],
                     [1, 2, 1, 0, 2],
                     [2, 1, 0, 1, 2],
                     [1, 1, 1, 1, 4]), dtype=np.float64)
    # Reference mask contains valid pixels
    mask = np.array(([1, 0, 0, 2, 0],
                     [0, 0, 0, 0, 0],
                     [0, 0, 2, 0, 0],
                     [2, 0, 0, 0, 1]), dtype=np.int16)
    ref = xr.Dataset({'im': (['row', 'col'], data),
                      'msk': (['row', 'col'], mask)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    data = np.array(([5, 1, 2, 3, 4],
                     [1, 2, 1, 0, 2],
                     [2, 2, 0, 1, 4],
                     [1, 1, 1, 1, 2]), dtype=np.float64)
    mask = np.array(([0, 2, 0, 0, 1],
                     [0, 0, 0, 0, 0],
                     [0, 0, 0, 2, 0],
                     [1, 0, 2, 0, 0]), dtype=np.int16)
    sec = xr.Dataset({'im': (['row', 'col'], data),
                      'msk': (['row', 'col'], mask)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    stereo_ = stereo.AbstractStereo(**{'stereo_method': 'zncc', 'window_size': 3, 'subpix': 1})
    # Compute the cost volume and invalidate pixels if need
    cv = stereo_.compute_cost_volume(img_ref=ref, img_sec=sec, disp_min=-1, disp_max=1,
                                     **{'valid_pixels': 0, 'no_data': 1})
    # Cost volume ground truth after invalidation
    cv_ground_truth = np.array([[[np.nan, np.nan, np.nan],
                                 [0.02146693953705469, 0.8980265101338747, np.nan],
                                 [0.40624999999999994, np.nan, np.nan]],
                                [[np.nan, np.nan, 0.2941742027072762],
                                 [np.nan, np.nan, np.nan],
                                 [np.nan, np.nan, np.nan]]], dtype=np.float32)
    # Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
    np.testing.assert_array_equal(cv['cost_volume'], cv_ground_truth)
def test_masks_invalid_pixels_subpixel(self):
    """
    Test the method masks_invalid_pixels with subpixel precision

    Checks, for several mask conventions, window sizes, subpixel factors and
    similarity measures, that cost volume entries computed over masked pixels
    (no data or invalid) are invalidated (set to NaN) at subpixel disparities
    as well as at integer ones.
    """
    # ------------ Test the method with a secondary mask with window size 1 subpixel 2 ------------
    # Mask convention
    # cfg['image']['valid_pixels'] = 0
    # cfg['image']['no_data'] = 1
    # invalid_pixels all other values
    data = np.array(([1, 1, 1, 3, 4],
                     [1, 1, 1, 1, 4]), dtype=np.float64)
    # Reference mask contains valid pixels
    mask = np.array(([0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0]), dtype=np.int16)
    ref = xr.Dataset({'im': (['row', 'col'], data),
                      'msk': (['row', 'col'], mask)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    data = np.array(([5, 1, 2, 3, 4],
                     [1, 1, 1, 1, 2]), dtype=np.float64)
    mask = np.array(([0, 0, 0, 0, 1],
                     [1, 0, 2, 0, 0]), dtype=np.int16)
    sec = xr.Dataset({'im': (['row', 'col'], data),
                      'msk': (['row', 'col'], mask)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    stereo_ = stereo.AbstractStereo(**{'stereo_method': 'sad', 'window_size': 1, 'subpix': 2})
    # Compute the cost volume and invalidate pixels if need
    cv = stereo_.compute_cost_volume(img_ref=ref, img_sec=sec, disp_min=-1, disp_max=1,
                                     **{'valid_pixels': 0, 'no_data': 1})
    # The cost volume before invalidation
    # <xarray.DataArray 'cost_volume' (row: 2, col: 5, disp: 5)>
    # array([[[nan, nan, 4. , 2. , 0. ],
    #         [4. , 2. , 0. , 0.5, 1. ],
    #         [0. , 0.5, 1. , 1.5, 2. ],
    #         [1. , 0.5, 0. , 0.5, 1. ],
    #         [1. , 0.5, 0. , nan, nan]],
    #
    #        [[nan, nan, 0. , 0. , 0. ],
    #         [0. , 0. , 0. , 0. , 0. ],
    #         [0. , 0. , 0. , 0. , 0. ],
    #         [0. , 0. , 0. , 0.5, 1. ],
    #         [3. , 2.5, 2. , nan, nan]]], dtype=float32)
    # Coordinates:
    #   * row      (row) int64 0 1
    #   * col      (col) int64 0 1 2 3 4
    #   * disp     (disp) float64 -1.0 -0.5 0.0 0.5 1.0
    cv_ground_truth = np.array([[[np.nan, np.nan, 4, 2, 0],
                                 [4, 2, 0, 0.5, 1],
                                 [0, 0.5, 1, 1.5, 2],
                                 [1, 0.5, 0, np.nan, np.nan],
                                 [1, np.nan, np.nan, np.nan, np.nan]],
                                [[np.nan, np.nan, np.nan, np.nan, 0],
                                 [np.nan, np.nan, 0, np.nan, np.nan],
                                 [0, np.nan, np.nan, np.nan, 0],
                                 [np.nan, np.nan, 0, 0.5, 1],
                                 [3, 2.5, 2, np.nan, np.nan]]], dtype=np.float32)
    # Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
    np.testing.assert_array_equal(cv['cost_volume'], cv_ground_truth)
    # ------------ Test the method with a secondary mask with window size 1 subpixel 4 ------------
    # Mask convention
    # cfg['image']['valid_pixels'] = 5
    # cfg['image']['no_data'] = 7
    # invalid_pixels all other values
    data = np.array(([1, 1, 1],
                     [1, 1, 1]), dtype=np.float64)
    # Reference mask contains valid pixels
    mask = np.array(([5, 5, 5],
                     [5, 5, 5]), dtype=np.int16)
    ref = xr.Dataset({'im': (['row', 'col'], data),
                      'msk': (['row', 'col'], mask)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    data = np.array(([5, 1, 2],
                     [1, 1, 1]), dtype=np.float64)
    mask = np.array(([5, 4, 7],
                     [6, 7, 5]), dtype=np.int16)
    sec = xr.Dataset({'im': (['row', 'col'], data),
                      'msk': (['row', 'col'], mask)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    stereo_ = stereo.AbstractStereo(**{'stereo_method': 'sad', 'window_size': 1, 'subpix': 4})
    # Compute the cost volume and invalidate pixels if need
    cv = stereo_.compute_cost_volume(img_ref=ref, img_sec=sec, disp_min=-1, disp_max=1,
                                     **{'valid_pixels': 5, 'no_data': 7})
    # The cost volume before invalidation
    # <xarray.DataArray 'cost_volume' (row: 2, col: 5, disp: 5)>
    # array([[[ nan, nan, nan, nan, 4. , 3. , 2. , 1. , 0. ],
    #         [4. , 3. , 2. , 1. , 0. , 0.25, 0.5 , 0.75, 1. ],
    #         [0. , 0.25, 0.5 , 0.75, 1. , nan, nan, nan, nan]],
    #
    #        [[ nan, nan, nan, nan, 0. , 0. , 0. , 0. , 0. ],
    #         [0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
    #         [0. , 0. , 0. , 0. , 0. , nan, nan, nan, nan]]],
    #       dtype=float32)
    # Coordinates:
    #   * row      (row) int64 0 1
    #   * col      (col) int64 0 1 2
    #   * disp     (disp) float64 -1.0 -0.75 -0.5 -0.25 0.0 0.25 0.5 0.75 1.0
    cv_ground_truth = np.array([[
        [np.nan, np.nan, np.nan, np.nan, 4., np.nan, np.nan, np.nan, np.nan],
        [4., np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
        [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]],
        [[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
         [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 0.],
         [np.nan, np.nan, np.nan, np.nan, 0., np.nan, np.nan, np.nan, np.nan]]], dtype=np.float32)
    # Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
    np.testing.assert_array_equal(cv['cost_volume'], cv_ground_truth)
    # ------------ Test the method with a reference and secondary mask, window size 3, subpixel 2 ------------
    # Mask convention
    # cfg['image']['valid_pixels'] = 5
    # cfg['image']['no_data'] = 7
    # invalid_pixels all other values
    data = np.array(([1, 1, 1, 3, 4],
                     [1, 2, 1, 0, 2],
                     [2, 1, 0, 1, 2],
                     [1, 1, 1, 1, 4]), dtype=np.float64)
    mask = np.array(([5, 56, 5, 12, 5],
                     [5, 5, 5, 5, 5],
                     [5, 5, 5, 5, 5],
                     [3, 5, 4, 5, 7]), dtype=np.int16)
    ref = xr.Dataset({'im': (['row', 'col'], data),
                      'msk': (['row', 'col'], mask)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    data = np.array(([5, 1, 2, 3, 4],
                     [1, 2, 1, 0, 2],
                     [2, 2, 0, 1, 4],
                     [1, 1, 1, 1, 2]), dtype=np.float64)
    mask = np.array(([7, 5, 5, 5, 5],
                     [5, 5, 5, 65, 5],
                     [5, 5, 5, 5, 5],
                     [5, 23, 5, 5, 2]), dtype=np.int16)
    sec = xr.Dataset({'im': (['row', 'col'], data),
                      'msk': (['row', 'col'], mask)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    stereo_ = stereo.AbstractStereo(**{'stereo_method': 'sad', 'window_size': 3, 'subpix': 2})
    # Compute the cost volume and invalidate pixels if need
    cv = stereo_.compute_cost_volume(img_ref=ref, img_sec=sec, disp_min=-1, disp_max=1,
                                     **{'valid_pixels': 5, 'no_data': 7})
    # Cost volume before invalidation
    # array([[[ nan,  nan, 6. , 6. , 8. ],
    #         [12. , 7. , 2. , 6.5, 13. ],
    #         [10. , 5.5, 3. , nan, nan]],
    #
    #        [[ nan,  nan, 1. , 2. , 5. ],
    #         [ 7. , 4. , 1. , 4.5, 10. ],
    #         [11. , 6.5, 4. , nan, nan]]], dtype=float32)
    # Coordinates:
    #   * row      (row) int64 1 2
    #   * col      (col) int64 1 2 3
    #   * disp     (disp) float64 -1.0 -0.5 0.0 0.5 1.0
    # Cost volume ground truth after invalidation
    cv_ground_truth = np.array([[[np.nan, np.nan, np.nan, np.nan, 8.],
                                 [np.nan, np.nan, 2., np.nan, np.nan],
                                 [10., np.nan, np.nan, np.nan, np.nan]],
                                [[np.nan, np.nan, 1., 2., 5.],
                                 [7., 4., 1., 4.5, 10.],
                                 [np.nan, np.nan, np.nan, np.nan, np.nan]]], dtype=np.float32)
    # Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
    np.testing.assert_array_equal(cv['cost_volume'], cv_ground_truth)
    # ------------ Test the method with a reference and secondary mask with window size 3 and census ------------
    # Mask convention
    # cfg['image']['valid_pixels'] = 5
    # cfg['image']['no_data'] = 7
    # invalid_pixels all other values
    data = np.array(([1, 1, 1, 3],
                     [1, 2, 1, 0],
                     [2, 1, 0, 1],
                     [1, 1, 1, 1]), dtype=np.float64)
    mask = np.array(([7, 5, 5, 2],
                     [0, 5, 5, 5],
                     [5, 5, 5, 0],
                     [0, 5, 5, 7]), dtype=np.int16)
    ref = xr.Dataset({'im': (['row', 'col'], data),
                      'msk': (['row', 'col'], mask)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    data = np.array(([5, 1, 2, 3],
                     [1, 2, 1, 0],
                     [2, 2, 0, 1],
                     [1, 1, 1, 1]), dtype=np.float64)
    mask = np.array(([2, 5, 5, 2],
                     [0, 5, 2, 5],
                     [5, 5, 5, 0],
                     [7, 5, 5, 5]), dtype=np.int16)
    sec = xr.Dataset({'im': (['row', 'col'], data),
                      'msk': (['row', 'col'], mask)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    # Cost volume ground truth after invalidation
    census_ground_truth = np.array([[[np.nan, np.nan, np.nan, np.nan, np.nan],
                                     [3., np.nan, np.nan, np.nan, np.nan]],
                                    [[np.nan, np.nan, np.nan, np.nan, 5.],
                                     [np.nan, np.nan, np.nan, np.nan, np.nan]]], dtype=np.float32)
    # Computes the census transform for the images with window size = 3
    stereo_matcher = stereo.AbstractStereo(**{'stereo_method': 'census', 'window_size': 3, 'subpix': 2})
    census = stereo_matcher.compute_cost_volume(img_ref=ref, img_sec=sec, disp_min=-1, disp_max=1,
                                                **{'valid_pixels': 5, 'no_data': 7})
    # Check if the calculated census cost is equal to the ground truth (same shape and all elements equals)
    np.testing.assert_array_equal(census['cost_volume'], census_ground_truth)
    # ------------ Test the method with a reference and secondary mask with window size 3 and ZNCC ------------
    # Mask convention (back to valid_pixels = 0, no_data = 1)
    data = np.array(([1, 1, 1, 3, 4],
                     [1, 2, 1, 0, 2],
                     [2, 1, 0, 1, 2],
                     [1, 1, 1, 1, 4]), dtype=np.float64)
    # Reference mask contains valid pixels
    mask = np.array(([1, 0, 0, 2, 0],
                     [0, 0, 0, 0, 0],
                     [0, 0, 2, 0, 0],
                     [2, 0, 0, 0, 1]), dtype=np.int16)
    ref = xr.Dataset({'im': (['row', 'col'], data),
                      'msk': (['row', 'col'], mask)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    data = np.array(([5, 1, 2, 3, 4],
                     [1, 2, 1, 0, 2],
                     [2, 2, 0, 1, 4],
                     [1, 1, 1, 1, 2]), dtype=np.float64)
    mask = np.array(([0, 2, 0, 0, 1],
                     [0, 0, 0, 0, 0],
                     [0, 0, 0, 2, 0],
                     [1, 0, 2, 0, 0]), dtype=np.int16)
    sec = xr.Dataset({'im': (['row', 'col'], data),
                      'msk': (['row', 'col'], mask)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    stereo_ = stereo.AbstractStereo(**{'stereo_method': 'zncc', 'window_size': 3, 'subpix': 2})
    # Compute the cost volume and invalidate pixels if need
    cv = stereo_.compute_cost_volume(img_ref=ref, img_sec=sec, disp_min=-1, disp_max=1,
                                     **{'valid_pixels': 0, 'no_data': 1})
    # Cost volume ground truth after invalidation
    cv_ground_truth = np.array([[[np.nan, np.nan, np.nan, np.nan, np.nan],
                                 [0.02146693953705469, 0.5486081, 0.8980265101338747, np.nan, np.nan],
                                 [0.40624999999999994, np.nan, np.nan, np.nan, np.nan]],
                                [[np.nan, np.nan, np.nan, np.nan, 0.2941742027072762],
                                 [np.nan, np.nan, np.nan, np.nan, np.nan],
                                 [np.nan, np.nan, np.nan, np.nan, np.nan]]], dtype=np.float32)
    # Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
    np.testing.assert_array_equal(cv['cost_volume'], cv_ground_truth)
def test_masks_dilatation(self):
    """
    Test the method masks_dilatation

    Checks the dilated reference mask and the shifted secondary masks
    (pixel and sub-pixel precision) against hand-computed ground truths.
    """
    # Mask convention
    # cfg['image']['valid_pixels'] = 5
    # cfg['image']['no_data'] = 7
    # invalid_pixels all other values
    data = np.array(([1, 1, 1, 3, 4],
                     [1, 2, 1, 0, 2],
                     [2, 1, 0, 1, 2],
                     [1, 1, 1, 1, 4]), dtype=np.float64)
    mask = np.array(([5, 56, 5, 12, 5],
                     [5, 5, 5, 5, 5],
                     [5, 5, 5, 5, 5],
                     [3, 5, 4, 5, 7]), dtype=np.int16)
    ref = xr.Dataset({'im': (['row', 'col'], data),
                      'msk': (['row', 'col'], mask)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    data = np.array(([5, 1, 2, 3, 4],
                     [1, 2, 1, 0, 2],
                     [2, 2, 0, 1, 4],
                     [1, 1, 1, 1, 2]), dtype=np.float64)
    mask = np.array(([7, 5, 5, 5, 5],
                     [5, 5, 5, 65, 5],
                     [5, 5, 5, 5, 5],
                     [5, 23, 5, 5, 2]), dtype=np.int16)
    sec = xr.Dataset({'im': (['row', 'col'], data),
                      'msk': (['row', 'col'], mask)},
                     coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
    # masks_dilatation(self, img_ref, img_sec, offset_row_col, window_size, subp, cfg)
    stereo_ = stereo.AbstractStereo(**{'stereo_method': 'sad', 'window_size': 3, 'subpix': 4})
    # Compute the dilated / shifted masks
    mask_ref, masks_sec = stereo_.masks_dilatation(img_ref=ref, img_sec=sec, offset_row_col=int((3 - 1) / 2),
                                                   window_size=3, subp=4, cfg={'valid_pixels': 5, 'no_data': 7})
    # Reference mask ground truth
    gt_ref = np.array([[0, 0, 0],
                       [0, 0, np.nan]], dtype=np.float32)
    gt_ref = xr.DataArray(gt_ref, coords=[[1, 2], [1, 2, 3]], dims=['row', 'col'])
    # Check if the calculated reference masks is equal to the ground truth (same dimensions, coordinates and values)
    # Use unittest assertions instead of raising ValueError by hand, so a
    # mismatch is reported as a test failure rather than an error
    self.assertTrue(mask_ref.equals(gt_ref), 'test_masks_dilatation error : reference mask ')
    # Secondary mask ground truth with pixel precision
    gt_sec_pixel = np.array([[np.nan, 0, np.nan],
                             [0, 0, 0]], dtype=np.float32)
    gt_sec_pixel = xr.DataArray(gt_sec_pixel, coords=[[1, 2], [1, 2, 3]], dims=['row', 'col'])
    self.assertTrue(masks_sec[0].equals(gt_sec_pixel), 'test_masks_dilatation error : secondary mask ')
    # Secondary mask ground truth with sub-pixel precision
    gt_sec_subpixel = np.array([[np.nan, np.nan],
                                [0, 0]], dtype=np.float32)
    gt_sec_subpixel = xr.DataArray(gt_sec_subpixel, coords=[[1, 2], [1.5, 2.5]], dims=['row', 'col'])
    self.assertTrue(masks_sec[1].equals(gt_sec_subpixel), 'test_masks_dilatation error : secondary shifted mask ')
def test_cmax(self):
    """
    Test the cmax attribute of the cost volume

    For each similarity measure, checks that the 'cmax' attribute equals the
    theoretical maximal cost, and that no computed cost exceeds it.
    """
    # Test cmax for the census measure: at most window_size**2 bits can differ
    stereo_matcher = stereo.AbstractStereo(**{'stereo_method': 'census', 'window_size': 3, 'subpix': 1})
    census_cmax_w3 = stereo_matcher.compute_cost_volume(img_ref=self.ref, img_sec=self.sec, disp_min=-1, disp_max=1,
                                                        **{'valid_pixels': 0, 'no_data': 1})
    # Check if the calculated maximal cost is equal to the ground truth
    np.testing.assert_array_equal(census_cmax_w3.attrs['cmax'], 9)
    assert (np.nanmax(census_cmax_w3['cost_volume'].data) <= 9)
    stereo_matcher = stereo.AbstractStereo(**{'stereo_method': 'census', 'window_size': 5, 'subpix': 1})
    census_cmax_w5 = stereo_matcher.compute_cost_volume(img_ref=self.ref, img_sec=self.sec, disp_min=-1, disp_max=1,
                                                        **{'valid_pixels': 0, 'no_data': 1})
    # Check if the calculated maximal cost is equal to the ground truth
    np.testing.assert_array_equal(census_cmax_w5.attrs['cmax'], 25)
    assert (np.nanmax(census_cmax_w5['cost_volume'].data) <= 25)
    # Test cmax for the sad measure: max absolute difference times window area
    stereo_matcher = stereo.AbstractStereo(**{'stereo_method': 'sad', 'window_size': 3, 'subpix': 1})
    sad_cmax_w3 = stereo_matcher.compute_cost_volume(img_ref=self.ref, img_sec=self.sec, disp_min=-1, disp_max=1,
                                                     **{'valid_pixels': 0, 'no_data': 1})
    # Check if the calculated maximal cost is equal to the ground truth
    np.testing.assert_array_equal(sad_cmax_w3.attrs['cmax'], int(abs(4 - 1) * (3**2)))
    assert (np.nanmax(sad_cmax_w3['cost_volume'].data) <= int(abs(4 - 1) * (3**2)))
    stereo_matcher = stereo.AbstractStereo(**{'stereo_method': 'sad', 'window_size': 5, 'subpix': 1})
    sad_cmax_w5 = stereo_matcher.compute_cost_volume(img_ref=self.ref, img_sec=self.sec, disp_min=-1, disp_max=1,
                                                     **{'valid_pixels': 0, 'no_data': 1})
    # Check if the calculated maximal cost is equal to the ground truth
    np.testing.assert_array_equal(sad_cmax_w5.attrs['cmax'], int(abs(4 - 1) * (5**2)))
    # Bug fix: the bound must be checked on the window-size-5 cost volume
    # (was sad_cmax_w3)
    assert (np.nanmax(sad_cmax_w5['cost_volume'].data) <= int(abs(4 - 1) * (5**2)))
    # Test cmax for the ssd measure: max squared difference times window area
    stereo_matcher = stereo.AbstractStereo(**{'stereo_method': 'ssd', 'window_size': 3, 'subpix': 1})
    ssd_cmax_w3 = stereo_matcher.compute_cost_volume(img_ref=self.ref, img_sec=self.sec, disp_min=-1, disp_max=1,
                                                     **{'valid_pixels': 0, 'no_data': 1})
    # Check if the calculated maximal cost is equal to the ground truth
    np.testing.assert_array_equal(ssd_cmax_w3.attrs['cmax'], int(abs(4 - 1)**2 * (3**2)))
    # Bug fix: the bound must be checked on the ssd cost volume (was sad_cmax_w3)
    assert (np.nanmax(ssd_cmax_w3['cost_volume'].data) <= int(abs(4 - 1)**2 * (3**2)))
    stereo_matcher = stereo.AbstractStereo(**{'stereo_method': 'ssd', 'window_size': 5, 'subpix': 1})
    ssd_cmax_w5 = stereo_matcher.compute_cost_volume(img_ref=self.ref, img_sec=self.sec, disp_min=-1, disp_max=1,
                                                     **{'valid_pixels': 0, 'no_data': 1})
    # Check if the calculated maximal cost is equal to the ground truth
    np.testing.assert_array_equal(ssd_cmax_w5.attrs['cmax'], int(abs(4 - 1)**2 * (5**2)))
    # Bug fix: the bound must be checked on the window-size-5 ssd cost volume
    # (was sad_cmax_w3)
    assert (np.nanmax(ssd_cmax_w5['cost_volume'].data) <= int(abs(4 - 1)**2 * (5**2)))
    # Test cmax for the zncc measure: correlation is bounded by 1
    stereo_matcher = stereo.AbstractStereo(**{'stereo_method': 'zncc', 'window_size': 3, 'subpix': 1})
    zncc_cmax = stereo_matcher.compute_cost_volume(img_ref=self.ref, img_sec=self.sec, disp_min=-1, disp_max=1,
                                                   **{'valid_pixels': 0, 'no_data': 1})
    # Check if the calculated maximal cost is equal to the ground truth
    np.testing.assert_array_equal(zncc_cmax.attrs['cmax'], 1)
    assert (np.nanmax(zncc_cmax['cost_volume'].data) <= 1)
def setup_logging(path='logging.json', default_level=logging.WARNING):
    """
    Setup the logging configuration

    :param path: path to the json logging configuration file
    :type path: string
    :param default_level: logging level used when the configuration file is missing
    :type default_level: logging level
    """
    if os.path.exists(path):
        # Load the dictionary-based logging configuration from the json file.
        # Specify the encoding explicitly so the result does not depend on the
        # platform default.
        with open(path, 'rt', encoding='utf-8') as config_file:
            config = json.load(config_file)
        logging.config.dictConfig(config)
    else:
        # No configuration file: fall back to a basic configuration
        logging.basicConfig(level=default_level)
# Configure logging, then run the whole test suite
if __name__ == '__main__':
    setup_logging()
    unittest.main()
| 51.049579
| 120
| 0.474932
| 7,228
| 54,572
| 3.477726
| 0.044964
| 0.022914
| 0.024347
| 0.072005
| 0.847675
| 0.827306
| 0.809524
| 0.79019
| 0.776982
| 0.761189
| 0
| 0.07023
| 0.366745
| 54,572
| 1,068
| 121
| 51.097378
| 0.657156
| 0.243971
| 0
| 0.565292
| 0
| 0
| 0.068276
| 0.001552
| 0
| 0
| 0
| 0
| 0.075601
| 1
| 0.025773
| false
| 0
| 0.013746
| 0
| 0.041237
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
90a6c60b8bc83fefad694c98f264f7f22f86d578
| 30
|
py
|
Python
|
meracanapi/dynamodb/__init__.py
|
meracan/meracan-api
|
aff04f3d9d0dce46fe0b8ce89394ec22823a0ea4
|
[
"MIT"
] | null | null | null |
meracanapi/dynamodb/__init__.py
|
meracan/meracan-api
|
aff04f3d9d0dce46fe0b8ce89394ec22823a0ea4
|
[
"MIT"
] | null | null | null |
meracanapi/dynamodb/__init__.py
|
meracan/meracan-api
|
aff04f3d9d0dce46fe0b8ce89394ec22823a0ea4
|
[
"MIT"
] | null | null | null |
from .dynamodb import DynamoDB
| 30
| 30
| 0.866667
| 4
| 30
| 6.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 30
| 1
| 30
| 30
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
90c842e98bccecd20ae9d56b28dc84bcde1c9c80
| 4,635
|
py
|
Python
|
src/interfaces/api/serializers/organism.py
|
cruz-f/protrend
|
b72c17fa1606b4cf5ca6d60c51737b43ba3fdbc1
|
[
"MIT"
] | null | null | null |
src/interfaces/api/serializers/organism.py
|
cruz-f/protrend
|
b72c17fa1606b4cf5ca6d60c51737b43ba3fdbc1
|
[
"MIT"
] | 1
|
2022-02-11T18:38:39.000Z
|
2022-02-11T18:38:39.000Z
|
src/interfaces/api/serializers/organism.py
|
cruz-f/protrend
|
b72c17fa1606b4cf5ca6d60c51737b43ba3fdbc1
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from constants import help_text
from data import Organism
from interfaces.serializers.base import BaseSerializer
from interfaces.serializers.fields import SourceField, URLField
from interfaces.serializers.relationship import RelationshipSerializer, SourceRelationshipSerializer
class OrganismListSerializer(BaseSerializer):
    """
    Serializer for Organism list endpoints.

    Exposes name/taxonomy/species/strain, accepts genome-related fields as
    write-only input (they are not rendered in list responses), and adds a
    hyperlink to the organism detail view.
    """
    model = Organism

    # properties
    name = serializers.CharField(required=True, max_length=200, help_text=help_text.organism_name)
    ncbi_taxonomy = serializers.IntegerField(required=False, min_value=0, help_text=help_text.ncbi_taxonomy)
    species = serializers.CharField(required=False, max_length=150, help_text=help_text.species)
    strain = serializers.CharField(required=False, max_length=150, help_text=help_text.strain)

    # write-only: accepted on create/update but never serialized back out
    refseq_accession = serializers.CharField(required=False, write_only=True, max_length=50,
                                             help_text=help_text.refseq_accession)
    refseq_ftp = serializers.CharField(required=False, write_only=True, max_length=250,
                                       help_text=help_text.refseq_ftp)
    genbank_accession = serializers.CharField(required=False, write_only=True, max_length=50,
                                              help_text=help_text.genbank_accession)
    genbank_ftp = serializers.CharField(required=False, write_only=True, max_length=250,
                                        help_text=help_text.genbank_ftp)
    ncbi_assembly = serializers.IntegerField(required=False, min_value=0, write_only=True,
                                             help_text=help_text.ncbi_assembly)
    assembly_accession = serializers.CharField(required=False, write_only=True, max_length=50,
                                               help_text=help_text.assembly_accession)

    # url: read-only hyperlink to the detail endpoint, keyed on protrend_id
    url = URLField(read_only=True,
                   view_name='organisms-detail',
                   lookup_field='protrend_id',
                   lookup_url_kwarg='protrend_id')
class OrganismDetailSerializer(OrganismListSerializer):
    """
    Serializer for the Organism detail endpoint.

    Re-declares the genome fields WITHOUT write_only so they appear in
    detail responses, drops the self-referencing url field, and attaches
    read-only relationship hyperlinks (regulators, genes, binding sites,
    interactions) keyed on protrend_id.
    """
    url = None  # the detail view does not link to itself

    # genome fields: readable here (list serializer had them write-only)
    refseq_accession = serializers.CharField(required=False, max_length=50, help_text=help_text.refseq_accession)
    refseq_ftp = serializers.CharField(required=False, max_length=250, help_text=help_text.refseq_ftp)
    genbank_accession = serializers.CharField(required=False, max_length=50, help_text=help_text.genbank_accession)
    genbank_ftp = serializers.CharField(required=False, max_length=250, help_text=help_text.genbank_ftp)
    ncbi_assembly = serializers.IntegerField(required=False, min_value=0, help_text=help_text.ncbi_assembly)
    assembly_accession = serializers.CharField(required=False, max_length=50, help_text=help_text.assembly_accession)

    # relationships: all read-only hyperlinked collections
    data_source = SourceRelationshipSerializer(read_only=True,
                                               child=SourceField(read_only=True))
    regulator = RelationshipSerializer(read_only=True,
                                       child=serializers.HyperlinkedRelatedField(
                                           read_only=True,
                                           view_name='regulators-detail',
                                           lookup_field='protrend_id',
                                           lookup_url_kwarg='protrend_id'))
    gene = RelationshipSerializer(read_only=True,
                                  child=serializers.HyperlinkedRelatedField(
                                      read_only=True,
                                      view_name='genes-detail',
                                      lookup_field='protrend_id',
                                      lookup_url_kwarg='protrend_id'))
    tfbs = RelationshipSerializer(read_only=True,
                                  child=serializers.HyperlinkedRelatedField(
                                      read_only=True,
                                      view_name='binding-sites-detail',
                                      lookup_field='protrend_id',
                                      lookup_url_kwarg='protrend_id'))
    regulatory_interaction = RelationshipSerializer(read_only=True,
                                                    child=serializers.HyperlinkedRelatedField(
                                                        read_only=True,
                                                        view_name='interactions-detail',
                                                        lookup_field='protrend_id',
                                                        lookup_url_kwarg='protrend_id'))
| 60.986842
| 117
| 0.605178
| 429
| 4,635
| 6.244755
| 0.174825
| 0.098544
| 0.071669
| 0.095558
| 0.724524
| 0.717059
| 0.712579
| 0.712579
| 0.712579
| 0.699888
| 0
| 0.01155
| 0.327508
| 4,635
| 75
| 118
| 61.8
| 0.847931
| 0.008414
| 0
| 0.274194
| 0
| 0
| 0.042257
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.096774
| 0
| 0.516129
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
90d1540530cd0bc37d6d2d3bebe83bb22376d922
| 113
|
py
|
Python
|
ocrcopy/__init__.py
|
jasonfyw/ocr-copy
|
cb79553f0b323759dd411d1fa5e0379c8ae31ff5
|
[
"MIT"
] | 1
|
2022-03-03T14:27:26.000Z
|
2022-03-03T14:27:26.000Z
|
ocrcopy/__init__.py
|
jasonfyw/ocr-copy
|
cb79553f0b323759dd411d1fa5e0379c8ae31ff5
|
[
"MIT"
] | null | null | null |
ocrcopy/__init__.py
|
jasonfyw/ocr-copy
|
cb79553f0b323759dd411d1fa5e0379c8ae31ff5
|
[
"MIT"
] | null | null | null |
from ocrcopy.controller import Controller
from ocrcopy.overlay import Overlay
from ocrcopy.ocrcopy import OCRCopy
| 37.666667
| 41
| 0.876106
| 15
| 113
| 6.6
| 0.333333
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097345
| 113
| 3
| 42
| 37.666667
| 0.970588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
294dc58ea58d1fb4a0ccdc5242a5d8d3d33e730a
| 2,700
|
py
|
Python
|
utils.py
|
AstroJacobLi/FootballTeamStrategy
|
d8649d38ed80217f226cfd7de7a5e53969e078b9
|
[
"MIT"
] | 3
|
2020-02-14T06:13:14.000Z
|
2020-02-15T09:06:19.000Z
|
utils.py
|
AstroJacobLi/FootballTeamStrategy
|
d8649d38ed80217f226cfd7de7a5e53969e078b9
|
[
"MIT"
] | null | null | null |
utils.py
|
AstroJacobLi/FootballTeamStrategy
|
d8649d38ed80217f226cfd7de7a5e53969e078b9
|
[
"MIT"
] | null | null | null |
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import networkx as nx
def calc_mean_std(x):
    """Return ``(mean, standard_error)`` of *x*: sample std (ddof=1) over sqrt(n)."""
    sample = np.asarray(x)
    mean = np.mean(sample)
    stderr = np.std(sample, ddof=1) / np.sqrt(len(x))
    return (mean, stderr)
def color_func(p):
    """Map a value *p* to a plot colour: orange below 0.05, blue above 0.2, green otherwise."""
    if p < 0.05:
        return 'orange'
    if p > 0.2:
        return 'dodgerblue'
    return 'seagreen'
def match_i_Huskies_passing_table(filename, match_i):
    '''
    Match i-th Huskies players passing table.

    :param filename: path to the passing-events CSV (columns MatchID, TeamID,
        OriginPlayerID, DestinationPlayerID)
    :param match_i: MatchID to select
    Return: {playername: [origin, destination]} where origin/destination are
    the number of passes the player made / received in that match.
    '''
    passing = pd.read_csv(filename)
    # Filter once instead of testing MatchID/TeamID on every row; this also
    # avoids the O(n) chained Series lookups of the previous per-index loop.
    subset = passing[(passing['MatchID'] == match_i)
                     & (passing['TeamID'] == 'Huskies')]
    player_dic = {}
    for origin, destination in zip(subset['OriginPlayerID'],
                                   subset['DestinationPlayerID']):
        # Origin is inserted before destination, preserving the original
        # insertion order of the dictionary.
        player_dic.setdefault(origin, [0, 0])[0] += 1
        player_dic.setdefault(destination, [0, 0])[1] += 1
    return player_dic
def match_i_passing_table(filename, team_id, match_i):
    '''
    Match i-th {TeamID} players passing table.

    :param filename: path to the passing-events CSV (columns MatchID, TeamID,
        OriginPlayerID, DestinationPlayerID)
    :param team_id: team whose passes are counted
    :param match_i: MatchID to select, or the string 'all' for every match
    Return: {playername: [origin, destination]} where origin/destination are
    the number of passes the player made / received.
    '''
    passing = pd.read_csv(filename)
    # Build a single row mask; the previous implementation duplicated the
    # whole counting loop for the 'all' and single-match cases.
    mask = passing['TeamID'] == team_id
    if match_i != 'all':
        mask &= passing['MatchID'] == match_i
    subset = passing[mask]
    player_dic = {}
    for origin, destination in zip(subset['OriginPlayerID'],
                                   subset['DestinationPlayerID']):
        # Origin is inserted before destination, preserving the original
        # insertion order of the dictionary.
        player_dic.setdefault(origin, [0, 0])[0] += 1
        player_dic.setdefault(destination, [0, 0])[1] += 1
    return player_dic
| 37.5
| 78
| 0.547037
| 310
| 2,700
| 4.629032
| 0.2
| 0.137979
| 0.133798
| 0.050174
| 0.759582
| 0.74007
| 0.74007
| 0.74007
| 0.725436
| 0.725436
| 0
| 0.016376
| 0.321481
| 2,700
| 71
| 79
| 38.028169
| 0.766921
| 0.063704
| 0
| 0.684211
| 0
| 0
| 0.14873
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070175
| false
| 0.526316
| 0.087719
| 0.017544
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
2965eadfb4546c08af43ecd5e79cfac7cf8a5334
| 29
|
py
|
Python
|
timm/models/resnet_wsl/__init__.py
|
shenyunhang/pytorch-image-models
|
a46205d3e7db602797f39aa2b3a814a52a94f002
|
[
"Apache-2.0"
] | null | null | null |
timm/models/resnet_wsl/__init__.py
|
shenyunhang/pytorch-image-models
|
a46205d3e7db602797f39aa2b3a814a52a94f002
|
[
"Apache-2.0"
] | null | null | null |
timm/models/resnet_wsl/__init__.py
|
shenyunhang/pytorch-image-models
|
a46205d3e7db602797f39aa2b3a814a52a94f002
|
[
"Apache-2.0"
] | null | null | null |
from .resnet_wsl_v2 import *
| 14.5
| 28
| 0.793103
| 5
| 29
| 4.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04
| 0.137931
| 29
| 1
| 29
| 29
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
296de340b155ea728c37b5660efdc1118633f1f1
| 55,060
|
py
|
Python
|
tests/test_views.py
|
mariaFernando/reana-workflow-controller
|
1219dd4b490512523d27b6f805435340d55e62ae
|
[
"MIT"
] | null | null | null |
tests/test_views.py
|
mariaFernando/reana-workflow-controller
|
1219dd4b490512523d27b6f805435340d55e62ae
|
[
"MIT"
] | null | null | null |
tests/test_views.py
|
mariaFernando/reana-workflow-controller
|
1219dd4b490512523d27b6f805435340d55e62ae
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2017, 2018 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""REANA-Workflow-Controller module tests."""
import io
import json
import os
import uuid
from zipfile import ZipFile
import fs
import mock
import pytest
from flask import url_for
from reana_db.models import (
Job,
JobCache,
Workflow,
RunStatus,
InteractiveSession,
)
from werkzeug.utils import secure_filename
from reana_workflow_controller.rest.utils import (
create_workflow_workspace,
delete_workflow,
)
from reana_workflow_controller.rest.workflows_status import START, STOP
from reana_workflow_controller.workflow_run_manager import WorkflowRunManager
# Workflow status expected right after a START/STOP transition is requested;
# used by the status tests below to check the API response.
status_dict = {
    START: RunStatus.pending,
    STOP: RunStatus.finished,
}
def test_get_workflows(app, session, default_user, cwl_workflow_with_name):
    """Test listing all workflows."""
    with app.test_client() as client:
        # Seed one finished workflow directly through the DB session.
        workflow_uuid = uuid.uuid4()
        workflow_name = "my_test_workflow"
        workflow = Workflow(
            id_=workflow_uuid,
            name=workflow_name,
            status=RunStatus.finished,
            owner_id=default_user.id_,
            reana_specification=cwl_workflow_with_name["reana_specification"],
            type_=cwl_workflow_with_name["reana_specification"]["type"],
            logs="",
        )
        session.add(workflow)
        session.commit()
        res = client.get(
            url_for("workflows.get_workflows"), query_string={"user": default_user.id_}
        )
        assert res.status_code == 200
        response_data = json.loads(res.get_data(as_text=True))["items"]
        # "created"/"progress" are server-generated, so they are copied from
        # the response itself rather than asserted against fixed values.
        expected_data = [
            {
                "id": str(workflow.id_),
                "name": workflow.name + ".1",  # Add run_number
                "status": workflow.status.name,
                "user": str(workflow.owner_id),
                "created": response_data[0]["created"],
                "progress": response_data[0]["progress"],
                "size": {"raw": -1, "human_readable": ""},
            }
        ]
        assert response_data == expected_data
def test_get_workflows_wrong_user(app):
    """Test list of workflows for unknown user."""
    with app.test_client() as client:
        # A freshly generated UUID cannot belong to any registered user.
        unknown_user_id = uuid.uuid4()
        response = client.get(
            url_for("workflows.get_workflows"),
            query_string={"user": unknown_user_id},
        )
        assert response.status_code == 404
def test_get_workflows_missing_user(app):
    """Test listing all workflows with missing user."""
    with app.test_client() as client:
        # Omitting the "user" query parameter entirely is a bad request.
        response = client.get(url_for("workflows.get_workflows"), query_string={})
        assert response.status_code == 400
def test_create_workflow_with_name(
    app, session, default_user, cwl_workflow_with_name, tmp_shared_volume_path
):
    """Test create workflow and its workspace by specifying a name."""
    with app.test_client() as client:
        res = client.post(
            url_for("workflows.create_workflow"),
            query_string={
                "user": default_user.id_,
                "workspace_root_path": tmp_shared_volume_path,
            },
            content_type="application/json",
            data=json.dumps(cwl_workflow_with_name),
        )
        # 201 Created on successful workflow creation.
        assert res.status_code == 201
        response_data = json.loads(res.get_data(as_text=True))
        # Check workflow fetch by id
        workflow_by_id = Workflow.query.filter(
            Workflow.id_ == response_data.get("workflow_id")
        ).first()
        assert workflow_by_id
        # Check workflow fetch by name and that name of created workflow
        # is the same that was supplied to `api.create_workflow`
        workflow_by_name = Workflow.query.filter(
            Workflow.name == "my_test_workflow"
        ).first()
        assert workflow_by_name
        workflow = workflow_by_id
        # Check that the workflow workspace exists
        assert os.path.exists(workflow.workspace_path)
def test_create_workflow_without_name(
    app, session, default_user, cwl_workflow_without_name, tmp_shared_volume_path
):
    """Test create workflow and its workspace without specifying a name."""
    with app.test_client() as client:
        res = client.post(
            url_for("workflows.create_workflow"),
            query_string={
                "user": default_user.id_,
                "workspace_root_path": tmp_shared_volume_path,
            },
            content_type="application/json",
            data=json.dumps(cwl_workflow_without_name),
        )
        assert res.status_code == 201
        response_data = json.loads(res.get_data(as_text=True))
        # Check workflow fetch by id
        workflow_by_id = Workflow.query.filter(
            Workflow.id_ == response_data.get("workflow_id")
        ).first()
        assert workflow_by_id
        # Check workflow fetch by name and that name of created workflow
        # is the same that was supplied to `api.create_workflow`
        # Local import: the package config supplies the fallback name used
        # when no workflow name is given.
        import reana_workflow_controller
        default_workflow_name = (
            reana_workflow_controller.config.DEFAULT_NAME_FOR_WORKFLOWS
        )
        workflow_by_name = Workflow.query.filter(
            Workflow.name == default_workflow_name
        ).first()
        assert workflow_by_name
        workflow = workflow_by_id
        # Check that the workflow workspace exists
        assert os.path.exists(workflow.workspace_path)
def test_create_workflow_wrong_user(
    app, session, tmp_shared_volume_path, cwl_workflow_with_name
):
    """Test create workflow providing unknown user."""
    with app.test_client() as client:
        random_user_uuid = uuid.uuid4()
        res = client.post(
            url_for("workflows.create_workflow"),
            query_string={
                "user": random_user_uuid,
                "workspace_root_path": tmp_shared_volume_path,
            },
            content_type="application/json",
            data=json.dumps(cwl_workflow_with_name),
        )
        # Unknown user -> 404 and no workflow must be persisted.
        assert res.status_code == 404
        response_data = json.loads(res.get_data(as_text=True))
        workflow = Workflow.query.filter(
            Workflow.id_ == response_data.get("workflow_id")
        ).first()
        # workflow exists in DB
        assert not workflow
def test_download_missing_file(
    app, default_user, cwl_workflow_with_name, tmp_shared_volume_path
):
    """Test download missing file."""
    with app.test_client() as client:
        # create workflow
        res = client.post(
            url_for("workflows.create_workflow"),
            query_string={
                "user": default_user.id_,
                "workspace_root_path": tmp_shared_volume_path,
            },
            content_type="application/json",
            data=json.dumps(cwl_workflow_with_name),
        )
        assert res.status_code == 201
        response_data = json.loads(res.get_data(as_text=True))
        workflow_uuid = response_data.get("workflow_id")
        # Request a file that was never uploaded to the workspace.
        file_name = "input.csv"
        res = client.get(
            url_for(
                "workspaces.download_file",
                workflow_id_or_name=workflow_uuid,
                file_name=file_name,
            ),
            query_string={"user": default_user.id_},
            content_type="application/json",
            data=json.dumps(cwl_workflow_with_name),
        )
        assert res.status_code == 404
        response_data = json.loads(res.get_data(as_text=True))
        assert response_data == {"message": "input.csv does not exist."}
def test_download_file(
    app,
    session,
    default_user,
    tmp_shared_volume_path,
    cwl_workflow_with_name,
    sample_serial_workflow_in_db,
):
    """Test download file from workspace."""
    with app.test_client() as client:
        # create workflow
        res = client.post(
            url_for("workflows.create_workflow"),
            query_string={
                "user": default_user.id_,
                "workspace_root_path": tmp_shared_volume_path,
            },
            content_type="application/json",
            data=json.dumps(cwl_workflow_with_name),
        )
        response_data = json.loads(res.get_data(as_text=True))
        workflow_uuid = response_data.get("workflow_id")
        workflow = Workflow.query.filter(Workflow.id_ == workflow_uuid).first()
        # create file
        # NOTE(review): name deliberately contains a space to exercise
        # filename handling on download.
        file_name = "output name.csv"
        file_binary_content = b"1,2,3,4\n5,6,7,8"
        # write file in the workflow workspace under `outputs` directory:
        # we use `secure_filename` here because
        # we use it in server side when adding
        # files
        absolute_path_workflow_workspace = workflow.workspace_path
        file_path = os.path.join(absolute_path_workflow_workspace, file_name)
        # because outputs directory doesn't exist by default
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        with open(file_path, "wb+") as f:
            f.write(file_binary_content)
        res = client.get(
            url_for(
                "workspaces.download_file",
                workflow_id_or_name=workflow_uuid,
                file_name=file_name,
            ),
            query_string={"user": default_user.id_},
            content_type="application/json",
            data=json.dumps(cwl_workflow_with_name),
        )
        # Downloaded bytes must match exactly what was written.
        assert res.data == file_binary_content
def test_download_file_with_path(
    app, session, default_user, tmp_shared_volume_path, cwl_workflow_with_name
):
    """Test download file prepended with path."""
    with app.test_client() as client:
        # create workflow
        res = client.post(
            url_for("workflows.create_workflow"),
            query_string={
                "user": default_user.id_,
                "workspace_root_path": tmp_shared_volume_path,
            },
            content_type="application/json",
            data=json.dumps(cwl_workflow_with_name),
        )
        response_data = json.loads(res.get_data(as_text=True))
        workflow_uuid = response_data.get("workflow_id")
        workflow = Workflow.query.filter(Workflow.id_ == workflow_uuid).first()
        # create file
        # File lives in a nested subdirectory of the workspace.
        file_name = "first/1991/output.csv"
        file_binary_content = b"1,2,3,4\n5,6,7,8"
        # write file in the workflow workspace under `outputs` directory:
        # we use `secure_filename` here because
        # we use it in server side when adding
        # files
        file_path = os.path.join(workflow.workspace_path, file_name)
        # because outputs directory doesn't exist by default
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        with open(file_path, "wb+") as f:
            f.write(file_binary_content)
        res = client.get(
            url_for(
                "workspaces.download_file",
                workflow_id_or_name=workflow_uuid,
                file_name=file_name,
            ),
            query_string={"user": default_user.id_},
            content_type="application/json",
            data=json.dumps(cwl_workflow_with_name),
        )
        assert res.data == file_binary_content
def test_download_dir_or_wildcard(
    app, session, default_user, tmp_shared_volume_path, cwl_workflow_with_name
):
    """Test download directory or file(s) matching a wildcard pattern."""
    # Helper: issue a download request for *pattern* against the workflow.
    def _download(pattern, workflow_uuid):
        return client.get(
            url_for(
                "workspaces.download_file",
                workflow_id_or_name=workflow_uuid,
                file_name=pattern,
            ),
            query_string={"user": default_user.id_},
            content_type="application/json",
            data=json.dumps(cwl_workflow_with_name),
        )

    with app.test_client() as client:
        # create workflow
        res = client.post(
            url_for("workflows.create_workflow"),
            query_string={
                "user": default_user.id_,
                "workspace_root_path": tmp_shared_volume_path,
            },
            content_type="application/json",
            data=json.dumps(cwl_workflow_with_name),
        )
        response_data = json.loads(res.get_data(as_text=True))
        workflow_uuid = response_data.get("workflow_id")
        workflow = Workflow.query.filter(Workflow.id_ == workflow_uuid).first()
        # create files
        files = {
            "foo/1.txt": b"txt in foo dir",
            "foo/bar/1.csv": b"csv in bar dir",
            "foo/bar/baz/2.csv": b"csv in baz dir",
        }
        for file_name, file_binary_content in files.items():
            file_path = os.path.join(workflow.workspace_path, file_name)
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            with open(file_path, "wb+") as f:
                f.write(file_binary_content)
        # download directory by name
        # Directories come back as a zip archive containing every file.
        res = _download("foo", workflow_uuid)
        assert res.headers.get("Content-Type") == "application/zip"
        zipfile = ZipFile(io.BytesIO(res.data))
        assert len(zipfile.filelist) == 3
        for file_name, file_binary_content in files.items():
            assert zipfile.read(file_name) == file_binary_content
        res = _download("foo/bar", workflow_uuid)
        assert res.headers.get("Content-Type") == "application/zip"
        zipfile = ZipFile(io.BytesIO(res.data))
        assert len(zipfile.filelist) == 2
        zipped_file_names = [f.filename for f in zipfile.filelist]
        assert "foo/1.txt" not in zipped_file_names
        assert zipfile.read("foo/bar/1.csv") == files["foo/bar/1.csv"]
        assert zipfile.read("foo/bar/baz/2.csv") == files["foo/bar/baz/2.csv"]
        # download by glob pattern
        res = _download("**/*.csv", workflow_uuid)
        assert res.headers.get("Content-Type") == "application/zip"
        zipfile = ZipFile(io.BytesIO(res.data))
        assert len(zipfile.filelist) == 2
        res = _download("**/1.*", workflow_uuid)
        assert res.headers.get("Content-Type") == "application/zip"
        zipfile = ZipFile(io.BytesIO(res.data))
        assert len(zipfile.filelist) == 2
        # A glob matching exactly one file is served raw, not zipped.
        res = _download("**/*.txt", workflow_uuid)
        assert res.headers.get("Content-Type") != "application/zip"
        assert res.data == files["foo/1.txt"]
def test_get_files(
    app, session, default_user, tmp_shared_volume_path, cwl_workflow_with_name
):
    """Test get files list."""
    with app.test_client() as client:
        # create workflow
        res = client.post(
            url_for("workflows.create_workflow"),
            query_string={
                "user": default_user.id_,
                "workspace_root_path": tmp_shared_volume_path,
            },
            content_type="application/json",
            data=json.dumps(cwl_workflow_with_name),
        )
        response_data = json.loads(res.get_data(as_text=True))
        workflow_uuid = response_data.get("workflow_id")
        workflow = Workflow.query.filter(Workflow.id_ == workflow_uuid).first()
        # create file
        # Populate the workspace with 5 files, each inside its own
        # randomly-named subdirectory, via the pyfilesystem API.
        absolute_path_workflow_workspace = workflow.workspace_path
        fs_ = fs.open_fs(absolute_path_workflow_workspace)
        test_files = []
        for i in range(5):
            file_name = "{0}.csv".format(i)
            subdir_name = str(uuid.uuid4())
            subdir = fs.path.join(subdir_name)
            fs_.makedirs(subdir)
            fs_.touch("{0}/{1}".format(subdir, file_name))
            test_files.append(os.path.join(subdir_name, file_name))
        res = client.get(
            url_for("workspaces.get_files", workflow_id_or_name=workflow_uuid),
            query_string={"user": default_user.id_},
            content_type="application/json",
            data=json.dumps(cwl_workflow_with_name),
        )
        # Every listed file must be one of the files created above.
        for file_ in json.loads(res.data.decode())["items"]:
            assert file_.get("name") in test_files
def test_get_files_unknown_workflow(app, default_user):
    """Test get list of files for non existing workflow."""
    with app.test_client() as client:
        # create workflow
        random_workflow_uuid = str(uuid.uuid4())
        res = client.get(
            url_for("workspaces.get_files", workflow_id_or_name=random_workflow_uuid),
            query_string={"user": default_user.id_},
            content_type="application/json",
        )
        assert res.status_code == 404
        response_data = json.loads(res.get_data(as_text=True))
        # Error message must mention the unknown id and the REANA_WORKON hint.
        expected_data = {
            "message": "REANA_WORKON is set to {0}, but "
            "that workflow does not exist. "
            "Please set your REANA_WORKON environment "
            "variable appropriately.".format(random_workflow_uuid)
        }
        assert response_data == expected_data
def test_get_workflow_status_with_uuid(
    app, session, default_user, cwl_workflow_with_name, tmp_shared_volume_path
):
    """Test get workflow status."""
    with app.test_client() as client:
        # create workflow
        res = client.post(
            url_for("workflows.create_workflow"),
            query_string={
                "user": default_user.id_,
                "workspace_root_path": tmp_shared_volume_path,
            },
            content_type="application/json",
            data=json.dumps(cwl_workflow_with_name),
        )
        response_data = json.loads(res.get_data(as_text=True))
        workflow_uuid = response_data.get("workflow_id")
        workflow = Workflow.query.filter(Workflow.id_ == workflow_uuid).first()
        res = client.get(
            url_for("statuses.get_workflow_status", workflow_id_or_name=workflow_uuid),
            query_string={"user": default_user.id_},
            content_type="application/json",
            data=json.dumps(cwl_workflow_with_name),
        )
        json_response = json.loads(res.data.decode())
        assert json_response.get("status") == workflow.status.name
        # Change the status directly in the DB and check the endpoint
        # reflects the new value.
        workflow.status = RunStatus.finished
        session.commit()
        res = client.get(
            url_for("statuses.get_workflow_status", workflow_id_or_name=workflow_uuid),
            query_string={"user": default_user.id_},
            content_type="application/json",
            data=json.dumps(cwl_workflow_with_name),
        )
        json_response = json.loads(res.data.decode())
        assert json_response.get("status") == workflow.status.name
def test_get_workflow_status_with_name(
    app, session, default_user, cwl_workflow_with_name
):
    """Test get workflow status."""
    with app.test_client() as client:
        # create workflow
        workflow_uuid = uuid.uuid4()
        workflow_name = "my_test_workflow"
        workflow = Workflow(
            id_=workflow_uuid,
            name=workflow_name,
            status=RunStatus.finished,
            owner_id=default_user.id_,
            reana_specification=cwl_workflow_with_name["reana_specification"],
            type_=cwl_workflow_with_name["reana_specification"]["type"],
            logs="",
        )
        session.add(workflow)
        session.commit()
        workflow = Workflow.query.filter(Workflow.name == workflow_name).first()
        # The workflow is addressed by "<name>.1" — name plus run number.
        res = client.get(
            url_for(
                "statuses.get_workflow_status", workflow_id_or_name=workflow_name + ".1"
            ),
            query_string={"user": default_user.id_},
            content_type="application/json",
            data=json.dumps(cwl_workflow_with_name),
        )
        json_response = json.loads(res.data.decode())
        assert json_response.get("status") == workflow.status.name
        workflow.status = RunStatus.finished
        session.commit()
        res = client.get(
            url_for(
                "statuses.get_workflow_status", workflow_id_or_name=workflow_name + ".1"
            ),
            query_string={"user": default_user.id_},
            content_type="application/json",
            data=json.dumps(cwl_workflow_with_name),
        )
        json_response = json.loads(res.data.decode())
        assert json_response.get("status") == workflow.status.name
def test_get_workflow_status_unauthorized(
    app, default_user, cwl_workflow_with_name, tmp_shared_volume_path
):
    """Test get workflow status unauthorized."""
    with app.test_client() as client:
        # create workflow
        res = client.post(
            url_for("workflows.create_workflow"),
            query_string={
                "user": default_user.id_,
                "workspace_root_path": tmp_shared_volume_path,
            },
            content_type="application/json",
            data=json.dumps(cwl_workflow_with_name),
        )
        response_data = json.loads(res.get_data(as_text=True))
        workflow_created_uuid = response_data.get("workflow_id")
        # A different (random) user must not see this workflow's status.
        random_user_uuid = uuid.uuid4()
        res = client.get(
            url_for(
                "statuses.get_workflow_status",
                workflow_id_or_name=workflow_created_uuid,
            ),
            query_string={"user": random_user_uuid},
            content_type="application/json",
            data=json.dumps(cwl_workflow_with_name),
        )
        # 403 Forbidden: requester is not the owner.
        assert res.status_code == 403
def test_get_workflow_status_unknown_workflow(
    app, default_user, cwl_workflow_with_name
):
    """Test get workflow status for unknown workflow."""
    with app.test_client() as client:
        # create workflow
        res = client.post(
            url_for("workflows.create_workflow"),
            query_string={"user": default_user.id_},
            content_type="application/json",
            data=json.dumps(cwl_workflow_with_name),
        )
        # Query the status of a workflow id that was never created.
        random_workflow_uuid = uuid.uuid4()
        res = client.get(
            url_for(
                "statuses.get_workflow_status", workflow_id_or_name=random_workflow_uuid
            ),
            query_string={"user": default_user.id_},
            content_type="application/json",
            data=json.dumps(cwl_workflow_with_name),
        )
        assert res.status_code == 404
def test_set_workflow_status(
    app,
    corev1_api_client_with_user_secrets,
    user_secrets,
    session,
    default_user,
    yadage_workflow_with_name,
    tmp_shared_volume_path,
):
    """Test set workflow status "Start"."""
    with app.test_client() as client:
        # create workflow
        res = client.post(
            url_for("workflows.create_workflow"),
            query_string={
                "user": default_user.id_,
                "workspace_root_path": tmp_shared_volume_path,
            },
            content_type="application/json",
            data=json.dumps(yadage_workflow_with_name),
        )
        response_data = json.loads(res.get_data(as_text=True))
        workflow_created_uuid = response_data.get("workflow_id")
        workflow = Workflow.query.filter(Workflow.id_ == workflow_created_uuid).first()
        assert workflow.status == RunStatus.created
        payload = START
        # Mock the Kubernetes batch API so no real cluster call is made.
        with mock.patch(
            "reana_workflow_controller.workflow_run_manager."
            "current_k8s_batchv1_api_client"
        ) as k8s_api_client:
            # provide user secret store
            with mock.patch(
                "reana_commons.k8s.secrets." "current_k8s_corev1_api_client",
                corev1_api_client_with_user_secrets(user_secrets),
            ):
                # set workflow status to START
                res = client.put(
                    url_for(
                        "statuses.set_workflow_status",
                        workflow_id_or_name=workflow_created_uuid,
                    ),
                    query_string={"user": default_user.id_, "status": "start"},
                )
                json_response = json.loads(res.data.decode())
                assert json_response.get("status") == status_dict[payload].name
                # Starting a workflow must submit exactly one k8s job.
                k8s_api_client.create_namespaced_job.assert_called_once()
def test_start_already_started_workflow(
    app,
    session,
    default_user,
    corev1_api_client_with_user_secrets,
    user_secrets,
    yadage_workflow_with_name,
    tmp_shared_volume_path,
):
    """Test start workflow twice."""
    with app.test_client() as client:
        os.environ["TESTS"] = "True"
        # create workflow
        res = client.post(
            url_for("workflows.create_workflow"),
            query_string={
                "user": default_user.id_,
                "workspace_root_path": tmp_shared_volume_path,
            },
            content_type="application/json",
            data=json.dumps(yadage_workflow_with_name),
        )
        response_data = json.loads(res.get_data(as_text=True))
        workflow_created_uuid = response_data.get("workflow_id")
        workflow = Workflow.query.filter(Workflow.id_ == workflow_created_uuid).first()
        assert workflow.status == RunStatus.created
        payload = START
        # Mock the Kubernetes batch API so no real cluster call is made.
        with mock.patch(
            "reana_workflow_controller.workflow_run_manager."
            "current_k8s_batchv1_api_client"
        ):
            # provide user secret store
            with mock.patch(
                "reana_commons.k8s.secrets." "current_k8s_corev1_api_client",
                corev1_api_client_with_user_secrets(user_secrets),
            ):
                # set workflow status to START
                res = client.put(
                    url_for(
                        "statuses.set_workflow_status",
                        workflow_id_or_name=workflow_created_uuid,
                    ),
                    query_string={"user": default_user.id_, "status": "start"},
                )
                json_response = json.loads(res.data.decode())
                assert json_response.get("status") == status_dict[payload].name
                # Second START on the same (now pending) workflow must fail.
                res = client.put(
                    url_for(
                        "statuses.set_workflow_status",
                        workflow_id_or_name=workflow_created_uuid,
                    ),
                    query_string={"user": default_user.id_, "status": "start"},
                )
                json_response = json.loads(res.data.decode())
                # 409 Conflict with an explanatory message.
                assert res.status_code == 409
                expected_message = (
                    "Workflow {0} could not be started because"
                    " it is already pending."
                ).format(workflow_created_uuid)
                assert json_response.get("message") == expected_message
# Only a running workflow can be stopped (one k8s delete call); any other
# state yields 409 Conflict and leaves the status untouched.
@pytest.mark.parametrize(
    "current_status, expected_status, expected_http_status_code, "
    "k8s_stop_call_count",
    [
        (RunStatus.created, RunStatus.created, 409, 0),
        (RunStatus.running, RunStatus.stopped, 200, 1),
        (RunStatus.failed, RunStatus.failed, 409, 0),
        (RunStatus.finished, RunStatus.finished, 409, 0),
    ],
)
def test_stop_workflow(
    current_status,
    expected_status,
    expected_http_status_code,
    k8s_stop_call_count,
    app,
    default_user,
    yadage_workflow_with_name,
    sample_serial_workflow_in_db,
    session,
):
    """Test stop workflow."""
    with app.test_client() as client:
        # Force the sample workflow into the parametrized starting state.
        sample_serial_workflow_in_db.status = current_status
        session.add(sample_serial_workflow_in_db)
        session.commit()
        with mock.patch(
            "reana_workflow_controller.workflow_run_manager."
            "current_k8s_batchv1_api_client"
        ) as stop_workflow_mock:
            res = client.put(
                url_for(
                    "statuses.set_workflow_status",
                    workflow_id_or_name=sample_serial_workflow_in_db.name,
                ),
                query_string={"user": default_user.id_, "status": "stop"},
            )
            assert sample_serial_workflow_in_db.status == expected_status
            assert res.status_code == expected_http_status_code
            assert (
                stop_workflow_mock.delete_namespaced_job.call_count
                == k8s_stop_call_count
            )
def test_set_workflow_status_unauthorized(
app, default_user, yadage_workflow_with_name, tmp_shared_volume_path
):
"""Test set workflow status unauthorized."""
with app.test_client() as client:
# create workflow
res = client.post(
url_for("workflows.create_workflow"),
query_string={
"user": default_user.id_,
"workspace_root_path": tmp_shared_volume_path,
},
content_type="application/json",
data=json.dumps(yadage_workflow_with_name),
)
response_data = json.loads(res.get_data(as_text=True))
workflow_created_uuid = response_data.get("workflow_id")
random_user_uuid = uuid.uuid4()
payload = START
res = client.put(
url_for(
"statuses.set_workflow_status",
workflow_id_or_name=workflow_created_uuid,
),
query_string={"user": random_user_uuid, "status": payload},
content_type="application/json",
)
assert res.status_code == 403
def test_set_workflow_status_unknown_workflow(
app, default_user, yadage_workflow_with_name, tmp_shared_volume_path
):
"""Test set workflow status for unknown workflow."""
with app.test_client() as client:
# create workflow
res = client.post(
url_for("workflows.create_workflow"),
query_string={
"user": default_user.id_,
"workspace_root_path": tmp_shared_volume_path,
},
content_type="application/json",
data=json.dumps(yadage_workflow_with_name),
)
random_workflow_uuid = uuid.uuid4()
payload = START
res = client.put(
url_for(
"statuses.set_workflow_status", workflow_id_or_name=random_workflow_uuid
),
query_string={"user": default_user.id_},
content_type="application/json",
data=json.dumps(payload),
)
assert res.status_code == 404
def test_upload_file(
app, session, default_user, tmp_shared_volume_path, cwl_workflow_with_name
):
"""Test upload file."""
with app.test_client() as client:
# create workflow
res = client.post(
url_for("workflows.create_workflow"),
query_string={
"user": default_user.id_,
"workspace_root_path": tmp_shared_volume_path,
},
content_type="application/json",
data=json.dumps(cwl_workflow_with_name),
)
response_data = json.loads(res.get_data(as_text=True))
workflow_uuid = response_data.get("workflow_id")
workflow = Workflow.query.filter(Workflow.id_ == workflow_uuid).first()
# create file
file_name = "dataset.csv"
file_binary_content = b"1,2,3,4\n5,6,7,8"
res = client.post(
url_for("workspaces.upload_file", workflow_id_or_name=workflow_uuid),
query_string={"user": default_user.id_, "file_name": file_name},
content_type="application/octet-stream",
input_stream=io.BytesIO(file_binary_content),
)
assert res.status_code == 200
# remove workspace directory from path
workflow_workspace = workflow.workspace_path
# we use `secure_filename` here because
# we use it in server side when adding
# files
absolute_file_path = os.path.join(
workflow_workspace, secure_filename(file_name)
)
with open(absolute_file_path, "rb") as f:
assert f.read() == file_binary_content
def test_upload_file_unknown_workflow(app, default_user):
"""Test upload file to non existing workflow."""
with app.test_client() as client:
random_workflow_uuid = uuid.uuid4()
# create file
file_name = "dataset.csv"
file_binary_content = b"1,2,3,4\n5,6,7,8"
res = client.post(
url_for("workspaces.upload_file", workflow_id_or_name=random_workflow_uuid),
query_string={"user": default_user.id_, "file_name": file_name},
content_type="application/octet-stream",
input_stream=io.BytesIO(file_binary_content),
)
assert res.status_code == 404
def test_delete_file(app, default_user, sample_serial_workflow_in_db):
"""Test delete file."""
# Move to fixture
from flask import current_app
create_workflow_workspace(sample_serial_workflow_in_db.workspace_path)
file_name = "dataset.csv"
file_binary_content = b"1,2,3,4\n5,6,7,8"
abs_path_to_file = os.path.join(
sample_serial_workflow_in_db.workspace_path, file_name
)
with open(abs_path_to_file, "wb+") as f:
f.write(file_binary_content)
assert os.path.exists(abs_path_to_file)
with app.test_client() as client:
res = client.delete(
url_for(
"workspaces.delete_file",
workflow_id_or_name=sample_serial_workflow_in_db.id_,
file_name=file_name,
),
query_string={"user": default_user.id_},
)
assert res.status_code == 200
assert not os.path.exists(abs_path_to_file)
def test_get_created_workflow_logs(
app, default_user, cwl_workflow_with_name, tmp_shared_volume_path
):
"""Test get workflow logs."""
with app.test_client() as client:
# create workflow
res = client.post(
url_for("workflows.create_workflow"),
query_string={
"user": default_user.id_,
"workspace_root_path": tmp_shared_volume_path,
},
content_type="application/json",
data=json.dumps(cwl_workflow_with_name),
)
response_data = json.loads(res.get_data(as_text=True))
workflow_uuid = response_data.get("workflow_id")
workflow_name = response_data.get("workflow_name")
res = client.get(
url_for("statuses.get_workflow_logs", workflow_id_or_name=workflow_uuid),
query_string={"user": default_user.id_},
content_type="application/json",
data=json.dumps(None),
)
assert res.status_code == 200
response_data = json.loads(res.get_data(as_text=True))
expected_data = {
"workflow_id": workflow_uuid,
"workflow_name": workflow_name,
"user": str(default_user.id_),
"logs": '{"workflow_logs": "", "job_logs": {},' ' "engine_specific": null}',
}
assert response_data == expected_data
def test_get_unknown_workflow_logs(
app, default_user, yadage_workflow_with_name, tmp_shared_volume_path
):
"""Test set workflow status for unknown workflow."""
with app.test_client() as client:
# create workflow
res = client.post(
url_for("workflows.create_workflow"),
query_string={
"user": default_user.id_,
"workspace_root_path": tmp_shared_volume_path,
},
content_type="application/json",
data=json.dumps(yadage_workflow_with_name),
)
random_workflow_uuid = uuid.uuid4()
res = client.get(
url_for(
"statuses.get_workflow_logs", workflow_id_or_name=random_workflow_uuid
),
query_string={"user": default_user.id_},
content_type="application/json",
)
assert res.status_code == 404
def test_get_workflow_logs_unauthorized(
app, default_user, yadage_workflow_with_name, tmp_shared_volume_path
):
"""Test set workflow status for unknown workflow."""
with app.test_client() as client:
# create workflow
res = client.post(
url_for("workflows.create_workflow"),
query_string={
"user": default_user.id_,
"workspace_root_path": tmp_shared_volume_path,
},
content_type="application/json",
data=json.dumps(yadage_workflow_with_name),
)
response_data = json.loads(res.get_data(as_text=True))
workflow_uuid = response_data.get("workflow_id")
random_user_uuid = uuid.uuid4()
res = client.get(
url_for("statuses.get_workflow_logs", workflow_id_or_name=workflow_uuid),
query_string={"user": random_user_uuid},
content_type="application/json",
)
assert res.status_code == 403
def test_start_input_parameters(
app,
session,
default_user,
user_secrets,
corev1_api_client_with_user_secrets,
sample_serial_workflow_in_db,
):
"""Test start workflow with inupt parameters."""
with app.test_client() as client:
# create workflow
sample_serial_workflow_in_db.status = RunStatus.created
workflow_created_uuid = sample_serial_workflow_in_db.id_
session.add(sample_serial_workflow_in_db)
session.commit()
workflow = Workflow.query.filter(Workflow.id_ == workflow_created_uuid).first()
assert workflow.status == RunStatus.created
payload = START
parameters = {"input_parameters": {"first": "test"}, "operational_options": {}}
with mock.patch(
"reana_workflow_controller.workflow_run_manager."
"current_k8s_batchv1_api_client"
):
# provide user secret store
with mock.patch(
"reana_commons.k8s.secrets." "current_k8s_corev1_api_client",
corev1_api_client_with_user_secrets(user_secrets),
):
# set workflow status to START and pass parameters
res = client.put(
url_for(
"statuses.set_workflow_status",
workflow_id_or_name=workflow_created_uuid,
),
query_string={"user": default_user.id_, "status": "start"},
content_type="application/json",
data=json.dumps(parameters),
)
json_response = json.loads(res.data.decode())
assert json_response.get("status") == status_dict[payload].name
workflow = Workflow.query.filter(
Workflow.id_ == workflow_created_uuid
).first()
assert workflow.input_parameters == parameters["input_parameters"]
def test_start_workflow_db_failure(
app,
session,
default_user,
user_secrets,
corev1_api_client_with_user_secrets,
sample_serial_workflow_in_db,
):
"""Test starting workflow with a DB failure."""
mock_session_cls = mock.Mock()
mock_session = mock.Mock()
mock_session_cls.object_session.return_value = mock_session
from sqlalchemy.exc import SQLAlchemyError
mock_session.commit = mock.Mock(
side_effect=SQLAlchemyError("Could not connect to the server.")
)
mock_k8s_run_manager_cls = mock.Mock()
k8s_workflow_run_manager = mock.Mock()
mock_k8s_run_manager_cls.return_value = k8s_workflow_run_manager
with mock.patch.multiple(
"reana_workflow_controller.rest.utils",
Session=mock_session_cls,
KubernetesWorkflowRunManager=mock_k8s_run_manager_cls,
):
with app.test_client() as client:
res = client.put(
url_for(
"statuses.set_workflow_status",
workflow_id_or_name=sample_serial_workflow_in_db.id_,
),
query_string={"user": default_user.id_, "status": "start"},
content_type="application/json",
data=json.dumps({}),
)
assert res.status_code == 502
def test_start_workflow_kubernetes_failure(
app,
session,
default_user,
user_secrets,
corev1_api_client_with_user_secrets,
sample_serial_workflow_in_db,
):
"""Test starting workflow with a Kubernetes failure when creating jobs."""
mock_k8s_run_manager_cls = mock.Mock()
k8s_workflow_run_manager = mock.Mock()
from kubernetes.client.rest import ApiException
k8s_workflow_run_manager.start_batch_workflow_run = mock.Mock(
side_effect=ApiException("Could not connect to Kubernetes.")
)
mock_k8s_run_manager_cls.return_value = k8s_workflow_run_manager
with mock.patch.multiple(
"reana_workflow_controller.rest.utils",
KubernetesWorkflowRunManager=mock_k8s_run_manager_cls,
):
with app.test_client() as client:
res = client.put(
url_for(
"statuses.set_workflow_status",
workflow_id_or_name=sample_serial_workflow_in_db.id_,
),
query_string={"user": default_user.id_, "status": "start"},
content_type="application/json",
data=json.dumps({}),
)
assert res.status_code == 502
@pytest.mark.parametrize(
"status",
[
RunStatus.created,
RunStatus.failed,
RunStatus.finished,
pytest.param(RunStatus.deleted, marks=pytest.mark.xfail),
pytest.param(RunStatus.running, marks=pytest.mark.xfail),
],
)
def test_delete_workflow(
app, session, default_user, sample_yadage_workflow_in_db, status
):
"""Test deletion of a workflow in all possible statuses."""
sample_yadage_workflow_in_db.status = status
session.add(sample_yadage_workflow_in_db)
session.commit()
with app.test_client() as client:
client.put(
url_for(
"statuses.set_workflow_status",
workflow_id_or_name=sample_yadage_workflow_in_db.id_,
),
query_string={"user": default_user.id_, "status": "deleted"},
content_type="application/json",
data=json.dumps({}),
)
assert sample_yadage_workflow_in_db.status == RunStatus.deleted
def test_delete_all_workflow_runs(
app, session, default_user, yadage_workflow_with_name
):
"""Test deletion of all runs of a given workflow."""
# add 5 workflows in the database with the same name
for i in range(5):
workflow = Workflow(
id_=uuid.uuid4(),
name=yadage_workflow_with_name["name"],
owner_id=default_user.id_,
reana_specification=yadage_workflow_with_name["reana_specification"],
operational_options={},
type_=yadage_workflow_with_name["reana_specification"]["workflow"]["type"],
logs="",
)
session.add(workflow)
session.commit()
first_workflow = (
session.query(Workflow)
.filter_by(name=yadage_workflow_with_name["name"])
.first()
)
with app.test_client() as client:
client.put(
url_for(
"statuses.set_workflow_status", workflow_id_or_name=first_workflow.id_
),
query_string={"user": default_user.id_, "status": "deleted"},
content_type="application/json",
data=json.dumps({"all_runs": True}),
)
for workflow in session.query(Workflow).filter_by(name=first_workflow.name).all():
assert workflow.status == RunStatus.deleted
@pytest.mark.parametrize("workspace", [True, False])
def test_workspace_deletion(
app,
session,
default_user,
yadage_workflow_with_name,
tmp_shared_volume_path,
workspace,
):
"""Test workspace deletion."""
with app.test_client() as client:
res = client.post(
url_for("workflows.create_workflow"),
query_string={
"user": default_user.id_,
"workspace_root_path": tmp_shared_volume_path,
},
content_type="application/json",
data=json.dumps(yadage_workflow_with_name),
)
assert res.status_code == 201
response_data = json.loads(res.get_data(as_text=True))
workflow = Workflow.query.filter(
Workflow.id_ == response_data.get("workflow_id")
).first()
assert workflow
# create a job for the workflow
workflow_job = Job(id_=uuid.uuid4(), workflow_uuid=workflow.id_)
job_cache_entry = JobCache(job_id=workflow_job.id_)
session.add(workflow_job)
session.commit()
session.add(job_cache_entry)
session.commit()
# check that the workflow workspace exists
assert os.path.exists(workflow.workspace_path)
with app.test_client() as client:
res = client.put(
url_for(
"statuses.set_workflow_status", workflow_id_or_name=workflow.id_
),
query_string={"user": default_user.id_, "status": "deleted"},
content_type="application/json",
data=json.dumps({"workspace": workspace}),
)
if workspace:
assert not os.path.exists(workflow.workspace_path)
# check that all cache entries for jobs
# of the deleted workflow are removed
cache_entries_after_delete = JobCache.query.filter_by(
job_id=workflow_job.id_
).all()
assert not cache_entries_after_delete
def test_deletion_of_workspace_of_an_already_deleted_workflow(
app, session, default_user, yadage_workflow_with_name, tmp_shared_volume_path
):
"""Test workspace deletion of an already deleted workflow."""
with app.test_client() as client:
res = client.post(
url_for("workflows.create_workflow"),
query_string={
"user": default_user.id_,
"workspace_root_path": tmp_shared_volume_path,
},
content_type="application/json",
data=json.dumps(yadage_workflow_with_name),
)
assert res.status_code == 201
response_data = json.loads(res.get_data(as_text=True))
workflow = Workflow.query.filter(
Workflow.id_ == response_data.get("workflow_id")
).first()
assert workflow
# check that the workflow workspace exists
assert os.path.exists(workflow.workspace_path)
with app.test_client() as client:
res = client.put(
url_for(
"statuses.set_workflow_status", workflow_id_or_name=workflow.id_
),
query_string={"user": default_user.id_, "status": "deleted"},
content_type="application/json",
data=json.dumps({"workspace": False}),
)
assert os.path.exists(workflow.workspace_path)
delete_workflow(workflow, workspace=True)
assert not os.path.exists(workflow.workspace_path)
def test_get_workflow_diff(
app,
default_user,
sample_yadage_workflow_in_db,
sample_serial_workflow_in_db,
tmp_shared_volume_path,
):
"""Test set workflow status for unknown workflow."""
with app.test_client() as client:
res = client.get(
url_for(
"workflows.get_workflow_diff",
workflow_id_or_name_a=sample_serial_workflow_in_db.id_,
workflow_id_or_name_b=sample_yadage_workflow_in_db.id_,
),
query_string={"user": default_user.id_},
content_type="application/json",
)
assert res.status_code == 200
response_data = json.loads(res.get_data(as_text=True))
assert "reana_specification" in response_data
assert "workspace_listing" in response_data
workflow_diff = json.loads(response_data["reana_specification"])["workflow"]
entire_diff_as_string = "".join(str(e) for e in workflow_diff)
# the following should be present in the diff
assert "serial" in "".join(
str(e) for e in json.loads(response_data["reana_specification"])["workflow"]
)
assert "yadage" in "".join(
str(e) for e in json.loads(response_data["reana_specification"])["workflow"]
)
assert (
json.dumps(
sample_serial_workflow_in_db.reana_specification["workflow"][
"specification"
]["steps"][0]["commands"]
)
in entire_diff_as_string
)
# single line of the entire specification is tested
# get_workflow_diff() returns extra characters between lines
assert (
sample_yadage_workflow_in_db.reana_specification["workflow"][
"specification"
]["first"]
in entire_diff_as_string
)
print("done")
def test_get_workspace_diff(
app,
default_user,
sample_yadage_workflow_in_db,
sample_serial_workflow_in_db,
tmp_shared_volume_path,
):
"""Test get workspace differences."""
# create the workspaces for the two workflows
workspace_path_a = sample_serial_workflow_in_db.workspace_path
workspace_path_b = sample_yadage_workflow_in_db.workspace_path
# Create files that differ in one line
csv_line = "1,2,3,4"
file_name = "test.csv"
for index, workspace in enumerate([workspace_path_a, workspace_path_b]):
with open(os.path.join(workspace, file_name), "w",) as f:
f.write("# File {}".format(index))
f.write(os.linesep)
f.write(csv_line)
f.flush()
with app.test_client() as client:
res = client.get(
url_for(
"workflows.get_workflow_diff",
workflow_id_or_name_a=sample_serial_workflow_in_db.id_,
workflow_id_or_name_b=sample_yadage_workflow_in_db.id_,
),
query_string={"user": default_user.id_},
content_type="application/json",
)
assert res.status_code == 200
response_data = json.loads(res.get_data(as_text=True))
assert "# File" in response_data["workspace_listing"]
def test_create_interactive_session(app, default_user, sample_serial_workflow_in_db):
"""Test create interactive session."""
wrm = WorkflowRunManager(sample_serial_workflow_in_db)
expected_data = {"path": wrm._generate_interactive_workflow_path()}
with app.test_client() as client:
# create workflow
with mock.patch.multiple(
"reana_workflow_controller.k8s",
current_k8s_corev1_api_client=mock.DEFAULT,
current_k8s_networking_v1beta1=mock.DEFAULT,
current_k8s_appsv1_api_client=mock.DEFAULT,
):
res = client.post(
url_for(
"workflows_session.open_interactive_session",
workflow_id_or_name=sample_serial_workflow_in_db.id_,
interactive_session_type="jupyter",
),
query_string={"user": default_user.id_},
)
assert res.json == expected_data
def test_create_interactive_session_unknown_type(
app, default_user, sample_serial_workflow_in_db
):
"""Test create interactive session for unknown interactive type."""
with app.test_client() as client:
# create workflow
res = client.post(
url_for(
"workflows_session.open_interactive_session",
workflow_id_or_name=sample_serial_workflow_in_db.id_,
interactive_session_type="terminl",
),
query_string={"user": default_user.id_},
)
assert res.status_code == 404
def test_create_interactive_session_custom_image(
app, default_user, sample_serial_workflow_in_db
):
"""Create an interactive session with custom image."""
custom_image = "test/image"
interactive_session_configuration = {"image": custom_image}
with app.test_client() as client:
# create workflow
with mock.patch.multiple(
"reana_workflow_controller.k8s",
current_k8s_corev1_api_client=mock.DEFAULT,
current_k8s_networking_v1beta1=mock.DEFAULT,
current_k8s_appsv1_api_client=mock.DEFAULT,
) as mocks:
client.post(
url_for(
"workflows_session.open_interactive_session",
workflow_id_or_name=sample_serial_workflow_in_db.id_,
interactive_session_type="jupyter",
),
query_string={"user": default_user.id_},
content_type="application/json",
data=json.dumps(interactive_session_configuration),
)
fargs, _ = mocks[
"current_k8s_appsv1_api_client"
].create_namespaced_deployment.call_args
assert fargs[1].spec.template.spec.containers[0].image == custom_image
def test_close_interactive_session(
app, session, default_user, sample_serial_workflow_in_db
):
"""Test close an interactive session."""
expected_data = {"message": "The interactive session has been closed"}
path = "/5d9b30fd-f225-4615-9107-b1373afec070"
name = "interactive-jupyter-5d9b30fd-f225-4615-9107-b1373afec070-5lswkp"
int_session = InteractiveSession(
name=name, path=path, owner_id=sample_serial_workflow_in_db.owner_id,
)
sample_serial_workflow_in_db.sessions.append(int_session)
session.add(sample_serial_workflow_in_db)
session.commit()
with app.test_client() as client:
with mock.patch(
"reana_workflow_controller.k8s" ".current_k8s_networking_v1beta1"
):
res = client.post(
url_for(
"workflows_session.close_interactive_session",
workflow_id_or_name=sample_serial_workflow_in_db.id_,
),
query_string={"user": default_user.id_},
content_type="application/json",
)
assert res.json == expected_data
def test_close_interactive_session_not_opened(
app, session, default_user, sample_serial_workflow_in_db
):
"""Test close an interactive session when session is not opened."""
expected_data = {
"message": "Workflow - {} has no open interactive session.".format(
sample_serial_workflow_in_db.id_
)
}
with app.test_client() as client:
sample_serial_workflow_in_db.sessions = []
session.add(sample_serial_workflow_in_db)
session.commit()
res = client.post(
url_for(
"workflows_session.close_interactive_session",
workflow_id_or_name=sample_serial_workflow_in_db.id_,
),
query_string={"user": default_user.id_},
content_type="application/json",
)
assert res.json == expected_data
assert res._status_code == 404
| 36.977837
| 88
| 0.620668
| 6,355
| 55,060
| 5.029111
| 0.059953
| 0.03373
| 0.03204
| 0.038548
| 0.805476
| 0.776721
| 0.751596
| 0.72378
| 0.702315
| 0.6852
| 0
| 0.007886
| 0.283709
| 55,060
| 1,488
| 89
| 37.002688
| 0.80248
| 0.072793
| 0
| 0.672091
| 0
| 0
| 0.120028
| 0.049623
| 0
| 0
| 0
| 0
| 0.078112
| 1
| 0.034174
| false
| 0
| 0.014646
| 0.000814
| 0.049634
| 0.000814
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
29784cf0cdfab1cc997d9b758eaf01874f17d4c5
| 1,208
|
py
|
Python
|
test/android_test.py
|
TE-ToshiakiTanaka/atve
|
e6ad4d2343dc9271d173729c2680eddf3d5dd8a6
|
[
"MIT"
] | null | null | null |
test/android_test.py
|
TE-ToshiakiTanaka/atve
|
e6ad4d2343dc9271d173729c2680eddf3d5dd8a6
|
[
"MIT"
] | null | null | null |
test/android_test.py
|
TE-ToshiakiTanaka/atve
|
e6ad4d2343dc9271d173729c2680eddf3d5dd8a6
|
[
"MIT"
] | null | null | null |
import os
from atve.script import AtveTestCase
from runner import TestAtveTestRunner as TSTR
from nose.tools import with_setup, raises, ok_, eq_
class TestAndroidTestRuner(TSTR):
@with_setup(TSTR.setup, TSTR.teardown)
def test_library_execute_android_success_01(self):
self.script_path = os.path.join(self.script_path, "android")
self.base_library_execute_success("android_01.py")
@with_setup(TSTR.setup, TSTR.teardown)
def test_library_execute_android_success_02(self):
self.script_path = os.path.join(self.script_path, "android")
self.base_library_execute_success("android_02.py")
@with_setup(TSTR.setup, TSTR.teardown)
def test_library_execute_android_success_03(self):
AtveTestCase.set("android.serial", "emulator-5554")
self.script_path = os.path.join(self.script_path, "android")
self.base_library_execute_success("android_03.py")
@with_setup(TSTR.setup, TSTR.teardown)
def test_library_execute_android_success_04(self):
AtveTestCase.set("android.serial", "emulator-5554")
self.script_path = os.path.join(self.script_path, "android")
self.base_library_execute_success("android_04.py")
| 41.655172
| 68
| 0.746689
| 165
| 1,208
| 5.157576
| 0.236364
| 0.084606
| 0.13161
| 0.084606
| 0.80611
| 0.80611
| 0.80611
| 0.80611
| 0.80611
| 0.80611
| 0
| 0.023301
| 0.147351
| 1,208
| 28
| 69
| 43.142857
| 0.802913
| 0
| 0
| 0.434783
| 0
| 0
| 0.110927
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.173913
| false
| 0
| 0.173913
| 0
| 0.391304
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
464d8ffa6f6ed2e02e9fdd067d36cc0af7bd21e0
| 2,748
|
py
|
Python
|
CSipSimple/jni/swig-glue/clean_source_for_android.py
|
dmfr/CSipSimple-mirror
|
f2f2b8efcb739090a45b205690a0fb5b74bce343
|
[
"OpenSSL",
"Unlicense"
] | 4
|
2016-09-29T00:04:31.000Z
|
2021-12-02T08:39:51.000Z
|
CSipSimple/jni/swig-glue/clean_source_for_android.py
|
dmfr/CSipSimple-mirror
|
f2f2b8efcb739090a45b205690a0fb5b74bce343
|
[
"OpenSSL",
"Unlicense"
] | null | null | null |
CSipSimple/jni/swig-glue/clean_source_for_android.py
|
dmfr/CSipSimple-mirror
|
f2f2b8efcb739090a45b205690a0fb5b74bce343
|
[
"OpenSSL",
"Unlicense"
] | null | null | null |
#!/usr/bin/python
import re
import sys
def remove_rtti(text):
return re.sub(r'dynamic_cast<(.* \*)>', r'(\1)', text)
def make_dalvik_compat(text):
init_text = """/* Utility class for managing the JNI environment */
class JNIEnvWrapper {
const Director *director_;
JNIEnv *jenv_;
public:
JNIEnvWrapper(const Director *director) : director_(director), jenv_(0) {
#if defined(SWIG_JAVA_ATTACH_CURRENT_THREAD_AS_DAEMON)
// Attach a daemon thread to the JVM. Useful when the JVM should not wait for
// the thread to exit upon shutdown. Only for jdk-1.4 and later.
director_->swig_jvm_->AttachCurrentThreadAsDaemon((void **) &jenv_, NULL);
#else
director_->swig_jvm_->AttachCurrentThread((void **) &jenv_, NULL);
#endif
}
~JNIEnvWrapper() {
#if !defined(SWIG_JAVA_NO_DETACH_CURRENT_THREAD)
// Some JVMs, eg jdk-1.4.2 and lower on Solaris have a bug and crash with the DetachCurrentThread call.
// However, without this call, the JVM hangs on exit when the thread was not created by the JVM and creates a memory leak.
director_->swig_jvm_->DetachCurrentThread();
#endif
}
JNIEnv *getJNIEnv() const {
return jenv_;
}
};"""
final_text = """/* Utility class for managing the JNI environment */
class JNIEnvWrapper {
const Director *director_;
JNIEnv *jenv_;
int env_status;
JNIEnv *g_env;
public:
JNIEnvWrapper(const Director *director) : director_(director), jenv_(0) {
env_status = director_->swig_jvm_->GetEnv( (void **) &g_env, JNI_VERSION_1_6);
#if defined(SWIG_JAVA_ATTACH_CURRENT_THREAD_AS_DAEMON)
// Attach a daemon thread to the JVM. Useful when the JVM should not wait for
// the thread to exit upon shutdown. Only for jdk-1.4 and later.
director_->swig_jvm_->AttachCurrentThreadAsDaemon( &jenv_, NULL);
#else
director_->swig_jvm_->AttachCurrentThread( &jenv_, NULL);
#endif
}
~JNIEnvWrapper() {
#if !defined(SWIG_JAVA_NO_DETACH_CURRENT_THREAD)
// Some JVMs, eg jdk-1.4.2 and lower on Solaris have a bug and crash with the DetachCurrentThread call.
// However, without this call, the JVM hangs on exit when the thread was not created by the JVM and creates a memory leak.
if( env_status == JNI_EDETACHED ){
director_->swig_jvm_->DetachCurrentThread();
}
#endif
}
JNIEnv *getJNIEnv() const {
return jenv_;
}
};"""
return text.replace(init_text, final_text)
if __name__ == '__main__':
filename = sys.argv[1]
brut_code = open(filename).read()
code_wo_rtti = remove_rtti(brut_code)
code_dalvik_compat = make_dalvik_compat(code_wo_rtti)
print(code_dalvik_compat)
| 36.64
| 130
| 0.682678
| 366
| 2,748
| 4.86612
| 0.303279
| 0.07187
| 0.058956
| 0.076362
| 0.783829
| 0.783829
| 0.783829
| 0.732173
| 0.732173
| 0.663672
| 0
| 0.007411
| 0.214338
| 2,748
| 74
| 131
| 37.135135
| 0.817508
| 0.005822
| 0
| 0.567164
| 0
| 0.059701
| 0.855731
| 0.182717
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029851
| false
| 0
| 0.029851
| 0.014925
| 0.119403
| 0.014925
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
465c4cf006795c0c9e2e8bf85f83d247c047137e
| 2,640
|
py
|
Python
|
tests/homework/test_homework2.py
|
acc-cosc-1336/cosc-1336-spring-2018-artgonzalezacc
|
c5dcc0ad7c47345c274d61c7e94f6c3b0ed42245
|
[
"MIT"
] | null | null | null |
tests/homework/test_homework2.py
|
acc-cosc-1336/cosc-1336-spring-2018-artgonzalezacc
|
c5dcc0ad7c47345c274d61c7e94f6c3b0ed42245
|
[
"MIT"
] | 4
|
2018-02-02T13:51:49.000Z
|
2018-04-01T03:07:58.000Z
|
tests/homework/test_homework2.py
|
acc-cosc-1336/cosc-1336-spring-2018-artgonzalezacc
|
c5dcc0ad7c47345c274d61c7e94f6c3b0ed42245
|
[
"MIT"
] | 3
|
2018-01-26T00:24:18.000Z
|
2018-04-26T00:40:17.000Z
|
import unittest
from src.homework.homework2 import get_time
from src.homework.homework2 import time_from_utc
class TestHomework2(unittest.TestCase):
def test_get_time_when_time_type_when_value_0(self):
self.assertEqual('Invalid time_type(12 or 24 only)', get_time(9,30,45,-5))
def test_get_time_when_time_type_when_value_25(self):
self.assertEqual('Invalid time_type(12 or 24 only)', get_time(9,30,45, 25))
def test_get_time_when_time_type_24_hours_gt_23(self):
self.assertEqual('Invalid hours(range 0-23)', get_time(24,11,45, 24))
def test_get_time_when_time_type_12_hours_gt_12(self):
self.assertEqual('Invalid hours(range 1-12)', get_time(13,11,45, 12))
def test_get_time_when_time_type_12_hours_lt_0(self):
self.assertEqual('Invalid hours(range 1-12)', get_time(-5,11,45, 12))
def test_get_time_when_minutes_lt_0(self):
self.assertEqual('Invalid minutes(range 0-59)', get_time(9,-1,45, 12))
def test_get_time_when_minutes_gt_59(self):
self.assertEqual('Invalid minutes(range 0-59)', get_time(9,60,45, 12))
def test_get_time_when_seconds_lt_0(self):
self.assertEqual('Invalid seconds(range 0-59)', get_time(9,10,-1, 12))
def test_get_time_when_seconds_gt_59(self):
self.assertEqual('Invalid seconds(range 0-59)', get_time(9,50,60, 12))
def test_get_time_when_time_type_24_w_valid_time_21_9_9_24(self):
self.assertEqual('21:09:09', get_time(21, 9, 9, 24))
def test_get_time_when_time_type_12_w_valid_time_9_9_9_12_PM(self):
self.assertEqual('09:09:09 PM', get_time(9, 9, 9, 12, 'PM'))
def test_get_time_when_time_type_24_w_valid_time_21_29_19_24(self):
self.assertEqual('21:29:19', get_time(21, 29, 19, 24))
def test_get_time_when_time_type_12_w_valid_time_9_29_19_12_PM(self):
self.assertEqual('09:29:19 PM', get_time(9, 29, 19, 12, 'PM'))
def test_get_time_when_time_type_12_w_valid_time_9_29_19_12_AM(self):
self.assertEqual('09:29:19 AM', get_time(9, 29, 19, 12, 'AM'))
def test_get_time_when_time_type_12_w_valid_time_9_9_9_12_AM_no_argument(self):
self.assertEqual('09:29:19 AM', get_time(9, 29, 19, 12))
def test_utc_time_to_eastern_standard_time(self):
self.assertEqual(15, time_from_utc(-5, 20))
def test_utc_time_to_central_standard_time(self):
self.assertEqual(14, time_from_utc(-6, 20))
def test_utc_time_to_mountain_standard_time(self):
self.assertEqual(13, time_from_utc(-7, 20))
def test_utc_time_to_pacific_standard_time(self):
self.assertEqual(12, time_from_utc(-8, 20))
| 40.615385
| 83
| 0.731439
| 479
| 2,640
| 3.607516
| 0.137787
| 0.125579
| 0.208912
| 0.121528
| 0.862847
| 0.706019
| 0.601852
| 0.569444
| 0.513889
| 0.429398
| 0
| 0.115916
| 0.150379
| 2,640
| 64
| 84
| 41.25
| 0.654481
| 0
| 0
| 0
| 0
| 0
| 0.118606
| 0
| 0
| 0
| 0
| 0
| 0.452381
| 1
| 0.452381
| false
| 0
| 0.071429
| 0
| 0.547619
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
465f13dd4a728313b2cc3608ea7156123206075f
| 3,297
|
py
|
Python
|
reward_learning/demos.py
|
Stanford-ILIAD/DPP-Batch-Active-Learning
|
99a97ddbe7f58b22f02671daa42c4ffb7e2b021f
|
[
"MIT"
] | 10
|
2019-06-20T18:57:36.000Z
|
2021-12-14T04:36:16.000Z
|
reward_learning/demos.py
|
Stanford-ILIAD/DPP-Batch-Active-Learning
|
99a97ddbe7f58b22f02671daa42c4ffb7e2b021f
|
[
"MIT"
] | null | null | null |
reward_learning/demos.py
|
Stanford-ILIAD/DPP-Batch-Active-Learning
|
99a97ddbe7f58b22f02671daa42c4ffb7e2b021f
|
[
"MIT"
] | 3
|
2019-12-13T01:48:00.000Z
|
2020-03-17T08:33:45.000Z
|
from sampling import Sampler
import algos
import numpy as np
from simulation_utils import create_env, get_feedback, run_algo
import sys
def batch(task, method, N, M, b):
    """Run batch active preference learning against a simulated user.

    Queries preference feedback N times in batches of b comparisons,
    re-sampling the posterior over the reward weights w after each batch
    and printing the running estimate and its alignment with the true w.

    Args:
        task: environment name passed to create_env.
        method: batch selection strategy name passed to run_algo.
        N: total number of preference queries; must be divisible by b.
        M: number of posterior samples drawn per update.
        b: batch size (queries collected per posterior update).
    """
    if N % b != 0:
        # Fixed grammar of the original message ('divisible to b').
        print('N must be divisible by b')
        exit(0)
    B = 20 * b  # size of the candidate pool run_algo picks the batch from
    simulation_object = create_env(task)
    d = simulation_object.num_of_features
    # Random ground-truth reward vector, normalized to the unit sphere.
    w_true = 2 * np.random.rand(d) - 1
    w_true = w_true / np.linalg.norm(w_true)
    print('If in automated mode: true w = {}'.format(w_true / np.linalg.norm(w_true)))
    w_sampler = Sampler(d)
    psi_set = []  # feature-difference vectors of queried trajectory pairs
    s_set = []    # user preference labels for those pairs
    i = 0
    while i < N:
        # Re-sample the posterior over w given all feedback so far.
        w_sampler.A = psi_set
        w_sampler.y = np.array(s_set).reshape(-1, 1)
        w_samples = w_sampler.sample(M)
        mean_w_samples = np.mean(w_samples, axis=0)
        print('Samples so far: ' + str(i))
        print('w estimate = {}'.format(mean_w_samples / np.linalg.norm(mean_w_samples)))
        print('Alignment = {}'.format(mean_w_samples.dot(w_true) / np.linalg.norm(mean_w_samples)))
        # Select the next batch of b trajectory pairs to query.
        inputA_set, inputB_set = run_algo(method, simulation_object, w_samples, b, B)
        for j in range(b):
            input_A = inputA_set[j]
            input_B = inputB_set[j]
            psi, s = get_feedback(simulation_object, input_B, input_A, w_true)
            psi_set.append(psi)
            s_set.append(s)
        i += b
    # Final posterior update and report after all N queries.
    w_sampler.A = psi_set
    w_sampler.y = np.array(s_set).reshape(-1, 1)
    w_samples = w_sampler.sample(M)
    mean_w_samples = np.mean(w_samples, axis=0)
    print('Samples so far: ' + str(N))
    print('w estimate = {}'.format(mean_w_samples / np.linalg.norm(mean_w_samples)))
    print('Alignment = {}'.format(mean_w_samples.dot(w_true) / np.linalg.norm(mean_w_samples)))
def nonbatch(task, method, N, M):
    """Run one-at-a-time active preference learning against a simulated user.

    Queries preference feedback N times, one comparison per iteration,
    re-sampling the posterior over the reward weights w before each query
    and printing the running estimate and its alignment with the true w.

    Args:
        task: environment name passed to create_env.
        method: query selection strategy name passed to run_algo.
        N: total number of preference queries.
        M: number of posterior samples drawn per update.
    """
    simulation_object = create_env(task)
    d = simulation_object.num_of_features
    # Random ground-truth reward vector, normalized to the unit sphere.
    w_true = 2 * np.random.rand(d) - 1
    w_true = w_true / np.linalg.norm(w_true)
    print('If in automated mode: true w = {}'.format(w_true / np.linalg.norm(w_true)))
    w_sampler = Sampler(d)
    psi_set = []  # feature-difference vectors of queried trajectory pairs
    s_set = []    # user preference labels for those pairs
    for i in range(N):
        # Re-sample the posterior over w given all feedback so far.
        w_sampler.A = psi_set
        w_sampler.y = np.array(s_set).reshape(-1, 1)
        w_samples = w_sampler.sample(M)
        mean_w_samples = np.mean(w_samples, axis=0)
        print('Samples so far: ' + str(i))
        print('w estimate = {}'.format(mean_w_samples / np.linalg.norm(mean_w_samples)))
        print('Alignment = {}'.format(mean_w_samples.dot(w_true) / np.linalg.norm(mean_w_samples)))
        input_A, input_B = run_algo(method, simulation_object, w_samples)
        psi, s = get_feedback(simulation_object, input_A, input_B, w_true)
        psi_set.append(psi)
        s_set.append(s)
    # Final posterior update and report after all N queries.
    w_sampler.A = psi_set
    w_sampler.y = np.array(s_set).reshape(-1, 1)
    w_samples = w_sampler.sample(M)
    # Bug fix: the original printed the stale mean from the last loop
    # iteration here; recompute it from the final samples (as batch() does).
    mean_w_samples = np.mean(w_samples, axis=0)
    print('Samples so far: ' + str(N))
    print('w estimate = {}'.format(mean_w_samples / np.linalg.norm(mean_w_samples)))
    print('Alignment = {}'.format(mean_w_samples.dot(w_true) / np.linalg.norm(mean_w_samples)))
| 37.896552
| 97
| 0.653625
| 539
| 3,297
| 3.729128
| 0.153989
| 0.111443
| 0.131343
| 0.051741
| 0.848259
| 0.848259
| 0.848259
| 0.775622
| 0.775622
| 0.775622
| 0
| 0.009249
| 0.212921
| 3,297
| 86
| 98
| 38.337209
| 0.765318
| 0
| 0
| 0.69863
| 0
| 0
| 0.081942
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027397
| false
| 0
| 0.068493
| 0
| 0.09589
| 0.205479
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
465fe7e5576677af07959914f08737be5fcfe513
| 38
|
py
|
Python
|
wsgi.py
|
darvelo/ether-website
|
eeaa728fca057e0edffe7cd31eafc6500d15003f
|
[
"MIT"
] | null | null | null |
wsgi.py
|
darvelo/ether-website
|
eeaa728fca057e0edffe7cd31eafc6500d15003f
|
[
"MIT"
] | 14
|
2018-02-21T17:58:33.000Z
|
2022-03-11T23:16:09.000Z
|
wsgi.py
|
darvelo/ether-website
|
eeaa728fca057e0edffe7cd31eafc6500d15003f
|
[
"MIT"
] | 1
|
2018-02-22T09:28:26.000Z
|
2018-02-22T09:28:26.000Z
|
from server import app as application
| 19
| 37
| 0.842105
| 6
| 38
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 38
| 1
| 38
| 38
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4661600efc505d2fcc22c3fcfa74b5bd3ee2a6c0
| 26
|
py
|
Python
|
tests/history/__init__.py
|
lievertom/2020.2-Projeto-Kokama-Ensino
|
47d5f1a1b31badb4a4306339e7302e8b4ce7ba4c
|
[
"MIT"
] | null | null | null |
tests/history/__init__.py
|
lievertom/2020.2-Projeto-Kokama-Ensino
|
47d5f1a1b31badb4a4306339e7302e8b4ce7ba4c
|
[
"MIT"
] | 2
|
2021-05-07T21:46:08.000Z
|
2021-05-07T21:48:23.000Z
|
tests/history/__init__.py
|
lievertom/2020.2-Projeto-Kokama-Ensino
|
47d5f1a1b31badb4a4306339e7302e8b4ce7ba4c
|
[
"MIT"
] | null | null | null |
from .test_sample import *
| 26
| 26
| 0.807692
| 4
| 26
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 26
| 1
| 26
| 26
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
46753aab31f873785a7e03ce1e53b5f27c953f15
| 104
|
py
|
Python
|
layer/computing/test_af00.py
|
hslee1539/NN
|
8b60a858c1137785ef684dd548b008bcc46b8d6d
|
[
"MIT"
] | null | null | null |
layer/computing/test_af00.py
|
hslee1539/NN
|
8b60a858c1137785ef684dd548b008bcc46b8d6d
|
[
"MIT"
] | null | null | null |
layer/computing/test_af00.py
|
hslee1539/NN
|
8b60a858c1137785ef684dd548b008bcc46b8d6d
|
[
"MIT"
] | null | null | null |
def forward(x_array, out_array):
    """Identity activation: copy each element of x_array into out_array.

    out_array must be at least as long as x_array; entries beyond
    len(x_array) are left untouched. Returns None (writes in place).
    """
    # enumerate replaces the range(len(...)) anti-idiom; the original's
    # redundant '+ 0' was a no-op for numeric elements and is dropped.
    for i, x in enumerate(x_array):
        out_array[i] = x
| 34.666667
| 37
| 0.634615
| 20
| 104
| 3.05
| 0.55
| 0.295082
| 0.295082
| 0.459016
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012346
| 0.221154
| 104
| 3
| 37
| 34.666667
| 0.740741
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
469be8f846e4d28647a5a9b073ac5d0a41a9c581
| 44
|
py
|
Python
|
vasapy/__init__.py
|
cosama/vasapy
|
efb43b2ab36641416a84c2a8f3432487e9618c6e
|
[
"Apache-2.0"
] | 1
|
2020-07-30T22:37:07.000Z
|
2020-07-30T22:37:07.000Z
|
vasapy/__init__.py
|
cosama/vasapy
|
efb43b2ab36641416a84c2a8f3432487e9618c6e
|
[
"Apache-2.0"
] | 2
|
2021-05-04T18:21:46.000Z
|
2021-05-04T19:02:22.000Z
|
vasapy/__init__.py
|
cosama/vasapy
|
efb43b2ab36641416a84c2a8f3432487e9618c6e
|
[
"Apache-2.0"
] | null | null | null |
from .dict import dict
from .set import set
| 14.666667
| 22
| 0.772727
| 8
| 44
| 4.25
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 44
| 2
| 23
| 22
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d3ca7dded9f7043e120ccb537a94457b39e83202
| 258,315
|
py
|
Python
|
instances/passenger_demand/pas-20210422-1717-int1/74.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
instances/passenger_demand/pas-20210422-1717-int1/74.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
instances/passenger_demand/pas-20210422-1717-int1/74.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
"""
PASSENGERS
"""
numPassengers = 19150
passenger_arriving = (
(4, 5, 4, 9, 0, 2, 3, 0, 2, 1, 0, 1, 0, 4, 2, 4, 3, 4, 1, 2, 0, 0, 1, 1, 1, 0), # 0
(7, 6, 6, 3, 2, 3, 0, 1, 2, 1, 0, 1, 0, 2, 4, 2, 5, 5, 3, 2, 0, 3, 2, 1, 2, 0), # 1
(9, 5, 5, 2, 3, 1, 1, 2, 4, 3, 0, 0, 0, 6, 10, 2, 4, 5, 5, 2, 2, 2, 2, 1, 0, 0), # 2
(5, 2, 4, 2, 3, 4, 0, 3, 0, 3, 0, 0, 0, 8, 5, 1, 3, 7, 4, 5, 2, 6, 0, 1, 1, 0), # 3
(5, 5, 8, 9, 9, 3, 4, 1, 2, 1, 0, 3, 0, 6, 7, 1, 2, 6, 2, 0, 1, 1, 6, 0, 0, 0), # 4
(4, 10, 4, 2, 5, 4, 1, 4, 1, 2, 0, 0, 0, 8, 2, 3, 4, 6, 3, 1, 0, 5, 3, 1, 0, 0), # 5
(6, 3, 11, 3, 7, 4, 1, 1, 1, 0, 2, 1, 0, 8, 7, 4, 1, 3, 4, 3, 2, 5, 2, 1, 1, 0), # 6
(1, 6, 3, 6, 7, 4, 3, 3, 0, 1, 1, 5, 0, 8, 4, 5, 10, 7, 3, 1, 1, 3, 0, 1, 1, 0), # 7
(8, 7, 9, 11, 5, 2, 3, 3, 2, 1, 0, 0, 0, 10, 7, 7, 2, 6, 2, 1, 2, 4, 2, 0, 2, 0), # 8
(9, 6, 6, 8, 6, 2, 2, 4, 3, 3, 2, 1, 0, 5, 8, 7, 6, 8, 2, 5, 2, 6, 1, 4, 2, 0), # 9
(8, 7, 12, 6, 12, 5, 2, 1, 0, 1, 0, 0, 0, 10, 5, 3, 3, 13, 8, 6, 1, 5, 2, 2, 0, 0), # 10
(11, 7, 10, 4, 6, 1, 1, 4, 5, 2, 1, 2, 0, 9, 7, 5, 8, 5, 3, 4, 4, 3, 4, 0, 0, 0), # 11
(14, 8, 8, 7, 2, 1, 6, 2, 5, 3, 0, 0, 0, 7, 10, 8, 6, 10, 4, 3, 2, 2, 0, 1, 1, 0), # 12
(10, 10, 10, 7, 7, 3, 1, 5, 2, 5, 4, 0, 0, 8, 2, 9, 6, 8, 6, 6, 0, 3, 5, 2, 2, 0), # 13
(13, 9, 13, 6, 6, 4, 8, 4, 3, 0, 1, 0, 0, 9, 12, 7, 2, 8, 2, 4, 2, 4, 2, 2, 1, 0), # 14
(9, 14, 9, 12, 5, 2, 3, 7, 3, 1, 1, 2, 0, 9, 7, 5, 6, 11, 1, 3, 3, 3, 3, 0, 1, 0), # 15
(16, 12, 5, 8, 7, 2, 5, 5, 5, 2, 2, 1, 0, 7, 12, 4, 5, 8, 3, 5, 3, 3, 2, 5, 0, 0), # 16
(9, 19, 6, 11, 4, 4, 1, 8, 7, 2, 1, 0, 0, 9, 5, 7, 11, 8, 7, 6, 0, 6, 2, 2, 1, 0), # 17
(8, 6, 7, 13, 6, 2, 4, 3, 5, 2, 1, 0, 0, 13, 10, 3, 8, 5, 8, 3, 0, 7, 4, 0, 0, 0), # 18
(9, 7, 11, 6, 7, 6, 6, 2, 2, 2, 0, 0, 0, 13, 14, 7, 0, 4, 11, 3, 4, 5, 4, 0, 0, 0), # 19
(5, 9, 13, 6, 9, 2, 4, 5, 7, 4, 0, 0, 0, 16, 3, 10, 8, 6, 2, 3, 3, 2, 1, 2, 1, 0), # 20
(13, 10, 8, 13, 8, 3, 8, 6, 1, 2, 2, 0, 0, 11, 10, 4, 10, 9, 4, 6, 1, 3, 5, 1, 1, 0), # 21
(11, 11, 7, 5, 10, 4, 11, 4, 6, 0, 1, 0, 0, 11, 9, 11, 6, 8, 8, 2, 4, 7, 0, 1, 1, 0), # 22
(11, 5, 4, 12, 10, 4, 2, 1, 1, 1, 1, 0, 0, 12, 10, 6, 8, 16, 5, 1, 0, 3, 4, 3, 1, 0), # 23
(16, 11, 8, 9, 6, 2, 3, 4, 4, 1, 1, 0, 0, 8, 10, 3, 9, 9, 6, 1, 1, 2, 2, 2, 2, 0), # 24
(14, 10, 12, 11, 8, 3, 3, 3, 2, 0, 1, 1, 0, 8, 8, 10, 4, 9, 3, 7, 2, 2, 0, 1, 1, 0), # 25
(11, 10, 13, 14, 6, 6, 5, 5, 0, 4, 1, 0, 0, 11, 12, 7, 5, 8, 8, 4, 0, 6, 0, 1, 2, 0), # 26
(14, 8, 9, 9, 3, 4, 1, 7, 5, 1, 2, 1, 0, 10, 6, 7, 9, 3, 7, 1, 3, 5, 3, 3, 0, 0), # 27
(10, 9, 8, 10, 12, 5, 3, 9, 2, 2, 1, 0, 0, 7, 7, 7, 7, 8, 2, 7, 1, 3, 4, 1, 0, 0), # 28
(11, 5, 5, 9, 9, 1, 7, 5, 6, 1, 2, 1, 0, 11, 6, 13, 7, 5, 6, 4, 1, 5, 3, 1, 1, 0), # 29
(11, 15, 11, 8, 6, 7, 4, 4, 3, 3, 2, 2, 0, 8, 11, 5, 5, 8, 9, 5, 3, 7, 4, 1, 0, 0), # 30
(8, 11, 7, 6, 12, 2, 5, 5, 5, 1, 1, 0, 0, 10, 12, 9, 7, 6, 8, 6, 1, 9, 2, 0, 0, 0), # 31
(6, 12, 8, 15, 12, 4, 2, 4, 9, 0, 1, 1, 0, 7, 7, 8, 8, 9, 4, 4, 2, 1, 3, 2, 1, 0), # 32
(10, 13, 9, 13, 9, 2, 4, 3, 3, 1, 0, 0, 0, 4, 13, 7, 6, 3, 8, 6, 1, 2, 3, 1, 1, 0), # 33
(13, 6, 6, 12, 7, 2, 3, 2, 6, 3, 0, 3, 0, 14, 5, 7, 5, 10, 5, 3, 1, 6, 4, 0, 0, 0), # 34
(19, 18, 2, 9, 13, 8, 4, 5, 6, 1, 3, 1, 0, 10, 10, 5, 6, 10, 4, 1, 3, 4, 1, 0, 0, 0), # 35
(12, 10, 10, 7, 15, 2, 4, 4, 6, 3, 0, 0, 0, 6, 9, 2, 5, 6, 4, 2, 1, 4, 4, 1, 0, 0), # 36
(10, 18, 5, 12, 9, 10, 5, 2, 2, 0, 1, 1, 0, 7, 3, 9, 7, 12, 3, 3, 2, 3, 1, 2, 0, 0), # 37
(8, 11, 8, 18, 4, 7, 0, 4, 1, 2, 3, 1, 0, 10, 10, 6, 5, 8, 4, 8, 5, 7, 5, 1, 1, 0), # 38
(17, 8, 7, 5, 13, 4, 1, 2, 3, 1, 2, 0, 0, 12, 12, 6, 5, 8, 4, 6, 3, 8, 1, 2, 0, 0), # 39
(13, 7, 10, 14, 1, 0, 2, 5, 5, 3, 0, 1, 0, 14, 12, 6, 5, 9, 6, 8, 1, 2, 5, 1, 2, 0), # 40
(8, 7, 4, 13, 4, 2, 3, 4, 1, 1, 3, 2, 0, 6, 10, 3, 7, 5, 3, 7, 1, 7, 4, 1, 0, 0), # 41
(11, 13, 7, 10, 9, 6, 1, 4, 5, 1, 1, 0, 0, 12, 11, 3, 3, 5, 8, 5, 2, 1, 1, 1, 0, 0), # 42
(9, 4, 7, 5, 13, 3, 6, 7, 5, 3, 1, 0, 0, 9, 9, 8, 3, 8, 6, 5, 4, 4, 5, 1, 1, 0), # 43
(13, 9, 9, 6, 11, 4, 0, 7, 3, 5, 2, 0, 0, 10, 9, 6, 4, 10, 5, 5, 3, 7, 4, 1, 0, 0), # 44
(6, 7, 10, 13, 9, 4, 11, 5, 5, 1, 0, 1, 0, 10, 12, 7, 7, 4, 6, 2, 3, 4, 4, 0, 2, 0), # 45
(9, 12, 8, 13, 7, 1, 4, 3, 6, 4, 4, 0, 0, 9, 13, 4, 6, 13, 4, 3, 6, 3, 5, 3, 1, 0), # 46
(12, 14, 5, 9, 12, 2, 7, 6, 5, 1, 0, 0, 0, 10, 10, 3, 2, 6, 3, 7, 6, 5, 2, 0, 1, 0), # 47
(6, 7, 9, 20, 10, 3, 9, 1, 6, 1, 1, 2, 0, 6, 7, 8, 5, 9, 3, 6, 3, 6, 0, 2, 2, 0), # 48
(10, 10, 3, 7, 8, 5, 3, 1, 2, 6, 3, 2, 0, 14, 10, 4, 1, 6, 1, 1, 3, 4, 3, 1, 0, 0), # 49
(15, 10, 16, 10, 3, 6, 3, 6, 7, 1, 2, 0, 0, 16, 4, 11, 9, 13, 4, 1, 5, 2, 5, 2, 1, 0), # 50
(9, 7, 5, 11, 7, 1, 4, 4, 5, 2, 3, 0, 0, 6, 5, 7, 8, 11, 6, 6, 1, 6, 1, 3, 0, 0), # 51
(7, 11, 7, 10, 4, 3, 4, 1, 7, 0, 2, 1, 0, 6, 10, 7, 5, 12, 9, 2, 2, 4, 2, 1, 0, 0), # 52
(4, 12, 7, 6, 8, 4, 2, 7, 4, 1, 2, 3, 0, 7, 9, 7, 5, 4, 5, 2, 2, 1, 4, 1, 1, 0), # 53
(3, 6, 8, 8, 12, 3, 1, 6, 8, 1, 2, 0, 0, 4, 8, 5, 7, 6, 4, 3, 3, 5, 2, 2, 1, 0), # 54
(14, 4, 3, 10, 9, 3, 3, 1, 1, 3, 1, 0, 0, 8, 11, 3, 1, 5, 6, 4, 4, 3, 3, 0, 1, 0), # 55
(16, 6, 11, 6, 10, 3, 1, 7, 3, 1, 2, 0, 0, 13, 6, 6, 2, 11, 2, 5, 0, 5, 3, 1, 0, 0), # 56
(7, 12, 7, 6, 9, 4, 3, 4, 4, 5, 2, 1, 0, 7, 11, 12, 7, 6, 6, 3, 2, 3, 3, 3, 1, 0), # 57
(10, 12, 7, 9, 9, 2, 5, 7, 9, 2, 0, 1, 0, 14, 13, 6, 6, 7, 3, 6, 2, 2, 5, 2, 1, 0), # 58
(10, 8, 7, 9, 4, 4, 4, 0, 1, 3, 1, 0, 0, 7, 11, 15, 4, 7, 2, 2, 1, 2, 4, 0, 1, 0), # 59
(6, 13, 6, 3, 11, 6, 2, 2, 6, 1, 3, 2, 0, 12, 11, 9, 4, 10, 4, 6, 9, 4, 4, 4, 1, 0), # 60
(3, 17, 7, 4, 10, 5, 3, 3, 5, 0, 0, 4, 0, 11, 5, 11, 11, 6, 2, 3, 3, 0, 3, 1, 1, 0), # 61
(15, 9, 10, 11, 8, 3, 2, 4, 3, 1, 4, 0, 0, 7, 6, 6, 4, 8, 5, 4, 4, 2, 3, 1, 0, 0), # 62
(6, 12, 8, 9, 9, 4, 1, 1, 5, 1, 2, 0, 0, 9, 5, 7, 3, 7, 6, 8, 1, 3, 7, 2, 0, 0), # 63
(8, 6, 9, 8, 15, 6, 5, 2, 4, 2, 0, 0, 0, 9, 9, 7, 6, 7, 2, 1, 5, 8, 2, 4, 0, 0), # 64
(14, 10, 2, 9, 3, 2, 2, 5, 3, 2, 1, 0, 0, 7, 11, 3, 5, 20, 5, 1, 6, 0, 5, 2, 1, 0), # 65
(8, 9, 8, 9, 6, 3, 7, 3, 9, 5, 2, 3, 0, 10, 3, 9, 5, 5, 7, 6, 2, 7, 1, 3, 1, 0), # 66
(16, 10, 9, 8, 4, 3, 3, 2, 3, 2, 2, 1, 0, 5, 10, 10, 8, 5, 5, 7, 0, 5, 3, 0, 1, 0), # 67
(9, 3, 9, 9, 8, 3, 5, 1, 4, 1, 0, 1, 0, 9, 6, 3, 4, 8, 10, 4, 3, 7, 3, 2, 1, 0), # 68
(13, 5, 10, 3, 5, 3, 3, 3, 4, 0, 2, 0, 0, 10, 9, 7, 6, 6, 2, 3, 1, 6, 2, 1, 0, 0), # 69
(11, 9, 16, 10, 7, 6, 0, 5, 6, 2, 3, 0, 0, 13, 10, 7, 3, 12, 6, 3, 3, 3, 4, 0, 1, 0), # 70
(9, 8, 8, 5, 9, 3, 2, 2, 4, 3, 3, 0, 0, 9, 9, 8, 5, 3, 12, 2, 1, 7, 0, 1, 0, 0), # 71
(11, 7, 9, 10, 9, 2, 4, 4, 6, 3, 0, 1, 0, 6, 10, 7, 6, 7, 4, 7, 2, 4, 4, 2, 1, 0), # 72
(8, 6, 8, 15, 12, 0, 4, 3, 1, 2, 1, 1, 0, 11, 6, 5, 9, 6, 5, 2, 4, 4, 2, 2, 0, 0), # 73
(8, 7, 11, 9, 8, 4, 3, 5, 8, 0, 3, 1, 0, 11, 5, 8, 7, 8, 3, 4, 1, 0, 3, 1, 1, 0), # 74
(10, 10, 8, 7, 3, 2, 4, 7, 10, 2, 1, 0, 0, 13, 5, 6, 8, 10, 2, 4, 1, 4, 4, 4, 2, 0), # 75
(10, 12, 11, 7, 5, 6, 3, 2, 4, 1, 1, 1, 0, 9, 8, 6, 5, 13, 3, 6, 1, 5, 3, 0, 0, 0), # 76
(7, 8, 7, 9, 8, 8, 5, 4, 5, 2, 3, 1, 0, 7, 9, 5, 4, 8, 7, 4, 3, 0, 1, 2, 1, 0), # 77
(6, 8, 8, 9, 7, 6, 7, 5, 3, 4, 2, 1, 0, 8, 9, 7, 5, 7, 6, 5, 2, 7, 1, 1, 0, 0), # 78
(13, 14, 7, 8, 6, 3, 6, 2, 5, 1, 1, 1, 0, 12, 7, 6, 4, 8, 4, 4, 1, 7, 3, 0, 0, 0), # 79
(15, 10, 12, 8, 5, 3, 4, 4, 4, 0, 2, 2, 0, 6, 6, 9, 8, 9, 3, 1, 3, 7, 2, 1, 1, 0), # 80
(15, 8, 12, 11, 12, 1, 3, 5, 3, 1, 4, 0, 0, 18, 9, 4, 4, 4, 4, 3, 3, 3, 0, 1, 0, 0), # 81
(7, 5, 9, 9, 9, 6, 4, 3, 7, 3, 1, 1, 0, 9, 6, 10, 2, 11, 1, 8, 2, 5, 4, 3, 2, 0), # 82
(12, 10, 9, 7, 8, 5, 7, 5, 2, 3, 0, 1, 0, 12, 9, 10, 4, 8, 1, 5, 1, 6, 3, 2, 1, 0), # 83
(8, 10, 7, 11, 9, 3, 1, 3, 7, 1, 3, 1, 0, 9, 13, 5, 9, 6, 3, 6, 1, 2, 4, 0, 0, 0), # 84
(10, 8, 5, 4, 8, 3, 1, 0, 4, 2, 1, 0, 0, 7, 2, 7, 5, 13, 4, 4, 4, 1, 6, 1, 1, 0), # 85
(9, 6, 6, 11, 8, 2, 3, 5, 3, 1, 2, 2, 0, 12, 5, 7, 6, 5, 3, 2, 2, 5, 5, 1, 0, 0), # 86
(8, 9, 12, 7, 9, 6, 5, 3, 6, 1, 0, 1, 0, 12, 12, 5, 4, 8, 4, 3, 1, 3, 6, 0, 0, 0), # 87
(5, 7, 12, 8, 8, 3, 3, 1, 1, 3, 1, 1, 0, 8, 8, 6, 8, 8, 3, 3, 0, 2, 2, 2, 0, 0), # 88
(10, 10, 4, 3, 7, 2, 2, 0, 3, 1, 1, 1, 0, 16, 11, 7, 6, 5, 2, 2, 2, 1, 2, 0, 0, 0), # 89
(6, 13, 5, 13, 7, 4, 2, 4, 7, 3, 2, 0, 0, 8, 6, 5, 3, 9, 4, 5, 1, 3, 2, 2, 1, 0), # 90
(8, 11, 11, 7, 11, 4, 2, 3, 2, 2, 0, 0, 0, 10, 4, 8, 8, 7, 3, 4, 3, 2, 4, 2, 1, 0), # 91
(10, 8, 6, 10, 7, 5, 5, 6, 2, 1, 1, 1, 0, 5, 10, 10, 4, 11, 6, 5, 3, 2, 4, 3, 0, 0), # 92
(7, 7, 11, 5, 6, 1, 6, 2, 4, 1, 1, 2, 0, 12, 9, 8, 2, 9, 3, 5, 0, 5, 3, 1, 0, 0), # 93
(15, 6, 5, 5, 10, 4, 3, 1, 6, 0, 2, 1, 0, 9, 8, 6, 4, 7, 5, 5, 1, 6, 3, 2, 0, 0), # 94
(11, 4, 3, 9, 8, 3, 3, 4, 8, 1, 0, 1, 0, 10, 14, 1, 4, 9, 3, 1, 0, 2, 2, 1, 1, 0), # 95
(8, 10, 7, 6, 4, 1, 6, 1, 2, 0, 3, 1, 0, 19, 6, 4, 7, 7, 6, 0, 4, 4, 5, 3, 1, 0), # 96
(9, 9, 8, 12, 10, 3, 1, 4, 3, 1, 1, 0, 0, 12, 10, 8, 5, 8, 4, 3, 1, 2, 2, 3, 1, 0), # 97
(9, 10, 6, 9, 10, 2, 5, 3, 3, 3, 2, 2, 0, 14, 9, 5, 7, 8, 4, 2, 2, 5, 1, 0, 0, 0), # 98
(11, 7, 8, 8, 3, 7, 5, 3, 3, 0, 0, 0, 0, 11, 6, 5, 7, 11, 5, 5, 3, 5, 1, 1, 2, 0), # 99
(6, 2, 9, 6, 10, 3, 3, 3, 5, 0, 1, 2, 0, 12, 8, 9, 4, 10, 3, 3, 2, 6, 3, 3, 0, 0), # 100
(11, 11, 10, 7, 2, 6, 3, 4, 6, 5, 0, 0, 0, 7, 8, 5, 6, 13, 7, 3, 2, 3, 1, 1, 0, 0), # 101
(7, 7, 2, 7, 4, 3, 1, 3, 4, 2, 0, 0, 0, 16, 4, 3, 3, 5, 4, 3, 0, 3, 2, 1, 0, 0), # 102
(10, 8, 12, 3, 6, 3, 7, 5, 4, 1, 1, 2, 0, 7, 5, 5, 5, 4, 5, 3, 1, 0, 3, 4, 0, 0), # 103
(15, 12, 7, 6, 5, 3, 3, 4, 3, 1, 1, 0, 0, 5, 3, 11, 6, 8, 5, 2, 1, 4, 1, 1, 0, 0), # 104
(12, 10, 8, 8, 8, 3, 4, 5, 4, 2, 1, 0, 0, 14, 8, 4, 3, 6, 5, 3, 1, 3, 2, 2, 0, 0), # 105
(6, 4, 9, 13, 4, 2, 1, 1, 7, 3, 0, 0, 0, 14, 7, 5, 3, 6, 4, 5, 2, 8, 0, 2, 0, 0), # 106
(8, 3, 9, 8, 6, 0, 5, 5, 2, 2, 1, 1, 0, 7, 6, 7, 6, 6, 3, 4, 2, 4, 2, 2, 2, 0), # 107
(12, 9, 11, 9, 10, 2, 3, 4, 1, 1, 0, 0, 0, 7, 9, 7, 3, 4, 2, 2, 1, 4, 1, 1, 0, 0), # 108
(13, 9, 8, 10, 6, 6, 4, 2, 3, 1, 2, 1, 0, 15, 10, 6, 3, 7, 6, 5, 4, 3, 5, 2, 0, 0), # 109
(10, 11, 8, 15, 7, 1, 3, 3, 7, 2, 1, 1, 0, 9, 10, 9, 5, 9, 2, 6, 1, 3, 2, 0, 0, 0), # 110
(9, 2, 6, 7, 7, 7, 3, 4, 4, 0, 0, 0, 0, 3, 9, 9, 3, 2, 4, 2, 5, 5, 1, 3, 0, 0), # 111
(8, 4, 14, 13, 6, 1, 3, 3, 3, 2, 2, 1, 0, 8, 6, 7, 5, 7, 2, 2, 1, 2, 6, 3, 0, 0), # 112
(5, 12, 5, 5, 6, 1, 1, 3, 1, 1, 1, 0, 0, 12, 9, 10, 2, 4, 3, 5, 5, 2, 6, 1, 0, 0), # 113
(9, 9, 7, 12, 7, 2, 5, 2, 6, 0, 0, 0, 0, 12, 7, 5, 6, 10, 3, 3, 1, 5, 0, 0, 1, 0), # 114
(11, 8, 5, 9, 8, 2, 1, 1, 8, 1, 1, 1, 0, 9, 8, 8, 6, 7, 2, 4, 5, 0, 2, 3, 0, 0), # 115
(12, 7, 6, 7, 7, 2, 6, 2, 2, 2, 1, 0, 0, 16, 3, 4, 3, 7, 7, 8, 2, 7, 2, 0, 0, 0), # 116
(5, 11, 14, 7, 4, 3, 1, 2, 2, 2, 2, 0, 0, 7, 7, 8, 4, 5, 0, 2, 2, 6, 0, 3, 0, 0), # 117
(8, 10, 5, 5, 10, 2, 2, 3, 1, 1, 1, 0, 0, 4, 9, 4, 5, 8, 6, 3, 3, 5, 3, 1, 0, 0), # 118
(11, 8, 8, 9, 9, 5, 1, 2, 1, 2, 3, 1, 0, 9, 8, 8, 3, 12, 3, 3, 3, 3, 1, 2, 0, 0), # 119
(3, 5, 3, 9, 3, 2, 3, 2, 8, 3, 2, 0, 0, 7, 7, 6, 2, 8, 3, 3, 2, 1, 1, 1, 0, 0), # 120
(8, 8, 9, 11, 14, 2, 4, 0, 6, 2, 1, 1, 0, 14, 8, 7, 4, 7, 2, 4, 2, 5, 2, 0, 1, 0), # 121
(10, 5, 6, 13, 8, 3, 5, 4, 5, 2, 4, 2, 0, 12, 5, 6, 5, 7, 4, 4, 2, 4, 1, 0, 0, 0), # 122
(9, 10, 5, 8, 11, 6, 4, 1, 2, 0, 1, 0, 0, 9, 9, 8, 9, 6, 2, 2, 5, 2, 0, 0, 1, 0), # 123
(4, 6, 6, 7, 9, 3, 0, 4, 8, 2, 3, 2, 0, 7, 6, 1, 5, 5, 3, 1, 2, 1, 4, 3, 0, 0), # 124
(4, 8, 5, 6, 9, 2, 3, 2, 2, 1, 3, 0, 0, 7, 6, 3, 5, 7, 5, 5, 1, 1, 3, 0, 1, 0), # 125
(8, 4, 4, 9, 11, 6, 0, 3, 4, 0, 0, 0, 0, 10, 7, 3, 3, 4, 5, 5, 3, 4, 1, 1, 2, 0), # 126
(8, 7, 8, 5, 7, 2, 2, 4, 2, 1, 0, 0, 0, 17, 8, 2, 6, 7, 6, 3, 3, 3, 3, 2, 2, 0), # 127
(11, 4, 6, 10, 6, 6, 3, 4, 2, 0, 0, 3, 0, 6, 4, 8, 1, 5, 2, 2, 1, 4, 2, 1, 0, 0), # 128
(9, 10, 11, 6, 6, 6, 1, 2, 5, 0, 2, 3, 0, 13, 6, 7, 4, 3, 2, 2, 4, 3, 4, 1, 0, 0), # 129
(10, 9, 7, 8, 10, 5, 4, 1, 6, 0, 3, 0, 0, 13, 5, 8, 4, 2, 1, 2, 1, 3, 2, 0, 1, 0), # 130
(10, 6, 4, 9, 5, 3, 3, 2, 4, 3, 0, 1, 0, 10, 10, 2, 5, 6, 4, 1, 1, 5, 1, 1, 2, 0), # 131
(7, 5, 6, 10, 6, 2, 7, 5, 5, 2, 1, 3, 0, 4, 7, 8, 5, 9, 4, 4, 1, 5, 3, 0, 2, 0), # 132
(7, 1, 2, 9, 9, 5, 1, 0, 2, 1, 1, 0, 0, 13, 2, 4, 3, 5, 2, 4, 4, 6, 1, 3, 1, 0), # 133
(3, 10, 5, 6, 8, 1, 2, 0, 2, 1, 1, 0, 0, 8, 9, 3, 3, 3, 3, 2, 4, 1, 2, 1, 0, 0), # 134
(5, 5, 4, 8, 4, 1, 3, 1, 5, 1, 1, 0, 0, 6, 11, 7, 1, 7, 4, 2, 1, 5, 1, 1, 0, 0), # 135
(6, 9, 6, 10, 8, 2, 2, 2, 1, 2, 1, 0, 0, 9, 5, 11, 3, 5, 1, 3, 4, 2, 4, 0, 0, 0), # 136
(15, 7, 8, 9, 5, 1, 0, 1, 5, 1, 0, 1, 0, 7, 7, 8, 1, 3, 4, 4, 4, 3, 1, 3, 1, 0), # 137
(14, 2, 6, 8, 6, 3, 2, 1, 3, 1, 0, 1, 0, 8, 8, 9, 4, 3, 6, 3, 0, 4, 3, 2, 0, 0), # 138
(14, 6, 9, 7, 21, 1, 2, 2, 1, 1, 0, 1, 0, 5, 6, 5, 1, 8, 0, 6, 2, 2, 2, 0, 0, 0), # 139
(14, 9, 6, 6, 11, 5, 3, 4, 5, 3, 1, 2, 0, 4, 9, 7, 5, 7, 2, 1, 1, 6, 3, 2, 0, 0), # 140
(7, 3, 8, 5, 3, 3, 3, 1, 6, 0, 1, 0, 0, 10, 8, 7, 3, 7, 7, 5, 0, 4, 1, 1, 1, 0), # 141
(8, 5, 4, 12, 3, 0, 2, 2, 2, 1, 0, 0, 0, 11, 9, 9, 2, 6, 2, 2, 2, 2, 2, 1, 1, 0), # 142
(12, 5, 6, 10, 3, 1, 3, 2, 1, 3, 0, 0, 0, 8, 11, 8, 5, 7, 6, 2, 1, 2, 2, 2, 0, 0), # 143
(12, 6, 10, 7, 9, 4, 2, 3, 2, 2, 1, 2, 0, 7, 8, 4, 6, 5, 5, 2, 1, 4, 0, 4, 0, 0), # 144
(11, 8, 6, 13, 6, 4, 1, 2, 3, 1, 0, 0, 0, 16, 7, 5, 2, 6, 6, 3, 4, 6, 1, 0, 0, 0), # 145
(11, 4, 7, 9, 6, 3, 5, 5, 3, 1, 0, 1, 0, 3, 8, 7, 6, 7, 9, 5, 0, 3, 3, 1, 0, 0), # 146
(8, 6, 6, 3, 7, 4, 3, 1, 6, 2, 1, 1, 0, 7, 9, 2, 4, 13, 1, 1, 2, 3, 2, 1, 1, 0), # 147
(8, 4, 5, 7, 11, 2, 0, 0, 4, 0, 2, 2, 0, 5, 8, 6, 4, 7, 7, 0, 1, 3, 2, 1, 0, 0), # 148
(9, 5, 5, 12, 5, 4, 2, 2, 6, 0, 2, 0, 0, 9, 4, 5, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0), # 149
(13, 3, 5, 4, 5, 0, 4, 6, 0, 0, 0, 1, 0, 10, 5, 7, 4, 4, 3, 4, 1, 2, 4, 2, 1, 0), # 150
(13, 3, 5, 7, 6, 10, 2, 1, 5, 0, 0, 0, 0, 9, 10, 6, 5, 5, 2, 4, 0, 3, 1, 0, 1, 0), # 151
(6, 7, 3, 8, 7, 3, 0, 1, 7, 2, 1, 0, 0, 5, 9, 7, 6, 6, 2, 1, 2, 2, 3, 0, 0, 0), # 152
(6, 6, 7, 11, 3, 5, 3, 2, 2, 1, 1, 0, 0, 8, 6, 8, 4, 6, 3, 0, 3, 2, 1, 3, 1, 0), # 153
(8, 10, 4, 13, 7, 4, 3, 3, 2, 1, 0, 0, 0, 13, 6, 5, 3, 5, 3, 1, 2, 2, 6, 1, 0, 0), # 154
(6, 4, 8, 1, 2, 1, 1, 1, 3, 0, 0, 0, 0, 11, 10, 7, 7, 9, 4, 3, 1, 3, 1, 0, 0, 0), # 155
(8, 6, 4, 6, 4, 2, 1, 5, 1, 1, 0, 0, 0, 5, 5, 2, 6, 10, 7, 5, 3, 2, 2, 1, 2, 0), # 156
(6, 5, 6, 9, 9, 2, 1, 1, 1, 1, 2, 1, 0, 12, 11, 2, 0, 7, 4, 2, 5, 3, 3, 1, 0, 0), # 157
(2, 2, 6, 5, 9, 3, 2, 5, 4, 1, 3, 0, 0, 9, 5, 8, 2, 13, 6, 3, 2, 0, 2, 1, 0, 0), # 158
(3, 6, 6, 5, 6, 3, 0, 4, 5, 0, 0, 1, 0, 12, 6, 3, 3, 5, 5, 4, 3, 1, 2, 0, 0, 0), # 159
(9, 4, 5, 6, 16, 5, 1, 4, 3, 1, 1, 0, 0, 6, 12, 4, 4, 11, 5, 7, 3, 2, 1, 1, 1, 0), # 160
(3, 5, 6, 9, 10, 7, 4, 2, 4, 1, 0, 0, 0, 7, 5, 3, 2, 8, 3, 3, 4, 4, 3, 0, 0, 0), # 161
(6, 7, 4, 4, 6, 2, 0, 4, 3, 1, 0, 1, 0, 6, 6, 3, 3, 8, 1, 2, 2, 2, 1, 3, 0, 0), # 162
(5, 4, 9, 6, 9, 5, 2, 4, 3, 0, 2, 1, 0, 12, 4, 2, 3, 4, 3, 0, 1, 3, 1, 2, 1, 0), # 163
(8, 4, 7, 3, 7, 1, 0, 0, 3, 2, 0, 3, 0, 6, 7, 5, 2, 6, 3, 2, 4, 1, 0, 4, 0, 0), # 164
(4, 5, 9, 4, 6, 1, 1, 3, 3, 2, 2, 0, 0, 3, 4, 3, 5, 7, 1, 2, 5, 2, 8, 2, 1, 0), # 165
(4, 3, 3, 6, 2, 1, 1, 0, 6, 1, 0, 0, 0, 8, 6, 2, 5, 6, 1, 1, 1, 1, 2, 0, 0, 0), # 166
(11, 7, 7, 4, 9, 1, 1, 1, 2, 1, 1, 0, 0, 5, 8, 2, 0, 11, 2, 1, 3, 4, 4, 0, 3, 0), # 167
(7, 6, 2, 4, 1, 2, 1, 2, 4, 1, 1, 0, 0, 8, 1, 4, 5, 3, 2, 3, 2, 4, 2, 1, 0, 0), # 168
(8, 5, 2, 4, 6, 1, 0, 0, 1, 0, 0, 1, 0, 10, 4, 5, 3, 8, 4, 3, 1, 2, 2, 2, 0, 0), # 169
(5, 5, 5, 3, 7, 1, 4, 0, 0, 5, 1, 0, 0, 7, 6, 3, 2, 5, 3, 2, 1, 1, 2, 0, 0, 0), # 170
(6, 3, 3, 6, 5, 1, 1, 0, 4, 2, 2, 0, 0, 5, 1, 6, 3, 2, 1, 4, 1, 1, 0, 3, 0, 0), # 171
(9, 1, 2, 8, 0, 3, 0, 2, 4, 0, 2, 0, 0, 5, 4, 4, 4, 6, 0, 2, 1, 4, 2, 1, 0, 0), # 172
(4, 4, 7, 2, 1, 2, 3, 2, 0, 0, 3, 0, 0, 7, 4, 4, 3, 6, 7, 1, 0, 1, 2, 3, 0, 0), # 173
(9, 3, 5, 3, 5, 1, 1, 1, 2, 3, 3, 0, 0, 7, 3, 2, 3, 2, 1, 2, 1, 2, 1, 1, 0, 0), # 174
(2, 3, 11, 4, 1, 1, 1, 0, 2, 0, 2, 0, 0, 6, 4, 2, 0, 5, 4, 1, 1, 3, 1, 2, 0, 0), # 175
(3, 3, 5, 3, 5, 2, 0, 1, 0, 0, 1, 1, 0, 9, 2, 2, 1, 1, 5, 1, 1, 2, 0, 0, 1, 0), # 176
(2, 2, 4, 3, 3, 2, 2, 0, 1, 1, 0, 1, 0, 5, 3, 2, 2, 5, 1, 1, 1, 1, 3, 0, 1, 0), # 177
(2, 1, 5, 0, 2, 1, 2, 1, 0, 1, 0, 1, 0, 8, 5, 4, 2, 5, 2, 0, 0, 2, 2, 0, 0, 0), # 178
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # 179
)
station_arriving_intensity = (
(5.020865578371768, 5.525288559693166, 5.211283229612507, 6.214667773863432, 5.554685607609612, 3.1386549320373387, 4.146035615373915, 4.653176172979423, 6.090099062168007, 3.9580150155223697, 4.205265163885603, 4.897915078306173, 5.083880212578363), # 0
(5.354327152019974, 5.890060694144759, 5.555346591330152, 6.625144253276616, 5.922490337474237, 3.3459835840425556, 4.419468941263694, 4.959513722905708, 6.492245326332909, 4.21898069227715, 4.483096135956131, 5.221216660814354, 5.419791647439855), # 1
(5.686723008979731, 6.253385170890979, 5.8980422855474135, 7.033987704664794, 6.288962973749744, 3.5524851145124448, 4.691818507960704, 5.264625247904419, 6.892786806877549, 4.478913775020546, 4.759823148776313, 5.543232652053055, 5.75436482820969), # 2
(6.016757793146562, 6.613820501936447, 6.238010869319854, 7.439576407532074, 6.652661676001902, 3.757340622585113, 4.962003641647955, 5.567301157494507, 7.290135160921093, 4.736782698426181, 5.0343484118273825, 5.862685684930461, 6.086272806254225), # 3
(6.343136148415981, 6.9699251992857745, 6.573892899703036, 7.840288641382569, 7.012144603796492, 3.9597312073986677, 5.2289436685084585, 5.866331861194915, 7.682702045582707, 4.991555897167679, 5.305574134590575, 6.178298392354764, 6.414188632939817), # 4
(6.66456271868351, 7.320257774943588, 6.9043289337525175, 8.234502685720393, 7.36596991669928, 4.158837968091214, 5.491557914725224, 6.160507768524592, 8.068899117981559, 5.242201805918663, 5.572402526547132, 6.488793407234148, 6.736785359632827), # 5
(6.979742147844666, 7.663376740914501, 7.227959528523866, 8.620596820049652, 7.712695774276043, 4.353842003800864, 5.7487657064812625, 6.4486192890024885, 8.447138035236815, 5.487688859352758, 5.833735797178282, 6.792893362476808, 7.052736037699606), # 6
(7.2873790797949685, 7.997840609203132, 7.543425241072635, 8.996949323874462, 8.050880336092554, 4.543924413665721, 5.999486369959585, 6.729456832147552, 8.815830454467644, 5.726985492143586, 6.088476155965268, 7.089320890990929, 7.360713718506519), # 7
(7.586178158429934, 8.322207891814099, 7.849366628454396, 9.361938476698928, 8.379081761714586, 4.7282662968238895, 6.2426392313431975, 7.001810807478725, 9.173388032793206, 5.959060138964774, 6.335525812389321, 7.376798625684702, 7.659391453419917), # 8
(7.874844027645085, 8.635037100752022, 8.144424247724704, 9.713942558027169, 8.69585821070791, 4.906048752413484, 6.47714361681512, 7.264471624514963, 9.518222427332674, 6.182881234489941, 6.573786975931678, 7.654049199466313, 7.947442293806162), # 9
(8.152081331335932, 8.934886748021516, 8.427238655939124, 10.051339847363288, 8.9997678426383, 5.076452879572607, 6.701918852558355, 7.516229692775211, 9.848745295205214, 6.397417213392714, 6.802161856073574, 7.919795245243952, 8.22353929103161), # 10
(8.416594713398005, 9.220315345627206, 8.696450410153215, 10.372508624211397, 9.289368817071534, 5.238659777439368, 6.915884264755916, 7.7558754217784145, 10.163368293529993, 6.601636510346719, 7.019552662296249, 8.17275939592581, 8.486355496462611), # 11
(8.667088817726812, 9.489881405573698, 8.95070006742254, 10.675827168075612, 9.563219293573377, 5.391850545151869, 7.1179591795908115, 7.982199221043521, 10.460503079426179, 6.794507560025572, 7.224861604080934, 8.411664284420068, 8.734563961465534), # 12
(8.902268288217876, 9.74214343986562, 9.188628184802662, 10.959673758460044, 9.819877431709601, 5.5352062818482235, 7.307062923246056, 8.193991500089481, 10.738561310012932, 6.974998797102904, 7.416990890908869, 8.63523254363492, 8.966837737406735), # 13
(9.120837768766716, 9.975659960507588, 9.408875319349146, 11.222426674868792, 10.05790139104599, 5.667908086666534, 7.482114821904661, 8.390042668435246, 10.995954642409421, 7.142078656252334, 7.594842732261284, 8.84218680647856, 9.181849875652563), # 14
(9.321501903268855, 10.188989479504217, 9.610082028117542, 11.462464196805985, 10.275849331148308, 5.789137058744912, 7.642034201749626, 8.569143135599756, 11.23109473373482, 7.29471557214749, 7.757319337619419, 9.031249705859171, 9.37827342756938), # 15
(9.5029653356198, 10.380690508860132, 9.790888868163425, 11.678164603775716, 10.472279411582333, 5.898074297221459, 7.785740388963976, 8.73008331110196, 11.442393241108286, 7.431877979461996, 7.9033229164645125, 9.20114387468494, 9.554781444523545), # 16
(9.663932709715075, 10.549321560579946, 9.949936396542352, 11.867906175282112, 10.645749791913838, 5.993900901234285, 7.9121527097307105, 8.871653604460818, 11.628261821648984, 7.552534312869467, 8.031755678277799, 9.350591945864055, 9.710046977881415), # 17
(9.803108669450204, 10.693441146668274, 10.08586517030988, 12.030067190829278, 10.794818631708589, 6.075797969921503, 8.020190490232851, 8.99264442519526, 11.787112132476096, 7.6556530070435365, 8.141519832540508, 9.478316552304715, 9.842743079009345), # 18
(9.919197858720699, 10.811607779129744, 10.197315746521578, 12.163025929921314, 10.918044090532366, 6.142946602421208, 8.108773056653394, 9.091846182824245, 11.917355830708779, 7.740202496657828, 8.231517588733878, 9.583040326915096, 9.951542799273696), # 19
(10.010904921422082, 10.902379969968962, 10.282928682233003, 12.265160672062354, 11.013984327950944, 6.194527897871518, 8.176819735175362, 9.168049286866717, 12.017404573466198, 7.805151216385958, 8.30065115633915, 9.66348590260339, 10.035119190040824), # 20
(10.076934501449866, 10.964316231190558, 10.341344534499719, 12.334849696756486, 11.081197503530088, 6.229722955410535, 8.223249851981759, 9.220044146841623, 12.085670017867521, 7.849467600901555, 8.34782274483756, 9.718375912277793, 10.092145302677078), # 21
(10.115991242699579, 10.995975074799144, 10.371203860377285, 12.370471283507836, 11.118241776835575, 6.247712874176367, 8.2469827332556, 9.246621172267915, 12.120563821031915, 7.872120084878242, 8.37193456371034, 9.74643298884649, 10.121294188548827), # 22
(10.13039336334264, 10.999723593964335, 10.374923182441702, 12.374930812757203, 11.127732056032597, 6.25, 8.249804002259339, 9.249493827160494, 12.124926234567901, 7.874792272519433, 8.37495803716174, 9.749897576588934, 10.125), # 23
(10.141012413034153, 10.997537037037038, 10.374314814814815, 12.374381944444446, 11.133107613614852, 6.25, 8.248253812636166, 9.2455, 12.124341666666666, 7.87315061728395, 8.37462457912458, 9.749086419753086, 10.125), # 24
(10.15140723021158, 10.993227023319616, 10.373113854595337, 12.373296039094651, 11.138364945594503, 6.25, 8.24519890260631, 9.237654320987655, 12.123186728395062, 7.869918838591678, 8.373963399426362, 9.747485139460448, 10.125), # 25
(10.161577019048034, 10.986859396433472, 10.371336762688616, 12.37168544238683, 11.143503868421105, 6.25, 8.240686718308721, 9.226104938271606, 12.1214762345679, 7.865150708733425, 8.372980483850855, 9.745115683584821, 10.125), # 26
(10.171520983716636, 10.978499999999999, 10.369, 12.369562499999999, 11.148524198544214, 6.25, 8.234764705882354, 9.211, 12.119225, 7.858899999999999, 8.371681818181818, 9.742, 10.125), # 27
(10.181238328390501, 10.968214677640603, 10.366120027434842, 12.366939557613168, 11.153425752413401, 6.25, 8.22748031146615, 9.192487654320988, 12.116447839506172, 7.851220484682213, 8.370073388203018, 9.73816003657979, 10.125), # 28
(10.19072825724275, 10.95606927297668, 10.362713305898492, 12.36382896090535, 11.15820834647822, 6.25, 8.218880981199066, 9.170716049382715, 12.113159567901235, 7.842165935070874, 8.368161179698216, 9.733617741197987, 10.125), # 29
(10.199989974446497, 10.94212962962963, 10.358796296296296, 12.360243055555555, 11.162871797188236, 6.25, 8.209014161220043, 9.145833333333332, 12.109375, 7.83179012345679, 8.365951178451178, 9.728395061728394, 10.125), # 30
(10.209022684174858, 10.926461591220852, 10.354385459533608, 12.356194187242798, 11.167415920993008, 6.25, 8.19792729766804, 9.117987654320988, 12.105108950617284, 7.820146822130773, 8.363449370245666, 9.722513946044812, 10.125), # 31
(10.217825590600954, 10.909131001371742, 10.349497256515773, 12.35169470164609, 11.171840534342095, 6.25, 8.185667836681999, 9.087327160493828, 12.100376234567902, 7.807289803383631, 8.360661740865444, 9.715996342021034, 10.125), # 32
(10.226397897897897, 10.890203703703703, 10.344148148148149, 12.346756944444444, 11.176145453685063, 6.25, 8.172283224400871, 9.054, 12.095191666666667, 7.793272839506173, 8.357594276094275, 9.708864197530863, 10.125), # 33
(10.23473881023881, 10.869745541838133, 10.338354595336076, 12.341393261316872, 11.180330495471466, 6.25, 8.15782090696361, 9.018154320987653, 12.089570061728397, 7.778149702789209, 8.354252961715924, 9.701139460448102, 10.125), # 34
(10.242847531796807, 10.847822359396433, 10.332133058984912, 12.335615997942385, 11.18439547615087, 6.25, 8.142328330509159, 8.979938271604938, 12.083526234567902, 7.761974165523548, 8.350643783514153, 9.692844078646548, 10.125), # 35
(10.250723266745005, 10.824499999999999, 10.3255, 12.3294375, 11.188340212172836, 6.25, 8.12585294117647, 8.9395, 12.077074999999999, 7.7448, 8.346772727272727, 9.684000000000001, 10.125), # 36
(10.258365219256524, 10.799844307270233, 10.318471879286694, 12.322870113168724, 11.192164519986921, 6.25, 8.108442185104494, 8.896987654320988, 12.070231172839506, 7.726680978509374, 8.34264577877541, 9.674629172382259, 10.125), # 37
(10.265772593504476, 10.773921124828533, 10.311065157750342, 12.315926183127573, 11.19586821604269, 6.25, 8.09014350843218, 8.85254938271605, 12.063009567901235, 7.707670873342479, 8.33826892380596, 9.664753543667125, 10.125), # 38
(10.272944593661986, 10.746796296296296, 10.303296296296297, 12.308618055555556, 11.199451116789703, 6.25, 8.071004357298476, 8.806333333333333, 12.055425000000001, 7.687823456790124, 8.333648148148148, 9.654395061728394, 10.125), # 39
(10.279880423902163, 10.718535665294924, 10.295181755829903, 12.300958076131687, 11.202913038677519, 6.25, 8.05107217784233, 8.758487654320989, 12.047492283950618, 7.667192501143119, 8.328789437585733, 9.643575674439873, 10.125), # 40
(10.286579288398128, 10.689205075445816, 10.286737997256516, 12.29295859053498, 11.206253798155702, 6.25, 8.030394416202695, 8.709160493827161, 12.0392262345679, 7.645831778692272, 8.323698777902482, 9.632317329675354, 10.125), # 41
(10.293040391323, 10.658870370370371, 10.277981481481483, 12.284631944444445, 11.209473211673808, 6.25, 8.009018518518518, 8.6585, 12.030641666666668, 7.623795061728395, 8.318382154882155, 9.620641975308642, 10.125), # 42
(10.299262936849892, 10.627597393689987, 10.268928669410151, 12.275990483539095, 11.212571095681403, 6.25, 7.98699193092875, 8.606654320987655, 12.021753395061728, 7.601136122542296, 8.312845554308517, 9.608571559213535, 10.125), # 43
(10.305246129151927, 10.595451989026063, 10.259596021947875, 12.267046553497943, 11.215547266628045, 6.25, 7.964362099572339, 8.553771604938273, 12.0125762345679, 7.577908733424783, 8.307094961965332, 9.596128029263832, 10.125), # 44
(10.310989172402216, 10.5625, 10.25, 12.2578125, 11.218401540963296, 6.25, 7.9411764705882355, 8.5, 12.003124999999999, 7.554166666666667, 8.301136363636363, 9.583333333333332, 10.125), # 45
(10.31649127077388, 10.528807270233196, 10.240157064471878, 12.24830066872428, 11.221133735136716, 6.25, 7.917482490115388, 8.445487654320988, 11.993414506172838, 7.529963694558756, 8.294975745105374, 9.57020941929584, 10.125), # 46
(10.321751628440035, 10.49443964334705, 10.230083676268862, 12.238523405349794, 11.223743665597867, 6.25, 7.893327604292747, 8.390382716049382, 11.983459567901235, 7.505353589391861, 8.288619092156129, 9.55677823502515, 10.125), # 47
(10.326769449573796, 10.459462962962963, 10.219796296296296, 12.228493055555557, 11.22623114879631, 6.25, 7.868759259259259, 8.334833333333334, 11.973275000000001, 7.4803901234567896, 8.28207239057239, 9.543061728395061, 10.125), # 48
(10.331543938348286, 10.42394307270233, 10.209311385459534, 12.218221965020577, 11.228596001181607, 6.25, 7.8438249011538765, 8.278987654320987, 11.96287561728395, 7.455127069044353, 8.275341626137923, 9.529081847279379, 10.125), # 49
(10.336074298936616, 10.387945816186559, 10.198645404663925, 12.207722479423868, 11.230838039203315, 6.25, 7.81857197611555, 8.222993827160494, 11.9522762345679, 7.429618198445358, 8.268432784636488, 9.514860539551899, 10.125), # 50
(10.34035973551191, 10.351537037037037, 10.187814814814814, 12.197006944444444, 11.232957079310998, 6.25, 7.793047930283224, 8.167, 11.941491666666668, 7.403917283950617, 8.261351851851853, 9.50041975308642, 10.125), # 51
(10.344399452247279, 10.314782578875173, 10.176836076817558, 12.186087705761317, 11.234952937954214, 6.25, 7.767300209795852, 8.111154320987653, 11.930536728395062, 7.3780780978509375, 8.254104813567777, 9.485781435756746, 10.125), # 52
(10.348192653315843, 10.27774828532236, 10.165725651577505, 12.174977109053497, 11.23682543158253, 6.25, 7.741376260792383, 8.055604938271605, 11.919426234567903, 7.3521544124371285, 8.246697655568026, 9.470967535436671, 10.125), # 53
(10.351738542890716, 10.2405, 10.154499999999999, 12.1636875, 11.238574376645502, 6.25, 7.715323529411765, 8.000499999999999, 11.908175, 7.3262, 8.239136363636362, 9.456, 10.125), # 54
(10.355036325145022, 10.203103566529492, 10.143175582990398, 12.152231224279834, 11.24019958959269, 6.25, 7.689189461792948, 7.945987654320987, 11.896797839506172, 7.300268632830361, 8.231426923556553, 9.44090077732053, 10.125), # 55
(10.358085204251871, 10.165624828532236, 10.131768861454047, 12.140620627572016, 11.241700886873659, 6.25, 7.663021504074881, 7.892216049382716, 11.885309567901235, 7.274414083219022, 8.223575321112358, 9.425691815272062, 10.125), # 56
(10.360884384384383, 10.12812962962963, 10.120296296296297, 12.128868055555555, 11.243078084937967, 6.25, 7.636867102396514, 7.839333333333334, 11.873725, 7.24869012345679, 8.215587542087542, 9.410395061728394, 10.125), # 57
(10.36343306971568, 10.090683813443073, 10.108774348422497, 12.116985853909464, 11.244331000235174, 6.25, 7.610773702896797, 7.787487654320987, 11.862058950617284, 7.223150525834477, 8.20746957226587, 9.395032464563329, 10.125), # 58
(10.36573046441887, 10.053353223593964, 10.097219478737998, 12.104986368312757, 11.245459449214845, 6.25, 7.584788751714678, 7.736827160493827, 11.850326234567902, 7.197849062642891, 8.1992273974311, 9.379625971650663, 10.125), # 59
(10.367775772667077, 10.016203703703704, 10.085648148148147, 12.092881944444445, 11.246463248326537, 6.25, 7.558959694989106, 7.6875, 11.838541666666668, 7.172839506172839, 8.190867003367003, 9.364197530864198, 10.125), # 60
(10.369568198633415, 9.97930109739369, 10.0740768175583, 12.080684927983539, 11.247342214019811, 6.25, 7.533333978859033, 7.639654320987654, 11.826720061728395, 7.148175628715135, 8.182394375857339, 9.348769090077733, 10.125), # 61
(10.371106946491004, 9.942711248285322, 10.062521947873801, 12.068407664609055, 11.248096162744234, 6.25, 7.507959049463406, 7.5934382716049384, 11.814876234567901, 7.123911202560586, 8.17381550068587, 9.333362597165067, 10.125), # 62
(10.37239122041296, 9.9065, 10.051, 12.056062500000001, 11.248724910949356, 6.25, 7.482882352941176, 7.549, 11.803025, 7.100099999999999, 8.165136363636364, 9.318, 10.125), # 63
(10.373420224572397, 9.870733196159122, 10.039527434842249, 12.043661779835391, 11.249228275084748, 6.25, 7.458151335431292, 7.506487654320988, 11.791181172839506, 7.076795793324188, 8.156362950492579, 9.302703246456334, 10.125), # 64
(10.374193163142438, 9.835476680384087, 10.0281207133059, 12.031217849794238, 11.249606071599967, 6.25, 7.433813443072703, 7.466049382716049, 11.779359567901235, 7.054052354823959, 8.147501247038285, 9.287494284407863, 10.125), # 65
(10.374709240296196, 9.800796296296298, 10.016796296296297, 12.018743055555555, 11.249858116944573, 6.25, 7.409916122004357, 7.427833333333334, 11.767575, 7.031923456790123, 8.138557239057238, 9.272395061728396, 10.125), # 66
(10.374967660206792, 9.766757887517146, 10.005570644718793, 12.006249742798353, 11.24998422756813, 6.25, 7.386506818365206, 7.391987654320989, 11.755842283950617, 7.010462871513489, 8.12953691233321, 9.257427526291723, 10.125), # 67
(10.374791614480825, 9.733248639320323, 9.994405949931412, 11.993641740472357, 11.249877955297345, 6.2498840115836, 7.363515194829646, 7.358343850022862, 11.744087848651121, 6.989620441647166, 8.120285988540376, 9.242530021899743, 10.124875150034294), # 68
(10.373141706924315, 9.699245519713262, 9.982988425925925, 11.980283514492752, 11.248910675381262, 6.248967078189301, 7.340268181346613, 7.325098765432099, 11.731797839506173, 6.968806390704429, 8.10986283891547, 9.227218973359324, 10.12388599537037), # 69
(10.369885787558895, 9.664592459843355, 9.971268432784635, 11.966087124261943, 11.246999314128942, 6.247161255906112, 7.31666013456137, 7.291952446273434, 11.718902892089622, 6.947919524462734, 8.09814888652608, 9.211422761292809, 10.121932334533609), # 70
(10.365069660642929, 9.62931016859153, 9.959250085733881, 11.951073503757382, 11.244168078754136, 6.244495808565767, 7.292701659538988, 7.258915866483768, 11.705422210791038, 6.926960359342639, 8.085187370783862, 9.195152937212715, 10.119039887688615), # 71
(10.358739130434783, 9.593419354838709, 9.946937499999999, 11.935263586956522, 11.240441176470588, 6.2410000000000005, 7.268403361344538, 7.226, 11.691375, 6.905929411764705, 8.07102153110048, 9.17842105263158, 10.115234375), # 72
(10.35094000119282, 9.556940727465816, 9.934334790809327, 11.918678307836823, 11.23584281449205, 6.236703094040542, 7.243775845043092, 7.193215820759031, 11.676780464106082, 6.884827198149493, 8.055694606887588, 9.161238659061919, 10.110541516632374), # 73
(10.341718077175404, 9.519894995353777, 9.921446073388202, 11.901338600375738, 11.230397200032275, 6.231634354519128, 7.218829715699722, 7.160574302697759, 11.661657807498857, 6.863654234917561, 8.039249837556856, 9.143617308016267, 10.104987032750344), # 74
(10.331119162640901, 9.482302867383511, 9.908275462962962, 11.883265398550725, 11.224128540305012, 6.22582304526749, 7.1935755783795, 7.128086419753086, 11.6460262345679, 6.84241103848947, 8.021730462519935, 9.125568551007147, 10.098596643518519), # 75
(10.319189061847677, 9.44418505243595, 9.894827074759945, 11.864479636339238, 11.217061042524005, 6.219298430117361, 7.168024038147495, 7.095763145861912, 11.629904949702789, 6.821098125285779, 8.003179721188491, 9.107103939547082, 10.091396069101508), # 76
(10.305973579054093, 9.40556225939201, 9.881105024005485, 11.845002247718732, 11.209218913903008, 6.212089772900472, 7.142185700068779, 7.063615454961135, 11.613313157293096, 6.7997160117270505, 7.983640852974187, 9.088235025148606, 10.083411029663925), # 77
(10.291518518518519, 9.366455197132618, 9.867113425925925, 11.824854166666666, 11.200626361655774, 6.204226337448559, 7.116071169208425, 7.031654320987655, 11.596270061728394, 6.7782652142338415, 7.9631570972886765, 9.068973359324238, 10.074667245370371), # 78
(10.275869684499314, 9.326884574538697, 9.8528563957476, 11.804056327160493, 11.191307592996047, 6.195737387593354, 7.089691050631501, 6.9998907178783725, 11.578794867398262, 6.756746249226714, 7.941771693543622, 9.049330493586504, 10.065190436385459), # 79
(10.259072881254847, 9.286871100491172, 9.838338048696844, 11.782629663177671, 11.181286815137579, 6.18665218716659, 7.063055949403081, 6.968335619570188, 11.560906778692273, 6.7351596331262265, 7.919527881150688, 9.029317979447935, 10.0550063228738), # 80
(10.241173913043479, 9.246435483870968, 9.8235625, 11.760595108695654, 11.170588235294117, 6.177, 7.036176470588235, 6.937, 11.542625, 6.713505882352941, 7.8964688995215315, 9.008947368421053, 10.044140624999999), # 81
(10.222218584123576, 9.205598433559008, 9.808533864883403, 11.737973597691894, 11.159236060679415, 6.166810089925317, 7.009063219252036, 6.90589483310471, 11.52396873571102, 6.691785513327416, 7.872637988067813, 8.988230212018387, 10.03261906292867), # 82
(10.202252698753504, 9.164380658436214, 9.793256258573388, 11.714786064143853, 11.147254498507221, 6.156111720774272, 6.981726800459553, 6.875031092821216, 11.504957190214906, 6.669999042470211, 7.848078386201194, 8.967178061752461, 10.020467356824417), # 83
(10.181322061191626, 9.122802867383513, 9.777733796296296, 11.691053442028986, 11.134667755991286, 6.144934156378601, 6.954177819275858, 6.844419753086419, 11.485609567901234, 6.648146986201889, 7.822833333333333, 8.945802469135803, 10.007711226851852), # 84
(10.159472475696308, 9.080885769281826, 9.761970593278463, 11.666796665324746, 11.121500040345357, 6.133306660570035, 6.926426880766024, 6.814071787837221, 11.465945073159578, 6.626229860943005, 7.796946068875894, 8.924114985680937, 9.994376393175584), # 85
(10.136749746525913, 9.03865007301208, 9.745970764746229, 11.64203666800859, 11.107775558783183, 6.121258497180309, 6.89848458999512, 6.783998171010516, 11.445982910379517, 6.604248183114124, 7.770459832240534, 8.902127162900394, 9.98048857596022), # 86
(10.113199677938807, 8.996116487455197, 9.729738425925925, 11.61679438405797, 11.09351851851852, 6.108818930041152, 6.870361552028219, 6.75420987654321, 11.425742283950619, 6.582202469135802, 7.743417862838915, 8.879850552306692, 9.96607349537037), # 87
(10.088868074193357, 8.9533057214921, 9.713277692043896, 11.59109074745035, 11.07875312676511, 6.096017222984301, 6.842068371930391, 6.724717878372199, 11.40524239826246, 6.560093235428601, 7.715863400082698, 8.857296705412365, 9.951156871570646), # 88
(10.063800739547922, 8.910238484003717, 9.696592678326475, 11.564946692163177, 11.063503590736707, 6.082882639841488, 6.813615654766708, 6.695533150434385, 11.384502457704619, 6.537920998413083, 7.687839683383544, 8.834477173729935, 9.935764424725651), # 89
(10.03804347826087, 8.866935483870968, 9.6796875, 11.538383152173914, 11.04779411764706, 6.069444444444445, 6.785014005602241, 6.666666666666666, 11.363541666666668, 6.515686274509804, 7.65938995215311, 8.81140350877193, 9.919921875), # 90
(10.011642094590563, 8.823417429974777, 9.662566272290809, 11.511421061460013, 11.031648914709915, 6.055731900624904, 6.756274029502062, 6.638129401005944, 11.342379229538182, 6.4933895801393255, 7.63055744580306, 8.788087262050874, 9.903654942558298), # 91
(9.984642392795372, 8.779705031196071, 9.64523311042524, 11.484081353998926, 11.015092189139029, 6.041774272214601, 6.727406331531242, 6.609932327389118, 11.321034350708734, 6.471031431722209, 7.601385403745053, 8.764539985079297, 9.886989347565157), # 92
(9.957090177133654, 8.735818996415771, 9.62769212962963, 11.456384963768118, 10.998148148148148, 6.027600823045267, 6.69842151675485, 6.582086419753087, 11.299526234567901, 6.448612345679011, 7.57191706539075, 8.74077322936972, 9.869950810185184), # 93
(9.92903125186378, 8.691780034514801, 9.609947445130317, 11.428352824745035, 10.98084099895102, 6.0132408169486355, 6.669330190237961, 6.554602652034752, 11.277874085505259, 6.426132838430297, 7.54219567015181, 8.716798546434674, 9.85256505058299), # 94
(9.90051142124411, 8.647608854374088, 9.592003172153635, 11.400005870907139, 10.963194948761398, 5.9987235177564395, 6.640142957045644, 6.527491998171011, 11.25609710791038, 6.403593426396621, 7.512264457439896, 8.69262748778668, 9.834857788923182), # 95
(9.871576489533012, 8.603326164874554, 9.573863425925927, 11.371365036231884, 10.945234204793028, 5.984078189300411, 6.610870422242971, 6.500765432098766, 11.234214506172838, 6.3809946259985475, 7.482166666666667, 8.668271604938273, 9.816854745370371), # 96
(9.842272260988848, 8.558952674897121, 9.555532321673525, 11.342451254696725, 10.926982974259664, 5.969334095412284, 6.581523190895013, 6.474433927754916, 11.212245484682214, 6.358336953656634, 7.451945537243782, 8.64374244940197, 9.798581640089164), # 97
(9.812644539869984, 8.514509093322713, 9.53701397462277, 11.31328546027912, 10.908465464375052, 5.954520499923793, 6.552111868066842, 6.44850845907636, 11.190209247828074, 6.335620925791441, 7.421644308582906, 8.619051572690298, 9.78006419324417), # 98
(9.782739130434782, 8.470016129032258, 9.5183125, 11.283888586956522, 10.889705882352942, 5.939666666666667, 6.52264705882353, 6.423, 11.168125, 6.312847058823529, 7.391306220095694, 8.59421052631579, 9.761328125), # 99
(9.752601836941611, 8.425494490906676, 9.49943201303155, 11.254281568706388, 10.870728435407084, 5.924801859472641, 6.493139368230145, 6.3979195244627345, 11.146011945587563, 6.290015869173458, 7.36097451119381, 8.569230861790967, 9.742399155521262), # 100
(9.722278463648834, 8.380964887826895, 9.480376628943759, 11.224485339506174, 10.85155733075123, 5.909955342173449, 6.463599401351762, 6.3732780064014625, 11.123889288980338, 6.267127873261788, 7.330692421288912, 8.544124130628353, 9.723303004972564), # 101
(9.691814814814816, 8.336448028673836, 9.461150462962962, 11.194520833333334, 10.832216775599129, 5.895156378600824, 6.43403776325345, 6.349086419753086, 11.1017762345679, 6.244183587509078, 7.300503189792663, 8.518901884340481, 9.704065393518519), # 102
(9.661256694697919, 8.291964622328422, 9.4417576303155, 11.164408984165325, 10.812730977164529, 5.880434232586496, 6.40446505900028, 6.325355738454504, 11.079691986739826, 6.221183528335889, 7.270450056116723, 8.493575674439873, 9.68471204132373), # 103
(9.63064990755651, 8.247535377671579, 9.422202246227709, 11.134170725979603, 10.79312414266118, 5.865818167962201, 6.374891893657326, 6.302096936442616, 11.057655749885688, 6.19812821216278, 7.24057625967275, 8.468157052439054, 9.665268668552812), # 104
(9.600040257648953, 8.203181003584229, 9.402488425925926, 11.103826992753623, 10.773420479302832, 5.851337448559671, 6.345328872289658, 6.279320987654321, 11.035686728395062, 6.175018155410313, 7.210925039872408, 8.442657569850553, 9.64576099537037), # 105
(9.569473549233614, 8.158922208947299, 9.382620284636488, 11.073398718464842, 10.753644194303236, 5.837021338210638, 6.315786599962345, 6.25703886602652, 11.01380412665752, 6.151853874499045, 7.181539636127355, 8.417088778186894, 9.626214741941014), # 106
(9.538995586568856, 8.11477970264171, 9.362601937585735, 11.042906837090714, 10.733819494876139, 5.822899100746838, 6.286275681740461, 6.235261545496114, 10.992027149062643, 6.128635885849539, 7.152463287849252, 8.391462228960604, 9.606655628429355), # 107
(9.508652173913044, 8.070774193548388, 9.3424375, 11.012372282608696, 10.713970588235293, 5.809, 6.256806722689075, 6.214, 10.970375, 6.105364705882353, 7.1237392344497605, 8.365789473684211, 9.587109375), # 108
(9.478489115524543, 8.026926390548255, 9.322131087105625, 10.98181598899624, 10.69412168159445, 5.795353299801859, 6.227390327873262, 6.193265203475081, 10.948866883859168, 6.082040851018047, 7.09541071534054, 8.340082063870238, 9.567601701817559), # 109
(9.448552215661715, 7.983257002522237, 9.301686814128946, 10.951258890230811, 10.674296982167354, 5.7819882639841484, 6.198037102358089, 6.173068129858253, 10.92752200502972, 6.058664837677183, 7.06752096993325, 8.314351551031214, 9.54815832904664), # 110
(9.41888727858293, 7.9397867383512555, 9.281108796296298, 10.920721920289855, 10.654520697167756, 5.768934156378601, 6.168757651208631, 6.153419753086419, 10.906359567901236, 6.035237182280319, 7.040113237639553, 8.288609486679663, 9.528804976851852), # 111
(9.38954010854655, 7.896536306916234, 9.26040114883402, 10.890226013150832, 10.634817033809409, 5.756220240816949, 6.139562579489958, 6.134331047096479, 10.885398776863282, 6.011758401248016, 7.013230757871109, 8.26286742232811, 9.509567365397805), # 112
(9.360504223703044, 7.853598618785952, 9.239617828252069, 10.85983388249204, 10.615175680173705, 5.7438697692145135, 6.1105259636567695, 6.115852568780606, 10.86471281125862, 5.988304736612729, 6.9869239061528665, 8.237192936504428, 9.490443900843221), # 113
(9.331480897900065, 7.811397183525536, 9.219045675021619, 10.829789421277336, 10.595393354566326, 5.731854608529901, 6.082018208410579, 6.09821125950512, 10.84461903571306, 5.965315167912783, 6.961244337113197, 8.211912172112974, 9.471275414160035), # 114
(9.302384903003995, 7.769947198683046, 9.198696932707318, 10.800084505181779, 10.5754076778886, 5.7201435124987645, 6.054059650191562, 6.081402654278709, 10.82512497866879, 5.942825327988077, 6.936154511427094, 8.187037582558851, 9.452006631660376), # 115
(9.273179873237634, 7.729188281291702, 9.178532189983873, 10.770666150266404, 10.555188526383779, 5.708708877287098, 6.026604817527893, 6.065380312898993, 10.80618133922783, 5.920793358449547, 6.911605931271481, 8.162523197487346, 9.43260725975589), # 116
(9.243829442823772, 7.689060048384721, 9.158512035525986, 10.741481372592244, 10.53470577629511, 5.6975230990608905, 5.9996082389477525, 6.050097795163585, 10.787738816492203, 5.899177400908129, 6.887550098823283, 8.13832304654375, 9.413047004858225), # 117
(9.214297245985211, 7.649502116995324, 9.138597058008367, 10.712477188220333, 10.513929303865842, 5.686558573986138, 5.973024442979315, 6.0355086608700965, 10.769748109563935, 5.877935596974759, 6.863938516259424, 8.11439115937335, 9.393295573379024), # 118
(9.184546916944742, 7.610454104156729, 9.118747846105723, 10.683600613211706, 10.492828985339221, 5.675787698228833, 5.946807958150756, 6.021566469816145, 10.752159917545043, 5.857026088260372, 6.840722685756828, 8.090681565621434, 9.373322671729932), # 119
(9.154542089925162, 7.571855626902158, 9.098924988492762, 10.654798663627394, 10.471374696958497, 5.665182867954965, 5.920913312990253, 6.008224781799343, 10.734924939537558, 5.836407016375905, 6.817854109492416, 8.067148294933297, 9.353098006322597), # 120
(9.124246399149268, 7.533646302264829, 9.079089073844187, 10.626018355528434, 10.449536314966918, 5.6547164793305305, 5.89529503602598, 5.995437156617307, 10.717993874643499, 5.816036522932296, 6.795284289643116, 8.043745376954222, 9.33259128356866), # 121
(9.093623478839854, 7.495765747277961, 9.059200690834711, 10.597206704975855, 10.427283715607734, 5.644360928521519, 5.869907655786117, 5.983157154067649, 10.70131742196489, 5.795872749540477, 6.772964728385851, 8.0204268413295, 9.31177220987977), # 122
(9.062636963219719, 7.458153578974774, 9.039220428139036, 10.568310728030694, 10.40458677512419, 5.634088611693925, 5.844705700798839, 5.971338333947983, 10.684846280603754, 5.775873837811387, 6.750846927897544, 7.997146717704421, 9.290610491667572), # 123
(9.031250486511654, 7.420749414388487, 9.01910887443187, 10.539277440753986, 10.381415369759537, 5.623871925013739, 5.819643699592319, 5.959934256055926, 10.668531149662115, 5.755997929355961, 6.728882390355119, 7.973859035724275, 9.269075835343711), # 124
(8.999427682938459, 7.38349287055232, 8.998826618387923, 10.51005385920676, 10.357739375757022, 5.613683264646956, 5.794676180694739, 5.948898480189091, 10.652322728241993, 5.736203165785134, 6.707022617935501, 7.950517825034348, 9.247137947319828), # 125
(8.967132186722928, 7.346323564499494, 8.978334248681898, 10.480586999450054, 10.333528669359893, 5.603495026759568, 5.76975767263427, 5.938184566145092, 10.636171715445418, 5.7164476887098425, 6.685219112815613, 7.927077115279934, 9.224766534007578), # 126
(8.93432763208786, 7.309181113263224, 8.957592353988504, 10.450823877544899, 10.308753126811398, 5.593279607517565, 5.744842703939094, 5.927746073721545, 10.620028810374407, 5.696689639741024, 6.6634233771723785, 7.903490936106316, 9.201931301818599), # 127
(8.900977653256046, 7.272005133876735, 8.93656152298245, 10.420711509552332, 10.28338262435479, 5.583009403086944, 5.719885803137382, 5.917536562716062, 10.603844712130984, 5.6768871604896125, 6.641586913182724, 7.879713317158788, 9.178601957164537), # 128
(8.867045884450281, 7.234735243373241, 8.91520234433844, 10.390196911533382, 10.257387038233311, 5.572656809633695, 5.694841498757313, 5.90750959292626, 10.587570119817174, 5.656998392566545, 6.619661223023571, 7.855698288082636, 9.154748206457038), # 129
(8.832495959893366, 7.197311058785966, 8.893475406731179, 10.359227099549086, 10.230736244690213, 5.562194223323808, 5.669664319327063, 5.89761872414975, 10.571155732535, 5.636981477582757, 6.5975978088718445, 7.831399878523152, 9.130339756107748), # 130
(8.797291513808094, 7.159672197148127, 8.87134129883538, 10.327749089660475, 10.203400119968745, 5.55159404032328, 5.644308793374809, 5.88781751618415, 10.554552249386486, 5.616794557149185, 6.575348172904468, 7.806772118125624, 9.105346312528312), # 131
(8.76139618041726, 7.121758275492944, 8.848760609325746, 10.295709897928587, 10.175348540312154, 5.540828656798102, 5.618729449428725, 5.878059528827073, 10.537710369473654, 5.596395772876765, 6.552863817298364, 7.781769036535342, 9.079737582130376), # 132
(8.724773593943663, 7.083508910853635, 8.825693926876983, 10.263056540414452, 10.146551381963686, 5.529870468914266, 5.592880816016989, 5.868298321876132, 10.520580791898526, 5.575743266376432, 6.53009624423046, 7.756344663397592, 9.053483271325586), # 133
(8.687387388610095, 7.044863720263423, 8.802101840163804, 10.229736033179103, 10.116978521166592, 5.518691872837765, 5.566717421667779, 5.858487455128944, 10.503114215763128, 5.5547951792591235, 6.506996955877678, 7.730453028357666, 9.026553086525583), # 134
(8.649201198639354, 7.005762320755524, 8.777944937860909, 10.195695392283579, 10.08659983416412, 5.507265264734592, 5.540193794909268, 5.84858048838312, 10.48526134016948, 5.533509653135776, 6.483517454416942, 7.704048161060852, 8.99891673414202), # 135
(8.610178658254235, 6.966144329363159, 8.753183808643008, 10.160881633788906, 10.055385197199517, 5.495563040770739, 5.513264464269635, 5.838530981436277, 10.466972864219606, 5.511844829617322, 6.459609242025177, 7.677084091152441, 8.970543920586536), # 136
(8.570283401677534, 6.925949363119547, 8.72777904118481, 10.125241773756125, 10.023304486516034, 5.483557597112198, 5.485883958277055, 5.828292494086029, 10.448199487015533, 5.4897588503147015, 6.435223820879306, 7.649514848277719, 8.941404352270776), # 137
(8.529479063132047, 6.885117039057908, 8.701691224161017, 10.088722828246263, 9.990327578356919, 5.471221329924964, 5.458006805459704, 5.81781858612999, 10.428891907659281, 5.4672098568388465, 6.410312693156252, 7.621294462081978, 8.91146773560639), # 138
(8.487729276840568, 6.843586974211461, 8.67488094624634, 10.051271813320358, 9.956424348965415, 5.458526635375026, 5.429587534345759, 5.807062817365774, 10.409000825252871, 5.444155990800697, 6.38482736103294, 7.592376962210506, 8.880703777005019), # 139
(8.444997677025897, 6.801298785613425, 8.647308796115487, 10.012835745039444, 9.92156467458478, 5.445445909628379, 5.400580673463397, 5.795978747590996, 10.388476938898332, 5.420555393811186, 6.358719326686294, 7.562716378308592, 8.849082182878314), # 140
(8.40124789791083, 6.758192090297021, 8.61893536244316, 9.973361639464553, 9.885718431458253, 5.431951548851015, 5.370940751340795, 5.78451993660327, 10.36727094769768, 5.396366207481251, 6.331940092293238, 7.532266740021525, 8.816572659637913), # 141
(8.356443573718156, 6.714206505295466, 8.58972123390407, 9.93279651265672, 9.848855495829087, 5.418015949208927, 5.340622296506126, 5.772639944200211, 10.345333550752942, 5.371546573421828, 6.304441160030697, 7.500982076994594, 8.783144913695466), # 142
(8.310548338670674, 6.669281647641981, 8.559626999172925, 9.891087380676975, 9.810945743940529, 5.403611506868106, 5.3095798374875685, 5.760292330179432, 10.322615447166147, 5.3460546332438525, 6.276174032075593, 7.4688164188730894, 8.748768651462617), # 143
(8.263525826991184, 6.623357134369786, 8.528613246924428, 9.848181259586356, 9.771959052035829, 5.388710617994547, 5.277767902813299, 5.747430654338549, 10.29906733603931, 5.31984852855826, 6.247090210604851, 7.435723795302299, 8.713413579351014), # 144
(8.215339672902477, 6.576372582512099, 8.496640565833289, 9.804025165445895, 9.731865296358233, 5.3732856787542405, 5.245141021011493, 5.734008476475176, 10.274639916474454, 5.292886400975988, 6.217141197795395, 7.401658235927513, 8.6770494037723), # 145
(8.16595351062735, 6.528267609102142, 8.463669544574216, 9.758566114316626, 9.690634353150992, 5.35730908531318, 5.21165372061033, 5.719979356386927, 10.249283887573606, 5.2651263921079705, 6.186278495824149, 7.3665737703940195, 8.639645831138118), # 146
(8.1153309743886, 6.47898183117313, 8.42966077182191, 9.71175112225958, 9.648236098657351, 5.340753233837358, 5.177260530137981, 5.705296853871415, 10.22294994843879, 5.236526643565146, 6.154453606868036, 7.3304244283471105, 8.601172567860118), # 147
(8.063435698409021, 6.428454865758288, 8.394574836251083, 9.663527205335797, 9.604640409120561, 5.323590520492767, 5.1419159781226265, 5.689914528726257, 10.195588798172029, 5.207045296958447, 6.1216180331039824, 7.29316423943207, 8.561599320349941), # 148
(8.010231316911412, 6.37662632989083, 8.358372326536443, 9.613841379606303, 9.55981716078387, 5.3057933414453995, 5.105574593092441, 5.673785940749067, 10.167151135875338, 5.176640493898813, 6.08772327670891, 7.254747233294191, 8.520895795019237), # 149
(7.955681464118564, 6.323435840603979, 8.321013831352694, 9.562640661132138, 9.513736229890526, 5.287334092861249, 5.0681909035756005, 5.656864649737456, 10.137587660650752, 5.1452703759971765, 6.0527208398597425, 7.215127439578763, 8.479031698279647), # 150
(7.899749774253275, 6.268823014930954, 8.282459939374542, 9.50987206597433, 9.466367492683776, 5.268185170906305, 5.029719438100283, 5.639104215489043, 10.106849071600289, 5.112893084864478, 6.016562224733405, 7.174258887931072, 8.435976736542818), # 151
(7.842399881538343, 6.212727469904973, 8.242671239276701, 9.455482610193918, 9.417680825406869, 5.2483189717465635, 4.9901147251946645, 5.620458197801441, 10.07488606782597, 5.079466762111649, 5.979198933506821, 7.132095607996409, 8.391700616220398), # 152
(7.78359542019656, 6.155088822559256, 8.201608319733868, 9.399419309851933, 9.367646104303056, 5.2277078915480155, 4.949331293386919, 5.600880156472262, 10.041649348429823, 5.044949549349629, 5.940582468356916, 7.088591629420064, 8.346173043724027), # 153
(7.723300024450729, 6.095846689927024, 8.159231769420758, 9.34162918100941, 9.31623320561558, 5.206324326476654, 4.907323671205228, 5.580323651299123, 10.007089612513866, 5.009299588189353, 5.900664331460612, 7.043700981847325, 8.299363725465357), # 154
(7.6614773285236355, 6.034940689041495, 8.115502177012075, 9.282059239727378, 9.263412005587696, 5.184140672698471, 4.864046387177761, 5.558742242079636, 9.971157559180128, 4.972475020241754, 5.859396024994833, 6.997377694923482, 8.251242367856026), # 155
(7.598090966638081, 5.972310436935888, 8.070380131182526, 9.220656502066875, 9.209152380462648, 5.161129326379461, 4.8194539698327, 5.5360894886114185, 9.933803887530626, 4.934433987117773, 5.816729051136504, 6.949575798293822, 8.201778677307685), # 156
(7.533104573016862, 5.907895550643423, 8.023826220606818, 9.157367984088937, 9.153424206483685, 5.137262683685614, 4.773500947698219, 5.512318950692082, 9.894979296667389, 4.895134630428341, 5.772614912062549, 6.900249321603637, 8.150942360231976), # 157
(7.464680946405239, 5.840453120772258, 7.973591953902355, 9.089769581651243, 9.093681105870997, 5.11102447631711, 4.725106720927857, 5.485796952349372, 9.851662091599097, 4.8533659162911436, 5.7255957525389425, 6.847599564194339, 8.096485859415345), # 158
(7.382286766978402, 5.763065319599478, 7.906737818402988, 9.003977158788453, 9.015191309781628, 5.073689648007103, 4.668212763385716, 5.4472135327643825, 9.786427261222144, 4.802280994098745, 5.667416935618994, 6.781362523683108, 8.025427646920194), # 159
(7.284872094904309, 5.675096728540714, 7.821920957955888, 8.89857751040886, 8.916420131346795, 5.024341296047684, 4.602243748383784, 5.3955991895273465, 9.697425227228651, 4.741205651862893, 5.59725950860954, 6.700501948887847, 7.93642060889358), # 160
(7.17322205458596, 5.577120868080469, 7.720046971910309, 8.774572503756728, 8.798393124282113, 4.963577241570314, 4.527681446006876, 5.33160053310978, 9.585829766999018, 4.6706581931709374, 5.515741654599707, 6.605767468907571, 7.830374044819097), # 161
(7.048121770426357, 5.469711258703239, 7.602021459615496, 8.632964006076326, 8.662135842303204, 4.891995305706455, 4.445007626339809, 5.255864173983202, 9.452814657913637, 4.5911569216102315, 5.42348155667862, 6.497908712841293, 7.708197254180333), # 162
(6.9103563668284975, 5.353441420893524, 7.468750020420702, 8.474753884611934, 8.508673839125688, 4.810193309587572, 4.354704059467401, 5.169036722619125, 9.299553677352906, 4.503220140768125, 5.321097397935408, 6.3776753097880325, 7.570799536460879), # 163
(6.760710968195384, 5.228884875135821, 7.321138253675176, 8.300944006607818, 8.339032668465189, 4.718769074345129, 4.257252515474466, 5.071764789489069, 9.127220602697223, 4.407366154231968, 5.209207361459196, 6.245816888846803, 7.419090191144328), # 164
(6.599970698930017, 5.096615141914632, 7.160091758728169, 8.112536239308252, 8.154237884037324, 4.618320421110586, 4.153134764445822, 4.964694985064546, 8.93698921132698, 4.3041132655891134, 5.088429630339111, 6.10308307911662, 7.25397851771427), # 165
(6.428920683435397, 4.957205741714454, 6.9865161349289275, 7.910532449957501, 7.955315039557714, 4.509445171015408, 4.042832576466286, 4.848473919817077, 8.730033280622573, 4.193979778426912, 4.959382387664279, 5.950223509696501, 7.0763738156542955), # 166
(6.248346046114523, 4.811230195019787, 6.801316981626704, 7.695934505799843, 7.74328968874198, 4.392741145191058, 3.9268277216206746, 4.723748204218176, 8.5075265879644, 4.077483996332714, 4.822683816523827, 5.7879878096854585, 6.887185384447996), # 167
(6.059031911370395, 4.659262022315128, 6.605399898170748, 7.469744274079546, 7.519187385305742, 4.268806164768999, 3.805601969993804, 4.5911644487393595, 8.270642910732855, 3.955144222893872, 4.678952100006881, 5.617125608182511, 6.6873225235789615), # 168
(5.861763403606015, 4.501874744084979, 6.399670483910309, 7.232963622040883, 7.28403368296462, 4.138238050880695, 3.6796370916704917, 4.451369263852145, 8.020556026308338, 3.8274787616977366, 4.528805421202568, 5.438386534286672, 6.477694532530785), # 169
(5.657325647224384, 4.339641880813837, 6.185034338194635, 6.98659441692812, 7.038854135434233, 4.001634624657607, 3.549414856735553, 4.305009260028047, 7.7584397120712385, 3.6950059163316578, 4.372861963200016, 5.252520217096959, 6.259210710787055), # 170
(5.4465037666285, 4.173136952986201, 5.962397060372978, 6.731638525985535, 6.784674296430206, 3.8595937072311983, 3.4154170352738054, 4.152731047738583, 7.485467745401956, 3.5582439903829886, 4.211739909088348, 5.060276285712386, 6.032780357831365), # 171
(5.230082886221365, 4.002933481086569, 5.7326642497945866, 6.4690978164573965, 6.5225197196681535, 3.7127131197329337, 3.2781253973700655, 3.9951812374552707, 7.202813903680886, 3.41771128743908, 4.046057441956694, 4.862404369231971, 5.799312773147303), # 172
(5.00884813040598, 3.8296049855994423, 5.4967415058087115, 6.1999741555879755, 6.253415958863702, 3.5615906832942748, 3.1380217131091497, 3.8330064396496235, 6.911651964288422, 3.2739261110872815, 3.8764327448941778, 4.659654096754725, 5.5597172562184625), # 173
(4.783584623585344, 3.653724987009318, 5.2555344277646014, 5.9252694106215404, 5.978388567732466, 3.406824219046685, 2.9955877525758754, 3.6668532647931604, 6.613155704604964, 3.1274067649149466, 3.7034840009899277, 4.452775097379668, 5.314903106528433), # 174
(4.555077490162455, 3.4758670058006946, 5.009948615011508, 5.645985448802367, 5.698463099990069, 3.2490115481216284, 2.851305285855058, 3.497368323357396, 6.308498902010905, 2.9786715525094243, 3.5278293933330693, 4.242517000205814, 5.0657796235608075), # 175
(4.324111854540319, 3.296604562458073, 4.760889666898678, 5.363124137374725, 5.41466510935213, 3.0887504916505666, 2.705656083031515, 3.325198225813849, 5.998855333886642, 2.828238777458067, 3.35008710501273, 4.029629434332179, 4.813256106799174), # 176
(4.0914728411219325, 3.1165111774659513, 4.5092631827753635, 5.077687343582883, 5.128020149534273, 2.9266388707649633, 2.5591219141900625, 3.1509895826340326, 5.68539877761257, 2.6766267433482245, 3.1708753191180357, 3.8148620288577786, 4.5582418557271245), # 177
(3.8579455743102966, 2.9361603713088282, 4.255974761990814, 4.790676934671116, 4.8395537742521135, 2.7632745065962827, 2.4121845494155174, 2.9753890042894655, 5.3693030105690855, 2.52435375376725, 2.9908122187381125, 3.598964412881627, 4.301646169828252), # 178
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 179
)
passenger_arriving_acc = (
(4, 5, 4, 9, 0, 2, 3, 0, 2, 1, 0, 1, 0, 4, 2, 4, 3, 4, 1, 2, 0, 0, 1, 1, 1, 0), # 0
(11, 11, 10, 12, 2, 5, 3, 1, 4, 2, 0, 2, 0, 6, 6, 6, 8, 9, 4, 4, 0, 3, 3, 2, 3, 0), # 1
(20, 16, 15, 14, 5, 6, 4, 3, 8, 5, 0, 2, 0, 12, 16, 8, 12, 14, 9, 6, 2, 5, 5, 3, 3, 0), # 2
(25, 18, 19, 16, 8, 10, 4, 6, 8, 8, 0, 2, 0, 20, 21, 9, 15, 21, 13, 11, 4, 11, 5, 4, 4, 0), # 3
(30, 23, 27, 25, 17, 13, 8, 7, 10, 9, 0, 5, 0, 26, 28, 10, 17, 27, 15, 11, 5, 12, 11, 4, 4, 0), # 4
(34, 33, 31, 27, 22, 17, 9, 11, 11, 11, 0, 5, 0, 34, 30, 13, 21, 33, 18, 12, 5, 17, 14, 5, 4, 0), # 5
(40, 36, 42, 30, 29, 21, 10, 12, 12, 11, 2, 6, 0, 42, 37, 17, 22, 36, 22, 15, 7, 22, 16, 6, 5, 0), # 6
(41, 42, 45, 36, 36, 25, 13, 15, 12, 12, 3, 11, 0, 50, 41, 22, 32, 43, 25, 16, 8, 25, 16, 7, 6, 0), # 7
(49, 49, 54, 47, 41, 27, 16, 18, 14, 13, 3, 11, 0, 60, 48, 29, 34, 49, 27, 17, 10, 29, 18, 7, 8, 0), # 8
(58, 55, 60, 55, 47, 29, 18, 22, 17, 16, 5, 12, 0, 65, 56, 36, 40, 57, 29, 22, 12, 35, 19, 11, 10, 0), # 9
(66, 62, 72, 61, 59, 34, 20, 23, 17, 17, 5, 12, 0, 75, 61, 39, 43, 70, 37, 28, 13, 40, 21, 13, 10, 0), # 10
(77, 69, 82, 65, 65, 35, 21, 27, 22, 19, 6, 14, 0, 84, 68, 44, 51, 75, 40, 32, 17, 43, 25, 13, 10, 0), # 11
(91, 77, 90, 72, 67, 36, 27, 29, 27, 22, 6, 14, 0, 91, 78, 52, 57, 85, 44, 35, 19, 45, 25, 14, 11, 0), # 12
(101, 87, 100, 79, 74, 39, 28, 34, 29, 27, 10, 14, 0, 99, 80, 61, 63, 93, 50, 41, 19, 48, 30, 16, 13, 0), # 13
(114, 96, 113, 85, 80, 43, 36, 38, 32, 27, 11, 14, 0, 108, 92, 68, 65, 101, 52, 45, 21, 52, 32, 18, 14, 0), # 14
(123, 110, 122, 97, 85, 45, 39, 45, 35, 28, 12, 16, 0, 117, 99, 73, 71, 112, 53, 48, 24, 55, 35, 18, 15, 0), # 15
(139, 122, 127, 105, 92, 47, 44, 50, 40, 30, 14, 17, 0, 124, 111, 77, 76, 120, 56, 53, 27, 58, 37, 23, 15, 0), # 16
(148, 141, 133, 116, 96, 51, 45, 58, 47, 32, 15, 17, 0, 133, 116, 84, 87, 128, 63, 59, 27, 64, 39, 25, 16, 0), # 17
(156, 147, 140, 129, 102, 53, 49, 61, 52, 34, 16, 17, 0, 146, 126, 87, 95, 133, 71, 62, 27, 71, 43, 25, 16, 0), # 18
(165, 154, 151, 135, 109, 59, 55, 63, 54, 36, 16, 17, 0, 159, 140, 94, 95, 137, 82, 65, 31, 76, 47, 25, 16, 0), # 19
(170, 163, 164, 141, 118, 61, 59, 68, 61, 40, 16, 17, 0, 175, 143, 104, 103, 143, 84, 68, 34, 78, 48, 27, 17, 0), # 20
(183, 173, 172, 154, 126, 64, 67, 74, 62, 42, 18, 17, 0, 186, 153, 108, 113, 152, 88, 74, 35, 81, 53, 28, 18, 0), # 21
(194, 184, 179, 159, 136, 68, 78, 78, 68, 42, 19, 17, 0, 197, 162, 119, 119, 160, 96, 76, 39, 88, 53, 29, 19, 0), # 22
(205, 189, 183, 171, 146, 72, 80, 79, 69, 43, 20, 17, 0, 209, 172, 125, 127, 176, 101, 77, 39, 91, 57, 32, 20, 0), # 23
(221, 200, 191, 180, 152, 74, 83, 83, 73, 44, 21, 17, 0, 217, 182, 128, 136, 185, 107, 78, 40, 93, 59, 34, 22, 0), # 24
(235, 210, 203, 191, 160, 77, 86, 86, 75, 44, 22, 18, 0, 225, 190, 138, 140, 194, 110, 85, 42, 95, 59, 35, 23, 0), # 25
(246, 220, 216, 205, 166, 83, 91, 91, 75, 48, 23, 18, 0, 236, 202, 145, 145, 202, 118, 89, 42, 101, 59, 36, 25, 0), # 26
(260, 228, 225, 214, 169, 87, 92, 98, 80, 49, 25, 19, 0, 246, 208, 152, 154, 205, 125, 90, 45, 106, 62, 39, 25, 0), # 27
(270, 237, 233, 224, 181, 92, 95, 107, 82, 51, 26, 19, 0, 253, 215, 159, 161, 213, 127, 97, 46, 109, 66, 40, 25, 0), # 28
(281, 242, 238, 233, 190, 93, 102, 112, 88, 52, 28, 20, 0, 264, 221, 172, 168, 218, 133, 101, 47, 114, 69, 41, 26, 0), # 29
(292, 257, 249, 241, 196, 100, 106, 116, 91, 55, 30, 22, 0, 272, 232, 177, 173, 226, 142, 106, 50, 121, 73, 42, 26, 0), # 30
(300, 268, 256, 247, 208, 102, 111, 121, 96, 56, 31, 22, 0, 282, 244, 186, 180, 232, 150, 112, 51, 130, 75, 42, 26, 0), # 31
(306, 280, 264, 262, 220, 106, 113, 125, 105, 56, 32, 23, 0, 289, 251, 194, 188, 241, 154, 116, 53, 131, 78, 44, 27, 0), # 32
(316, 293, 273, 275, 229, 108, 117, 128, 108, 57, 32, 23, 0, 293, 264, 201, 194, 244, 162, 122, 54, 133, 81, 45, 28, 0), # 33
(329, 299, 279, 287, 236, 110, 120, 130, 114, 60, 32, 26, 0, 307, 269, 208, 199, 254, 167, 125, 55, 139, 85, 45, 28, 0), # 34
(348, 317, 281, 296, 249, 118, 124, 135, 120, 61, 35, 27, 0, 317, 279, 213, 205, 264, 171, 126, 58, 143, 86, 45, 28, 0), # 35
(360, 327, 291, 303, 264, 120, 128, 139, 126, 64, 35, 27, 0, 323, 288, 215, 210, 270, 175, 128, 59, 147, 90, 46, 28, 0), # 36
(370, 345, 296, 315, 273, 130, 133, 141, 128, 64, 36, 28, 0, 330, 291, 224, 217, 282, 178, 131, 61, 150, 91, 48, 28, 0), # 37
(378, 356, 304, 333, 277, 137, 133, 145, 129, 66, 39, 29, 0, 340, 301, 230, 222, 290, 182, 139, 66, 157, 96, 49, 29, 0), # 38
(395, 364, 311, 338, 290, 141, 134, 147, 132, 67, 41, 29, 0, 352, 313, 236, 227, 298, 186, 145, 69, 165, 97, 51, 29, 0), # 39
(408, 371, 321, 352, 291, 141, 136, 152, 137, 70, 41, 30, 0, 366, 325, 242, 232, 307, 192, 153, 70, 167, 102, 52, 31, 0), # 40
(416, 378, 325, 365, 295, 143, 139, 156, 138, 71, 44, 32, 0, 372, 335, 245, 239, 312, 195, 160, 71, 174, 106, 53, 31, 0), # 41
(427, 391, 332, 375, 304, 149, 140, 160, 143, 72, 45, 32, 0, 384, 346, 248, 242, 317, 203, 165, 73, 175, 107, 54, 31, 0), # 42
(436, 395, 339, 380, 317, 152, 146, 167, 148, 75, 46, 32, 0, 393, 355, 256, 245, 325, 209, 170, 77, 179, 112, 55, 32, 0), # 43
(449, 404, 348, 386, 328, 156, 146, 174, 151, 80, 48, 32, 0, 403, 364, 262, 249, 335, 214, 175, 80, 186, 116, 56, 32, 0), # 44
(455, 411, 358, 399, 337, 160, 157, 179, 156, 81, 48, 33, 0, 413, 376, 269, 256, 339, 220, 177, 83, 190, 120, 56, 34, 0), # 45
(464, 423, 366, 412, 344, 161, 161, 182, 162, 85, 52, 33, 0, 422, 389, 273, 262, 352, 224, 180, 89, 193, 125, 59, 35, 0), # 46
(476, 437, 371, 421, 356, 163, 168, 188, 167, 86, 52, 33, 0, 432, 399, 276, 264, 358, 227, 187, 95, 198, 127, 59, 36, 0), # 47
(482, 444, 380, 441, 366, 166, 177, 189, 173, 87, 53, 35, 0, 438, 406, 284, 269, 367, 230, 193, 98, 204, 127, 61, 38, 0), # 48
(492, 454, 383, 448, 374, 171, 180, 190, 175, 93, 56, 37, 0, 452, 416, 288, 270, 373, 231, 194, 101, 208, 130, 62, 38, 0), # 49
(507, 464, 399, 458, 377, 177, 183, 196, 182, 94, 58, 37, 0, 468, 420, 299, 279, 386, 235, 195, 106, 210, 135, 64, 39, 0), # 50
(516, 471, 404, 469, 384, 178, 187, 200, 187, 96, 61, 37, 0, 474, 425, 306, 287, 397, 241, 201, 107, 216, 136, 67, 39, 0), # 51
(523, 482, 411, 479, 388, 181, 191, 201, 194, 96, 63, 38, 0, 480, 435, 313, 292, 409, 250, 203, 109, 220, 138, 68, 39, 0), # 52
(527, 494, 418, 485, 396, 185, 193, 208, 198, 97, 65, 41, 0, 487, 444, 320, 297, 413, 255, 205, 111, 221, 142, 69, 40, 0), # 53
(530, 500, 426, 493, 408, 188, 194, 214, 206, 98, 67, 41, 0, 491, 452, 325, 304, 419, 259, 208, 114, 226, 144, 71, 41, 0), # 54
(544, 504, 429, 503, 417, 191, 197, 215, 207, 101, 68, 41, 0, 499, 463, 328, 305, 424, 265, 212, 118, 229, 147, 71, 42, 0), # 55
(560, 510, 440, 509, 427, 194, 198, 222, 210, 102, 70, 41, 0, 512, 469, 334, 307, 435, 267, 217, 118, 234, 150, 72, 42, 0), # 56
(567, 522, 447, 515, 436, 198, 201, 226, 214, 107, 72, 42, 0, 519, 480, 346, 314, 441, 273, 220, 120, 237, 153, 75, 43, 0), # 57
(577, 534, 454, 524, 445, 200, 206, 233, 223, 109, 72, 43, 0, 533, 493, 352, 320, 448, 276, 226, 122, 239, 158, 77, 44, 0), # 58
(587, 542, 461, 533, 449, 204, 210, 233, 224, 112, 73, 43, 0, 540, 504, 367, 324, 455, 278, 228, 123, 241, 162, 77, 45, 0), # 59
(593, 555, 467, 536, 460, 210, 212, 235, 230, 113, 76, 45, 0, 552, 515, 376, 328, 465, 282, 234, 132, 245, 166, 81, 46, 0), # 60
(596, 572, 474, 540, 470, 215, 215, 238, 235, 113, 76, 49, 0, 563, 520, 387, 339, 471, 284, 237, 135, 245, 169, 82, 47, 0), # 61
(611, 581, 484, 551, 478, 218, 217, 242, 238, 114, 80, 49, 0, 570, 526, 393, 343, 479, 289, 241, 139, 247, 172, 83, 47, 0), # 62
(617, 593, 492, 560, 487, 222, 218, 243, 243, 115, 82, 49, 0, 579, 531, 400, 346, 486, 295, 249, 140, 250, 179, 85, 47, 0), # 63
(625, 599, 501, 568, 502, 228, 223, 245, 247, 117, 82, 49, 0, 588, 540, 407, 352, 493, 297, 250, 145, 258, 181, 89, 47, 0), # 64
(639, 609, 503, 577, 505, 230, 225, 250, 250, 119, 83, 49, 0, 595, 551, 410, 357, 513, 302, 251, 151, 258, 186, 91, 48, 0), # 65
(647, 618, 511, 586, 511, 233, 232, 253, 259, 124, 85, 52, 0, 605, 554, 419, 362, 518, 309, 257, 153, 265, 187, 94, 49, 0), # 66
(663, 628, 520, 594, 515, 236, 235, 255, 262, 126, 87, 53, 0, 610, 564, 429, 370, 523, 314, 264, 153, 270, 190, 94, 50, 0), # 67
(672, 631, 529, 603, 523, 239, 240, 256, 266, 127, 87, 54, 0, 619, 570, 432, 374, 531, 324, 268, 156, 277, 193, 96, 51, 0), # 68
(685, 636, 539, 606, 528, 242, 243, 259, 270, 127, 89, 54, 0, 629, 579, 439, 380, 537, 326, 271, 157, 283, 195, 97, 51, 0), # 69
(696, 645, 555, 616, 535, 248, 243, 264, 276, 129, 92, 54, 0, 642, 589, 446, 383, 549, 332, 274, 160, 286, 199, 97, 52, 0), # 70
(705, 653, 563, 621, 544, 251, 245, 266, 280, 132, 95, 54, 0, 651, 598, 454, 388, 552, 344, 276, 161, 293, 199, 98, 52, 0), # 71
(716, 660, 572, 631, 553, 253, 249, 270, 286, 135, 95, 55, 0, 657, 608, 461, 394, 559, 348, 283, 163, 297, 203, 100, 53, 0), # 72
(724, 666, 580, 646, 565, 253, 253, 273, 287, 137, 96, 56, 0, 668, 614, 466, 403, 565, 353, 285, 167, 301, 205, 102, 53, 0), # 73
(732, 673, 591, 655, 573, 257, 256, 278, 295, 137, 99, 57, 0, 679, 619, 474, 410, 573, 356, 289, 168, 301, 208, 103, 54, 0), # 74
(742, 683, 599, 662, 576, 259, 260, 285, 305, 139, 100, 57, 0, 692, 624, 480, 418, 583, 358, 293, 169, 305, 212, 107, 56, 0), # 75
(752, 695, 610, 669, 581, 265, 263, 287, 309, 140, 101, 58, 0, 701, 632, 486, 423, 596, 361, 299, 170, 310, 215, 107, 56, 0), # 76
(759, 703, 617, 678, 589, 273, 268, 291, 314, 142, 104, 59, 0, 708, 641, 491, 427, 604, 368, 303, 173, 310, 216, 109, 57, 0), # 77
(765, 711, 625, 687, 596, 279, 275, 296, 317, 146, 106, 60, 0, 716, 650, 498, 432, 611, 374, 308, 175, 317, 217, 110, 57, 0), # 78
(778, 725, 632, 695, 602, 282, 281, 298, 322, 147, 107, 61, 0, 728, 657, 504, 436, 619, 378, 312, 176, 324, 220, 110, 57, 0), # 79
(793, 735, 644, 703, 607, 285, 285, 302, 326, 147, 109, 63, 0, 734, 663, 513, 444, 628, 381, 313, 179, 331, 222, 111, 58, 0), # 80
(808, 743, 656, 714, 619, 286, 288, 307, 329, 148, 113, 63, 0, 752, 672, 517, 448, 632, 385, 316, 182, 334, 222, 112, 58, 0), # 81
(815, 748, 665, 723, 628, 292, 292, 310, 336, 151, 114, 64, 0, 761, 678, 527, 450, 643, 386, 324, 184, 339, 226, 115, 60, 0), # 82
(827, 758, 674, 730, 636, 297, 299, 315, 338, 154, 114, 65, 0, 773, 687, 537, 454, 651, 387, 329, 185, 345, 229, 117, 61, 0), # 83
(835, 768, 681, 741, 645, 300, 300, 318, 345, 155, 117, 66, 0, 782, 700, 542, 463, 657, 390, 335, 186, 347, 233, 117, 61, 0), # 84
(845, 776, 686, 745, 653, 303, 301, 318, 349, 157, 118, 66, 0, 789, 702, 549, 468, 670, 394, 339, 190, 348, 239, 118, 62, 0), # 85
(854, 782, 692, 756, 661, 305, 304, 323, 352, 158, 120, 68, 0, 801, 707, 556, 474, 675, 397, 341, 192, 353, 244, 119, 62, 0), # 86
(862, 791, 704, 763, 670, 311, 309, 326, 358, 159, 120, 69, 0, 813, 719, 561, 478, 683, 401, 344, 193, 356, 250, 119, 62, 0), # 87
(867, 798, 716, 771, 678, 314, 312, 327, 359, 162, 121, 70, 0, 821, 727, 567, 486, 691, 404, 347, 193, 358, 252, 121, 62, 0), # 88
(877, 808, 720, 774, 685, 316, 314, 327, 362, 163, 122, 71, 0, 837, 738, 574, 492, 696, 406, 349, 195, 359, 254, 121, 62, 0), # 89
(883, 821, 725, 787, 692, 320, 316, 331, 369, 166, 124, 71, 0, 845, 744, 579, 495, 705, 410, 354, 196, 362, 256, 123, 63, 0), # 90
(891, 832, 736, 794, 703, 324, 318, 334, 371, 168, 124, 71, 0, 855, 748, 587, 503, 712, 413, 358, 199, 364, 260, 125, 64, 0), # 91
(901, 840, 742, 804, 710, 329, 323, 340, 373, 169, 125, 72, 0, 860, 758, 597, 507, 723, 419, 363, 202, 366, 264, 128, 64, 0), # 92
(908, 847, 753, 809, 716, 330, 329, 342, 377, 170, 126, 74, 0, 872, 767, 605, 509, 732, 422, 368, 202, 371, 267, 129, 64, 0), # 93
(923, 853, 758, 814, 726, 334, 332, 343, 383, 170, 128, 75, 0, 881, 775, 611, 513, 739, 427, 373, 203, 377, 270, 131, 64, 0), # 94
(934, 857, 761, 823, 734, 337, 335, 347, 391, 171, 128, 76, 0, 891, 789, 612, 517, 748, 430, 374, 203, 379, 272, 132, 65, 0), # 95
(942, 867, 768, 829, 738, 338, 341, 348, 393, 171, 131, 77, 0, 910, 795, 616, 524, 755, 436, 374, 207, 383, 277, 135, 66, 0), # 96
(951, 876, 776, 841, 748, 341, 342, 352, 396, 172, 132, 77, 0, 922, 805, 624, 529, 763, 440, 377, 208, 385, 279, 138, 67, 0), # 97
(960, 886, 782, 850, 758, 343, 347, 355, 399, 175, 134, 79, 0, 936, 814, 629, 536, 771, 444, 379, 210, 390, 280, 138, 67, 0), # 98
(971, 893, 790, 858, 761, 350, 352, 358, 402, 175, 134, 79, 0, 947, 820, 634, 543, 782, 449, 384, 213, 395, 281, 139, 69, 0), # 99
(977, 895, 799, 864, 771, 353, 355, 361, 407, 175, 135, 81, 0, 959, 828, 643, 547, 792, 452, 387, 215, 401, 284, 142, 69, 0), # 100
(988, 906, 809, 871, 773, 359, 358, 365, 413, 180, 135, 81, 0, 966, 836, 648, 553, 805, 459, 390, 217, 404, 285, 143, 69, 0), # 101
(995, 913, 811, 878, 777, 362, 359, 368, 417, 182, 135, 81, 0, 982, 840, 651, 556, 810, 463, 393, 217, 407, 287, 144, 69, 0), # 102
(1005, 921, 823, 881, 783, 365, 366, 373, 421, 183, 136, 83, 0, 989, 845, 656, 561, 814, 468, 396, 218, 407, 290, 148, 69, 0), # 103
(1020, 933, 830, 887, 788, 368, 369, 377, 424, 184, 137, 83, 0, 994, 848, 667, 567, 822, 473, 398, 219, 411, 291, 149, 69, 0), # 104
(1032, 943, 838, 895, 796, 371, 373, 382, 428, 186, 138, 83, 0, 1008, 856, 671, 570, 828, 478, 401, 220, 414, 293, 151, 69, 0), # 105
(1038, 947, 847, 908, 800, 373, 374, 383, 435, 189, 138, 83, 0, 1022, 863, 676, 573, 834, 482, 406, 222, 422, 293, 153, 69, 0), # 106
(1046, 950, 856, 916, 806, 373, 379, 388, 437, 191, 139, 84, 0, 1029, 869, 683, 579, 840, 485, 410, 224, 426, 295, 155, 71, 0), # 107
(1058, 959, 867, 925, 816, 375, 382, 392, 438, 192, 139, 84, 0, 1036, 878, 690, 582, 844, 487, 412, 225, 430, 296, 156, 71, 0), # 108
(1071, 968, 875, 935, 822, 381, 386, 394, 441, 193, 141, 85, 0, 1051, 888, 696, 585, 851, 493, 417, 229, 433, 301, 158, 71, 0), # 109
(1081, 979, 883, 950, 829, 382, 389, 397, 448, 195, 142, 86, 0, 1060, 898, 705, 590, 860, 495, 423, 230, 436, 303, 158, 71, 0), # 110
(1090, 981, 889, 957, 836, 389, 392, 401, 452, 195, 142, 86, 0, 1063, 907, 714, 593, 862, 499, 425, 235, 441, 304, 161, 71, 0), # 111
(1098, 985, 903, 970, 842, 390, 395, 404, 455, 197, 144, 87, 0, 1071, 913, 721, 598, 869, 501, 427, 236, 443, 310, 164, 71, 0), # 112
(1103, 997, 908, 975, 848, 391, 396, 407, 456, 198, 145, 87, 0, 1083, 922, 731, 600, 873, 504, 432, 241, 445, 316, 165, 71, 0), # 113
(1112, 1006, 915, 987, 855, 393, 401, 409, 462, 198, 145, 87, 0, 1095, 929, 736, 606, 883, 507, 435, 242, 450, 316, 165, 72, 0), # 114
(1123, 1014, 920, 996, 863, 395, 402, 410, 470, 199, 146, 88, 0, 1104, 937, 744, 612, 890, 509, 439, 247, 450, 318, 168, 72, 0), # 115
(1135, 1021, 926, 1003, 870, 397, 408, 412, 472, 201, 147, 88, 0, 1120, 940, 748, 615, 897, 516, 447, 249, 457, 320, 168, 72, 0), # 116
(1140, 1032, 940, 1010, 874, 400, 409, 414, 474, 203, 149, 88, 0, 1127, 947, 756, 619, 902, 516, 449, 251, 463, 320, 171, 72, 0), # 117
(1148, 1042, 945, 1015, 884, 402, 411, 417, 475, 204, 150, 88, 0, 1131, 956, 760, 624, 910, 522, 452, 254, 468, 323, 172, 72, 0), # 118
(1159, 1050, 953, 1024, 893, 407, 412, 419, 476, 206, 153, 89, 0, 1140, 964, 768, 627, 922, 525, 455, 257, 471, 324, 174, 72, 0), # 119
(1162, 1055, 956, 1033, 896, 409, 415, 421, 484, 209, 155, 89, 0, 1147, 971, 774, 629, 930, 528, 458, 259, 472, 325, 175, 72, 0), # 120
(1170, 1063, 965, 1044, 910, 411, 419, 421, 490, 211, 156, 90, 0, 1161, 979, 781, 633, 937, 530, 462, 261, 477, 327, 175, 73, 0), # 121
(1180, 1068, 971, 1057, 918, 414, 424, 425, 495, 213, 160, 92, 0, 1173, 984, 787, 638, 944, 534, 466, 263, 481, 328, 175, 73, 0), # 122
(1189, 1078, 976, 1065, 929, 420, 428, 426, 497, 213, 161, 92, 0, 1182, 993, 795, 647, 950, 536, 468, 268, 483, 328, 175, 74, 0), # 123
(1193, 1084, 982, 1072, 938, 423, 428, 430, 505, 215, 164, 94, 0, 1189, 999, 796, 652, 955, 539, 469, 270, 484, 332, 178, 74, 0), # 124
(1197, 1092, 987, 1078, 947, 425, 431, 432, 507, 216, 167, 94, 0, 1196, 1005, 799, 657, 962, 544, 474, 271, 485, 335, 178, 75, 0), # 125
(1205, 1096, 991, 1087, 958, 431, 431, 435, 511, 216, 167, 94, 0, 1206, 1012, 802, 660, 966, 549, 479, 274, 489, 336, 179, 77, 0), # 126
(1213, 1103, 999, 1092, 965, 433, 433, 439, 513, 217, 167, 94, 0, 1223, 1020, 804, 666, 973, 555, 482, 277, 492, 339, 181, 79, 0), # 127
(1224, 1107, 1005, 1102, 971, 439, 436, 443, 515, 217, 167, 97, 0, 1229, 1024, 812, 667, 978, 557, 484, 278, 496, 341, 182, 79, 0), # 128
(1233, 1117, 1016, 1108, 977, 445, 437, 445, 520, 217, 169, 100, 0, 1242, 1030, 819, 671, 981, 559, 486, 282, 499, 345, 183, 79, 0), # 129
(1243, 1126, 1023, 1116, 987, 450, 441, 446, 526, 217, 172, 100, 0, 1255, 1035, 827, 675, 983, 560, 488, 283, 502, 347, 183, 80, 0), # 130
(1253, 1132, 1027, 1125, 992, 453, 444, 448, 530, 220, 172, 101, 0, 1265, 1045, 829, 680, 989, 564, 489, 284, 507, 348, 184, 82, 0), # 131
(1260, 1137, 1033, 1135, 998, 455, 451, 453, 535, 222, 173, 104, 0, 1269, 1052, 837, 685, 998, 568, 493, 285, 512, 351, 184, 84, 0), # 132
(1267, 1138, 1035, 1144, 1007, 460, 452, 453, 537, 223, 174, 104, 0, 1282, 1054, 841, 688, 1003, 570, 497, 289, 518, 352, 187, 85, 0), # 133
(1270, 1148, 1040, 1150, 1015, 461, 454, 453, 539, 224, 175, 104, 0, 1290, 1063, 844, 691, 1006, 573, 499, 293, 519, 354, 188, 85, 0), # 134
(1275, 1153, 1044, 1158, 1019, 462, 457, 454, 544, 225, 176, 104, 0, 1296, 1074, 851, 692, 1013, 577, 501, 294, 524, 355, 189, 85, 0), # 135
(1281, 1162, 1050, 1168, 1027, 464, 459, 456, 545, 227, 177, 104, 0, 1305, 1079, 862, 695, 1018, 578, 504, 298, 526, 359, 189, 85, 0), # 136
(1296, 1169, 1058, 1177, 1032, 465, 459, 457, 550, 228, 177, 105, 0, 1312, 1086, 870, 696, 1021, 582, 508, 302, 529, 360, 192, 86, 0), # 137
(1310, 1171, 1064, 1185, 1038, 468, 461, 458, 553, 229, 177, 106, 0, 1320, 1094, 879, 700, 1024, 588, 511, 302, 533, 363, 194, 86, 0), # 138
(1324, 1177, 1073, 1192, 1059, 469, 463, 460, 554, 230, 177, 107, 0, 1325, 1100, 884, 701, 1032, 588, 517, 304, 535, 365, 194, 86, 0), # 139
(1338, 1186, 1079, 1198, 1070, 474, 466, 464, 559, 233, 178, 109, 0, 1329, 1109, 891, 706, 1039, 590, 518, 305, 541, 368, 196, 86, 0), # 140
(1345, 1189, 1087, 1203, 1073, 477, 469, 465, 565, 233, 179, 109, 0, 1339, 1117, 898, 709, 1046, 597, 523, 305, 545, 369, 197, 87, 0), # 141
(1353, 1194, 1091, 1215, 1076, 477, 471, 467, 567, 234, 179, 109, 0, 1350, 1126, 907, 711, 1052, 599, 525, 307, 547, 371, 198, 88, 0), # 142
(1365, 1199, 1097, 1225, 1079, 478, 474, 469, 568, 237, 179, 109, 0, 1358, 1137, 915, 716, 1059, 605, 527, 308, 549, 373, 200, 88, 0), # 143
(1377, 1205, 1107, 1232, 1088, 482, 476, 472, 570, 239, 180, 111, 0, 1365, 1145, 919, 722, 1064, 610, 529, 309, 553, 373, 204, 88, 0), # 144
(1388, 1213, 1113, 1245, 1094, 486, 477, 474, 573, 240, 180, 111, 0, 1381, 1152, 924, 724, 1070, 616, 532, 313, 559, 374, 204, 88, 0), # 145
(1399, 1217, 1120, 1254, 1100, 489, 482, 479, 576, 241, 180, 112, 0, 1384, 1160, 931, 730, 1077, 625, 537, 313, 562, 377, 205, 88, 0), # 146
(1407, 1223, 1126, 1257, 1107, 493, 485, 480, 582, 243, 181, 113, 0, 1391, 1169, 933, 734, 1090, 626, 538, 315, 565, 379, 206, 89, 0), # 147
(1415, 1227, 1131, 1264, 1118, 495, 485, 480, 586, 243, 183, 115, 0, 1396, 1177, 939, 738, 1097, 633, 538, 316, 568, 381, 207, 89, 0), # 148
(1424, 1232, 1136, 1276, 1123, 499, 487, 482, 592, 243, 185, 115, 0, 1405, 1181, 944, 741, 1100, 636, 541, 319, 571, 384, 207, 89, 0), # 149
(1437, 1235, 1141, 1280, 1128, 499, 491, 488, 592, 243, 185, 116, 0, 1415, 1186, 951, 745, 1104, 639, 545, 320, 573, 388, 209, 90, 0), # 150
(1450, 1238, 1146, 1287, 1134, 509, 493, 489, 597, 243, 185, 116, 0, 1424, 1196, 957, 750, 1109, 641, 549, 320, 576, 389, 209, 91, 0), # 151
(1456, 1245, 1149, 1295, 1141, 512, 493, 490, 604, 245, 186, 116, 0, 1429, 1205, 964, 756, 1115, 643, 550, 322, 578, 392, 209, 91, 0), # 152
(1462, 1251, 1156, 1306, 1144, 517, 496, 492, 606, 246, 187, 116, 0, 1437, 1211, 972, 760, 1121, 646, 550, 325, 580, 393, 212, 92, 0), # 153
(1470, 1261, 1160, 1319, 1151, 521, 499, 495, 608, 247, 187, 116, 0, 1450, 1217, 977, 763, 1126, 649, 551, 327, 582, 399, 213, 92, 0), # 154
(1476, 1265, 1168, 1320, 1153, 522, 500, 496, 611, 247, 187, 116, 0, 1461, 1227, 984, 770, 1135, 653, 554, 328, 585, 400, 213, 92, 0), # 155
(1484, 1271, 1172, 1326, 1157, 524, 501, 501, 612, 248, 187, 116, 0, 1466, 1232, 986, 776, 1145, 660, 559, 331, 587, 402, 214, 94, 0), # 156
(1490, 1276, 1178, 1335, 1166, 526, 502, 502, 613, 249, 189, 117, 0, 1478, 1243, 988, 776, 1152, 664, 561, 336, 590, 405, 215, 94, 0), # 157
(1492, 1278, 1184, 1340, 1175, 529, 504, 507, 617, 250, 192, 117, 0, 1487, 1248, 996, 778, 1165, 670, 564, 338, 590, 407, 216, 94, 0), # 158
(1495, 1284, 1190, 1345, 1181, 532, 504, 511, 622, 250, 192, 118, 0, 1499, 1254, 999, 781, 1170, 675, 568, 341, 591, 409, 216, 94, 0), # 159
(1504, 1288, 1195, 1351, 1197, 537, 505, 515, 625, 251, 193, 118, 0, 1505, 1266, 1003, 785, 1181, 680, 575, 344, 593, 410, 217, 95, 0), # 160
(1507, 1293, 1201, 1360, 1207, 544, 509, 517, 629, 252, 193, 118, 0, 1512, 1271, 1006, 787, 1189, 683, 578, 348, 597, 413, 217, 95, 0), # 161
(1513, 1300, 1205, 1364, 1213, 546, 509, 521, 632, 253, 193, 119, 0, 1518, 1277, 1009, 790, 1197, 684, 580, 350, 599, 414, 220, 95, 0), # 162
(1518, 1304, 1214, 1370, 1222, 551, 511, 525, 635, 253, 195, 120, 0, 1530, 1281, 1011, 793, 1201, 687, 580, 351, 602, 415, 222, 96, 0), # 163
(1526, 1308, 1221, 1373, 1229, 552, 511, 525, 638, 255, 195, 123, 0, 1536, 1288, 1016, 795, 1207, 690, 582, 355, 603, 415, 226, 96, 0), # 164
(1530, 1313, 1230, 1377, 1235, 553, 512, 528, 641, 257, 197, 123, 0, 1539, 1292, 1019, 800, 1214, 691, 584, 360, 605, 423, 228, 97, 0), # 165
(1534, 1316, 1233, 1383, 1237, 554, 513, 528, 647, 258, 197, 123, 0, 1547, 1298, 1021, 805, 1220, 692, 585, 361, 606, 425, 228, 97, 0), # 166
(1545, 1323, 1240, 1387, 1246, 555, 514, 529, 649, 259, 198, 123, 0, 1552, 1306, 1023, 805, 1231, 694, 586, 364, 610, 429, 228, 100, 0), # 167
(1552, 1329, 1242, 1391, 1247, 557, 515, 531, 653, 260, 199, 123, 0, 1560, 1307, 1027, 810, 1234, 696, 589, 366, 614, 431, 229, 100, 0), # 168
(1560, 1334, 1244, 1395, 1253, 558, 515, 531, 654, 260, 199, 124, 0, 1570, 1311, 1032, 813, 1242, 700, 592, 367, 616, 433, 231, 100, 0), # 169
(1565, 1339, 1249, 1398, 1260, 559, 519, 531, 654, 265, 200, 124, 0, 1577, 1317, 1035, 815, 1247, 703, 594, 368, 617, 435, 231, 100, 0), # 170
(1571, 1342, 1252, 1404, 1265, 560, 520, 531, 658, 267, 202, 124, 0, 1582, 1318, 1041, 818, 1249, 704, 598, 369, 618, 435, 234, 100, 0), # 171
(1580, 1343, 1254, 1412, 1265, 563, 520, 533, 662, 267, 204, 124, 0, 1587, 1322, 1045, 822, 1255, 704, 600, 370, 622, 437, 235, 100, 0), # 172
(1584, 1347, 1261, 1414, 1266, 565, 523, 535, 662, 267, 207, 124, 0, 1594, 1326, 1049, 825, 1261, 711, 601, 370, 623, 439, 238, 100, 0), # 173
(1593, 1350, 1266, 1417, 1271, 566, 524, 536, 664, 270, 210, 124, 0, 1601, 1329, 1051, 828, 1263, 712, 603, 371, 625, 440, 239, 100, 0), # 174
(1595, 1353, 1277, 1421, 1272, 567, 525, 536, 666, 270, 212, 124, 0, 1607, 1333, 1053, 828, 1268, 716, 604, 372, 628, 441, 241, 100, 0), # 175
(1598, 1356, 1282, 1424, 1277, 569, 525, 537, 666, 270, 213, 125, 0, 1616, 1335, 1055, 829, 1269, 721, 605, 373, 630, 441, 241, 101, 0), # 176
(1600, 1358, 1286, 1427, 1280, 571, 527, 537, 667, 271, 213, 126, 0, 1621, 1338, 1057, 831, 1274, 722, 606, 374, 631, 444, 241, 102, 0), # 177
(1602, 1359, 1291, 1427, 1282, 572, 529, 538, 667, 272, 213, 127, 0, 1629, 1343, 1061, 833, 1279, 724, 606, 374, 633, 446, 241, 102, 0), # 178
(1602, 1359, 1291, 1427, 1282, 572, 529, 538, 667, 272, 213, 127, 0, 1629, 1343, 1061, 833, 1279, 724, 606, 374, 633, 446, 241, 102, 0), # 179
)
passenger_arriving_rate = (
(5.020865578371768, 5.064847846385402, 4.342736024677089, 4.661000830397574, 3.7031237384064077, 1.8308820436884476, 2.0730178076869574, 1.938823405408093, 2.030033020722669, 0.9895037538805926, 0.7008775273142672, 0.4081595898588478, 0.0, 5.083880212578363, 4.489755488447325, 3.5043876365713356, 2.968511261641777, 4.060066041445338, 2.7143527675713304, 2.0730178076869574, 1.3077728883488913, 1.8515618692032039, 1.5536669434658585, 0.8685472049354179, 0.4604407133077639, 0.0), # 0
(5.354327152019974, 5.399222302966028, 4.629455492775127, 4.968858189957462, 3.948326891649491, 1.9518237573581576, 2.209734470631847, 2.066464051210712, 2.164081775444303, 1.0547451730692876, 0.7471826893260219, 0.4351013884011963, 0.0, 5.419791647439855, 4.786115272413158, 3.73591344663011, 3.164235519207862, 4.328163550888606, 2.8930496716949965, 2.209734470631847, 1.3941598266843982, 1.9741634458247455, 1.6562860633191545, 0.9258910985550255, 0.49083839117872996, 0.0), # 1
(5.686723008979731, 5.732269739983398, 4.915035237956178, 5.275490778498595, 4.192641982499829, 2.072282983465593, 2.345909253980352, 2.193593853293508, 2.297595602292516, 1.1197284437551367, 0.7933038581293855, 0.46193605433775464, 0.0, 5.75436482820969, 5.0812965977153, 3.9665192906469278, 3.3591853312654094, 4.595191204585032, 3.0710313946109116, 2.345909253980352, 1.480202131046852, 2.0963209912499146, 1.758496926166199, 0.9830070475912357, 0.5211154309075817, 0.0), # 2
(6.016757793146562, 6.062668793441743, 5.198342391099879, 5.579682305649055, 4.435107784001268, 2.191782029841316, 2.4810018208239777, 2.3197088156227115, 2.430045053640364, 1.1841956746065454, 0.8390580686378972, 0.4885571404108718, 0.0, 6.086272806254225, 5.374128544519589, 4.195290343189486, 3.5525870238196355, 4.860090107280728, 3.247592341871796, 2.4810018208239777, 1.5655585927437972, 2.217553892000634, 1.8598941018830188, 1.0396684782199759, 0.551151708494704, 0.0), # 3
(6.343136148415981, 6.389098099345293, 5.478244083085864, 5.880216481036927, 4.674763069197661, 2.3098432043158894, 2.6144718342542292, 2.444304942164548, 2.560900681860902, 1.24788897429192, 0.8842623557650959, 0.514858199362897, 0.0, 6.414188632939817, 5.6634401929918665, 4.42131177882548, 3.743666922875759, 5.121801363721804, 3.422026919030367, 2.6144718342542292, 1.6498880030827783, 2.3373815345988307, 1.9600721603456428, 1.095648816617173, 0.5808270999404813, 0.0), # 4
(6.66456271868351, 6.710236293698289, 5.753607444793765, 6.175877014290295, 4.910646611132853, 2.4259888147198754, 2.745778957362612, 2.566878236885247, 2.689633039327186, 1.310550451479666, 0.9287337544245222, 0.5407327839361791, 0.0, 6.736785359632827, 5.948060623297969, 4.64366877212261, 3.9316513544389973, 5.379266078654372, 3.593629531639346, 2.745778957362612, 1.7328491533713395, 2.4553233055664263, 2.058625671430099, 1.1507214889587531, 0.6100214812452991, 0.0), # 5
(6.979742147844666, 7.024762012504959, 6.023299607103222, 6.465447615037239, 5.141797182850695, 2.5397411688838374, 2.8743828532406313, 2.686924703751037, 2.8157126784122717, 1.3719222148381898, 0.9722892995297139, 0.5660744468730674, 0.0, 7.052736037699606, 6.22681891560374, 4.8614464976485685, 4.115766644514569, 5.631425356824543, 3.761694585251452, 2.8743828532406313, 1.8141008349170267, 2.5708985914253475, 2.1551492050124135, 1.2046599214206444, 0.6386147284095418, 0.0), # 6
(7.2873790797949685, 7.331353891769537, 6.286187700893863, 6.747711992905847, 5.367253557395036, 2.650622574638337, 2.9997431849797924, 2.8039403467281465, 2.9386101514892147, 1.4317463730358968, 1.0147460259942116, 0.5907767409159108, 0.0, 7.360713718506519, 6.498544150075018, 5.073730129971057, 4.2952391191076895, 5.877220302978429, 3.9255164854194056, 2.9997431849797924, 1.8933018390273837, 2.683626778697518, 2.249237330968616, 1.2572375401787725, 0.6664867174335943, 0.0), # 7
(7.586178158429934, 7.628690567496257, 6.54113885704533, 7.021453857524196, 5.586054507809724, 2.7581553398139356, 3.1213196156715988, 2.917421169782802, 3.0577960109310682, 1.4897650347411937, 1.0559209687315536, 0.6147332188070586, 0.0, 7.659391453419917, 6.762065406877643, 5.279604843657768, 4.469295104223581, 6.1155920218621365, 4.084389637695923, 3.1213196156715988, 1.970110957009954, 2.793027253904862, 2.3404846191747324, 1.3082277714090662, 0.6935173243178416, 0.0), # 8
(7.874844027645085, 7.915450675689353, 6.787020206437253, 7.285456918520376, 5.797238807138606, 2.861861772241199, 3.23857180840756, 3.0268631768812346, 3.1727408091108913, 1.5457203086224858, 1.0956311626552797, 0.6378374332888596, 0.0, 7.947442293806162, 7.016211766177453, 5.478155813276398, 4.637160925867456, 6.345481618221783, 4.237608447633728, 3.23857180840756, 2.044186980172285, 2.898619403569303, 2.4284856395067926, 1.3574040412874508, 0.7195864250626686, 0.0), # 9
(8.152081331335932, 8.190312852353056, 7.022698879949271, 7.538504885522466, 5.999845228425533, 2.961264179750688, 3.3509594262791773, 3.1317623719896712, 3.282915098401738, 1.599354303348179, 1.133693642678929, 0.6599829371036627, 0.0, 8.22353929103161, 7.259812308140289, 5.668468213394645, 4.798062910044536, 6.565830196803476, 4.384467320785539, 3.3509594262791773, 2.11518869982192, 2.9999226142127666, 2.5128349618408223, 1.4045397759898541, 0.7445738956684597, 0.0), # 10
(8.416594713398005, 8.451955733491605, 7.247042008461013, 7.779381468158547, 6.192912544714355, 3.055884870172965, 3.457942132377958, 3.2316147590743394, 3.3877894311766643, 1.6504091275866801, 1.1699254437160416, 0.6810632829938176, 0.0, 8.486355496462611, 7.491696112931993, 5.849627218580208, 4.951227382760039, 6.775578862353329, 4.524260662704076, 3.457942132377958, 2.1827749072664036, 3.0964562723571776, 2.5931271560528497, 1.4494084016922026, 0.7683596121356006, 0.0), # 11
(8.667088817726812, 8.699057955109222, 7.458916722852117, 8.006870376056709, 6.375479529048918, 3.1452461513385908, 3.5589795897954057, 3.325916342101467, 3.486834359808726, 1.6986268900063934, 1.2041436006801558, 0.7009720237016724, 0.0, 8.734563961465534, 7.710692260718395, 6.020718003400779, 5.095880670019179, 6.973668719617452, 4.656282878942054, 3.5589795897954057, 2.246604393813279, 3.187739764524459, 2.6689567920189035, 1.4917833445704234, 0.7908234504644749, 0.0), # 12
(8.902268288217876, 8.93029815321015, 7.657190154002218, 8.219755318845033, 6.546584954473067, 3.2288703310781304, 3.653531461623028, 3.414163125037284, 3.579520436670977, 1.7437496992757264, 1.2361651484848115, 0.7196027119695768, 0.0, 8.966837737406735, 7.915629831665344, 6.180825742424058, 5.2312490978271775, 7.159040873341954, 4.7798283750521975, 3.653531461623028, 2.306335950770093, 3.2732924772365335, 2.7399184396150114, 1.5314380308004438, 0.8118452866554684, 0.0), # 13
(9.120837768766716, 9.144354963798623, 7.840729432790956, 8.416820006151594, 6.705267594030659, 3.306279717222145, 3.7410574109523305, 3.4958511118480193, 3.6653182141364735, 1.785519664063084, 1.2658071220435476, 0.7368489005398801, 0.0, 9.181849875652563, 8.10533790593868, 6.329035610217737, 5.3565589921892505, 7.330636428272947, 4.894191556587227, 3.7410574109523305, 2.3616283694443894, 3.3526337970153297, 2.8056066687171985, 1.5681458865581912, 0.8313049967089657, 0.0), # 14
(9.321501903268855, 9.339907022878865, 8.008401690097953, 8.59684814760449, 6.850566220765538, 3.376996617601199, 3.821017100874813, 3.5704763064998986, 3.743698244578273, 1.823678893036873, 1.2928865562699035, 0.752604142154931, 0.0, 9.37827342756938, 8.27864556370424, 6.464432781349516, 5.471036679110618, 7.487396489156546, 4.998666829099858, 3.821017100874813, 2.4121404411437135, 3.425283110382769, 2.865616049201497, 1.6016803380195905, 0.8490824566253515, 0.0), # 15
(9.5029653356198, 9.51563296645512, 8.159074056802854, 8.758623452831788, 6.981519607721555, 3.4405433400458514, 3.892870194481988, 3.6375347129591504, 3.8141310803694286, 1.8579694948654994, 1.3172204860774188, 0.7667619895570784, 0.0, 9.554781444523545, 8.434381885127861, 6.586102430387094, 5.5739084845964975, 7.628262160738857, 5.092548598142811, 3.892870194481988, 2.4575309571756083, 3.4907598038607777, 2.9195411509439295, 1.6318148113605708, 0.8650575424050111, 0.0), # 16
(9.663932709715075, 9.670211430531618, 8.291613663785293, 8.900929631461583, 7.097166527942559, 3.4964421923866666, 3.9560763548653552, 3.6965223351920073, 3.8760872738829946, 1.8881335782173672, 1.3386259463796333, 0.7792159954886714, 0.0, 9.710046977881415, 8.571375950375383, 6.693129731898166, 5.6644007346521, 7.752174547765989, 5.17513126926881, 3.9560763548653552, 2.4974587088476192, 3.5485832639712793, 2.9669765438205284, 1.6583227327570589, 0.8791101300483289, 0.0), # 17
(9.803108669450204, 9.802321051112584, 8.404887641924901, 9.022550393121959, 7.1965457544723925, 3.5442154824542103, 4.010095245116426, 3.746935177164692, 3.929037377492032, 1.9139132517608846, 1.3569199720900849, 0.7898597126920597, 0.0, 9.842743079009345, 8.688456839612655, 6.784599860450424, 5.741739755282652, 7.858074754984064, 5.245709248030569, 4.010095245116426, 2.531582487467293, 3.5982728772361963, 3.0075167977073205, 1.6809775283849802, 0.8911200955556896, 0.0), # 18
(9.919197858720699, 9.910640464202265, 8.497763122101317, 9.122269447440985, 7.2786960603549105, 3.5833855180790386, 4.054386528326697, 3.7882692428434357, 3.9724519435695926, 1.9350506241644574, 1.3719195981223131, 0.7985866939095915, 0.0, 9.951542799273696, 8.784453633005505, 6.859597990611565, 5.80515187249337, 7.944903887139185, 5.30357693998081, 4.054386528326697, 2.55956108434217, 3.6393480301774552, 3.0407564824803295, 1.6995526244202632, 0.9009673149274788, 0.0), # 19
(10.010904921422082, 9.993848305804882, 8.569107235194169, 9.198870504046766, 7.342656218633962, 3.613474607091719, 4.088409867587681, 3.8200205361944657, 4.005801524488732, 1.95128780409649, 1.3834418593898585, 0.805290491883616, 0.0, 10.035119190040824, 8.858195410719775, 6.9172092969492915, 5.853863412289469, 8.011603048977465, 5.348028750672252, 4.088409867587681, 2.5810532907797996, 3.671328109316981, 3.0662901680155894, 1.713821447038834, 0.9085316641640803, 0.0), # 20
(10.076934501449866, 10.050623211924679, 8.6177871120831, 9.251137272567364, 7.387465002353392, 3.6340050573228124, 4.1116249259908795, 3.84168506118401, 4.028556672622507, 1.9623669002253892, 1.39130379080626, 0.8098646593564828, 0.0, 10.092145302677078, 8.90851125292131, 6.9565189540313, 5.887100700676166, 8.057113345245014, 5.378359085657614, 4.1116249259908795, 2.5957178980877234, 3.693732501176696, 3.0837124241891223, 1.72355742241662, 0.91369301926588, 0.0), # 21
(10.115991242699579, 10.079643818565883, 8.642669883647738, 9.277853462630876, 7.41216118455705, 3.644499176602881, 4.1234913666278, 3.852758821778298, 4.040187940343971, 1.968030021219561, 1.3953224272850568, 0.8122027490705409, 0.0, 10.121294188548827, 8.934230239775948, 6.976612136425284, 5.904090063658682, 8.080375880687942, 5.393862350489617, 4.1234913666278, 2.6032136975734863, 3.706080592278525, 3.09261782087696, 1.7285339767295478, 0.9163312562332622, 0.0), # 22
(10.13039336334264, 10.083079961133974, 8.645769318701419, 9.281198109567903, 7.418488037355065, 3.6458333333333335, 4.124902001129669, 3.8539557613168727, 4.0416420781893, 1.9686980681298587, 1.3958263395269568, 0.8124914647157445, 0.0, 10.125, 8.93740611187319, 6.9791316976347835, 5.906094204389575, 8.0832841563786, 5.395538065843622, 4.124902001129669, 2.604166666666667, 3.7092440186775324, 3.0937327031893016, 1.729153863740284, 0.9166436328303613, 0.0), # 23
(10.141012413034153, 10.08107561728395, 8.645262345679013, 9.280786458333335, 7.422071742409901, 3.6458333333333335, 4.124126906318083, 3.852291666666667, 4.041447222222222, 1.968287654320988, 1.39577076318743, 0.8124238683127573, 0.0, 10.125, 8.936662551440328, 6.978853815937151, 5.904862962962962, 8.082894444444443, 5.393208333333334, 4.124126906318083, 2.604166666666667, 3.7110358712049507, 3.0935954861111123, 1.7290524691358027, 0.9164614197530866, 0.0), # 24
(10.15140723021158, 10.077124771376313, 8.644261545496114, 9.279972029320987, 7.4255766303963355, 3.6458333333333335, 4.122599451303155, 3.8490226337448563, 4.041062242798354, 1.96747970964792, 1.3956605665710604, 0.8122904282883707, 0.0, 10.125, 8.935194711172077, 6.978302832855302, 5.902439128943758, 8.082124485596708, 5.388631687242799, 4.122599451303155, 2.604166666666667, 3.7127883151981678, 3.0933240097736636, 1.728852309099223, 0.9161022519433014, 0.0), # 25
(10.161577019048034, 10.071287780064015, 8.642780635573846, 9.278764081790122, 7.429002578947403, 3.6458333333333335, 4.120343359154361, 3.8442103909465026, 4.0404920781893, 1.9662876771833566, 1.3954967473084758, 0.8120929736320684, 0.0, 10.125, 8.933022709952752, 6.977483736542379, 5.898863031550069, 8.0809841563786, 5.381894547325103, 4.120343359154361, 2.604166666666667, 3.7145012894737013, 3.0929213605967085, 1.7285561271147696, 0.915571616369456, 0.0), # 26
(10.171520983716636, 10.063624999999998, 8.640833333333333, 9.277171874999999, 7.432349465696142, 3.6458333333333335, 4.117382352941177, 3.837916666666667, 4.039741666666666, 1.9647250000000003, 1.3952803030303031, 0.8118333333333335, 0.0, 10.125, 8.930166666666667, 6.976401515151515, 5.894175, 8.079483333333332, 5.373083333333334, 4.117382352941177, 2.604166666666667, 3.716174732848071, 3.0923906250000006, 1.7281666666666669, 0.914875, 0.0), # 27
(10.181238328390501, 10.054196787837219, 8.638433356195703, 9.275204668209877, 7.4356171682756, 3.6458333333333335, 4.113740155733075, 3.830203189300412, 4.038815946502057, 1.9628051211705537, 1.3950122313671698, 0.8115133363816492, 0.0, 10.125, 8.926646700198141, 6.9750611568358485, 5.88841536351166, 8.077631893004114, 5.3622844650205765, 4.113740155733075, 2.604166666666667, 3.7178085841378, 3.091734889403293, 1.7276866712391405, 0.9140178898033837, 0.0), # 28
(10.19072825724275, 10.043063500228623, 8.635594421582077, 9.272871720679012, 7.438805564318813, 3.6458333333333335, 4.109440490599533, 3.821131687242798, 4.037719855967078, 1.9605414837677189, 1.3946935299497027, 0.811134811766499, 0.0, 10.125, 8.922482929431489, 6.973467649748514, 5.881624451303155, 8.075439711934155, 5.349584362139917, 4.109440490599533, 2.604166666666667, 3.7194027821594067, 3.0909572402263383, 1.7271188843164156, 0.9130057727480568, 0.0), # 29
(10.199989974446497, 10.03028549382716, 8.63233024691358, 9.270182291666666, 7.441914531458824, 3.6458333333333335, 4.104507080610022, 3.8107638888888884, 4.036458333333333, 1.957947530864198, 1.39432519640853, 0.8106995884773662, 0.0, 10.125, 8.917695473251028, 6.9716259820426485, 5.873842592592593, 8.072916666666666, 5.335069444444444, 4.104507080610022, 2.604166666666667, 3.720957265729412, 3.0900607638888897, 1.7264660493827162, 0.9118441358024693, 0.0), # 30
(10.209022684174858, 10.01592312528578, 8.62865454961134, 9.267145640432098, 7.444943947328672, 3.6458333333333335, 4.09896364883402, 3.799161522633745, 4.035036316872428, 1.9550367055326936, 1.3939082283742779, 0.8102094955037343, 0.0, 10.125, 8.912304450541077, 6.969541141871389, 5.865110116598079, 8.070072633744855, 5.318826131687243, 4.09896364883402, 2.604166666666667, 3.722471973664336, 3.0890485468107003, 1.7257309099222682, 0.910538465935071, 0.0), # 31
(10.217825590600954, 10.00003675125743, 8.624581047096479, 9.263771026234568, 7.447893689561397, 3.6458333333333335, 4.092833918340999, 3.7863863168724285, 4.033458744855967, 1.951822450845908, 1.3934436234775742, 0.8096663618350862, 0.0, 10.125, 8.906329980185948, 6.96721811738787, 5.8554673525377225, 8.066917489711933, 5.3009408436214, 4.092833918340999, 2.604166666666667, 3.7239468447806985, 3.0879236754115236, 1.7249162094192958, 0.909094250114312, 0.0), # 32
(10.226397897897897, 9.98268672839506, 8.620123456790123, 9.260067708333333, 7.450763635790041, 3.6458333333333335, 4.086141612200436, 3.7725000000000004, 4.031730555555555, 1.9483182098765437, 1.392932379349046, 0.8090720164609053, 0.0, 10.125, 8.899792181069957, 6.96466189674523, 5.84495462962963, 8.06346111111111, 5.2815, 4.086141612200436, 2.604166666666667, 3.7253818178950207, 3.086689236111112, 1.724024691358025, 0.9075169753086421, 0.0), # 33
(10.23473881023881, 9.963933413351622, 8.615295496113397, 9.256044945987654, 7.453553663647644, 3.6458333333333335, 4.078910453481805, 3.7575643004115222, 4.029856687242798, 1.9445374256973027, 1.3923754936193207, 0.8084282883706753, 0.0, 10.125, 8.892711172077426, 6.961877468096604, 5.833612277091907, 8.059713374485597, 5.260590020576132, 4.078910453481805, 2.604166666666667, 3.726776831823822, 3.085348315329219, 1.7230590992226795, 0.9058121284865113, 0.0), # 34
(10.242847531796807, 9.943837162780063, 8.610110882487428, 9.25171199845679, 7.456263650767246, 3.6458333333333335, 4.071164165254579, 3.741640946502058, 4.0278420781893, 1.9404935413808875, 1.3917739639190256, 0.807737006553879, 0.0, 10.125, 8.88510707209267, 6.958869819595128, 5.821480624142661, 8.0556841563786, 5.238297325102881, 4.071164165254579, 2.604166666666667, 3.728131825383623, 3.0839039994855972, 1.7220221764974855, 0.9039851966163696, 0.0), # 35
(10.250723266745005, 9.922458333333331, 8.604583333333334, 9.247078125, 7.45889347478189, 3.6458333333333335, 4.062926470588235, 3.724791666666667, 4.025691666666666, 1.9362000000000004, 1.391128787878788, 0.8070000000000002, 0.0, 10.125, 8.877, 6.95564393939394, 5.8086, 8.051383333333332, 5.214708333333334, 4.062926470588235, 2.604166666666667, 3.729446737390945, 3.0823593750000007, 1.7209166666666669, 0.9020416666666666, 0.0), # 36
(10.258365219256524, 9.89985728166438, 8.598726566072246, 9.242152584876543, 7.4614430133246135, 3.6458333333333335, 4.054221092552247, 3.707078189300412, 4.023410390946502, 1.931670244627344, 1.3904409631292352, 0.8062190976985216, 0.0, 10.125, 8.868410074683737, 6.952204815646175, 5.79501073388203, 8.046820781893004, 5.189909465020577, 4.054221092552247, 2.604166666666667, 3.7307215066623067, 3.080717528292182, 1.7197453132144491, 0.8999870256058529, 0.0), # 37
(10.265772593504476, 9.876094364426155, 8.592554298125286, 9.23694463734568, 7.46391214402846, 3.6458333333333335, 4.04507175421609, 3.6885622427983544, 4.021003189300411, 1.92691771833562, 1.3897114873009937, 0.8053961286389272, 0.0, 10.125, 8.859357415028198, 6.948557436504967, 5.780753155006859, 8.042006378600822, 5.163987139917697, 4.04507175421609, 2.604166666666667, 3.73195607201423, 3.078981545781894, 1.7185108596250571, 0.8978267604023779, 0.0), # 38
(10.272944593661986, 9.851229938271604, 8.586080246913582, 9.231463541666667, 7.466300744526468, 3.6458333333333335, 4.035502178649238, 3.6693055555555554, 4.0184750000000005, 1.9219558641975314, 1.3889413580246914, 0.8045329218106996, 0.0, 10.125, 8.849862139917693, 6.944706790123457, 5.765867592592593, 8.036950000000001, 5.137027777777778, 4.035502178649238, 2.604166666666667, 3.733150372263234, 3.07715451388889, 1.7172160493827164, 0.8955663580246914, 0.0), # 39
(10.279880423902163, 9.82532435985368, 8.579318129858253, 9.225718557098766, 7.468608692451679, 3.6458333333333335, 4.025536088921165, 3.649369855967079, 4.015830761316872, 1.9167981252857802, 1.3881315729309558, 0.8036313062033228, 0.0, 10.125, 8.83994436823655, 6.940657864654778, 5.750394375857339, 8.031661522633744, 5.1091177983539104, 4.025536088921165, 2.604166666666667, 3.7343043462258394, 3.0752395190329227, 1.7158636259716507, 0.8932113054412438, 0.0), # 40
(10.286579288398128, 9.79843798582533, 8.57228166438043, 9.219718942901235, 7.4708358654371345, 3.6458333333333335, 4.015197208101347, 3.628816872427984, 4.0130754115226335, 1.9114579446730684, 1.3872831296504138, 0.8026931108062796, 0.0, 10.125, 8.829624218869075, 6.936415648252069, 5.734373834019204, 8.026150823045267, 5.0803436213991775, 4.015197208101347, 2.604166666666667, 3.7354179327185673, 3.073239647633746, 1.7144563328760862, 0.8907670896204848, 0.0), # 41
(10.293040391323, 9.770631172839506, 8.564984567901236, 9.213473958333335, 7.472982141115872, 3.6458333333333335, 4.004509259259259, 3.6077083333333335, 4.010213888888889, 1.9059487654320992, 1.3863970258136926, 0.8017201646090536, 0.0, 10.125, 8.818921810699589, 6.931985129068463, 5.717846296296297, 8.020427777777778, 5.050791666666667, 4.004509259259259, 2.604166666666667, 3.736491070557936, 3.0711579861111122, 1.7129969135802474, 0.8882391975308643, 0.0), # 42
(10.299262936849892, 9.741964277549155, 8.557440557841794, 9.206992862654321, 7.475047397120935, 3.6458333333333335, 3.993495965464375, 3.58610596707819, 4.007251131687243, 1.9002840306355744, 1.3854742590514195, 0.800714296601128, 0.0, 10.125, 8.807857262612407, 6.927371295257098, 5.700852091906722, 8.014502263374485, 5.020548353909466, 3.993495965464375, 2.604166666666667, 3.7375236985604676, 3.0689976208847747, 1.7114881115683587, 0.8856331161408324, 0.0), # 43
(10.305246129151927, 9.712497656607225, 8.549663351623229, 9.200284915123458, 7.477031511085363, 3.6458333333333335, 3.9821810497861696, 3.564071502057614, 4.0041920781893, 1.8944771833561962, 1.3845158269942222, 0.7996773357719861, 0.0, 10.125, 8.796450693491845, 6.92257913497111, 5.683431550068587, 8.0083841563786, 4.98970010288066, 3.9821810497861696, 2.604166666666667, 3.7385157555426813, 3.0667616383744867, 1.709932670324646, 0.8829543324188387, 0.0), # 44
(10.310989172402216, 9.682291666666666, 8.541666666666668, 9.193359375, 7.478934360642197, 3.6458333333333335, 3.9705882352941178, 3.541666666666667, 4.001041666666666, 1.8885416666666672, 1.3835227272727273, 0.798611111111111, 0.0, 10.125, 8.784722222222221, 6.917613636363637, 5.665625, 8.002083333333331, 4.958333333333334, 3.9705882352941178, 2.604166666666667, 3.7394671803210984, 3.064453125000001, 1.7083333333333335, 0.8802083333333335, 0.0), # 45
(10.31649127077388, 9.65140666438043, 8.533464220393233, 9.186225501543209, 7.480755823424477, 3.6458333333333335, 3.958741245057694, 3.518953189300412, 3.997804835390946, 1.8824909236396894, 1.3824959575175624, 0.7975174516079867, 0.0, 10.125, 8.772691967687852, 6.912479787587812, 5.647472770919067, 7.995609670781892, 4.926534465020577, 3.958741245057694, 2.604166666666667, 3.7403779117122387, 3.062075167181071, 1.7066928440786466, 0.8774006058527665, 0.0), # 46
(10.321751628440035, 9.619903006401461, 8.525069730224052, 9.178892554012345, 7.482495777065244, 3.6458333333333335, 3.9466638021463734, 3.4959927983539094, 3.994486522633745, 1.8763383973479657, 1.3814365153593549, 0.7963981862520958, 0.0, 10.125, 8.760380048773053, 6.9071825767967745, 5.629015192043896, 7.98897304526749, 4.894389917695474, 3.9466638021463734, 2.604166666666667, 3.741247888532622, 3.0596308513374493, 1.7050139460448106, 0.8745366369455876, 0.0), # 47
(10.326769449573796, 9.587841049382716, 8.516496913580248, 9.171369791666667, 7.48415409919754, 3.6458333333333335, 3.9343796296296296, 3.4728472222222226, 3.9910916666666667, 1.8700975308641978, 1.3803453984287317, 0.7952551440329219, 0.0, 10.125, 8.74780658436214, 6.901726992143659, 5.610292592592592, 7.982183333333333, 4.861986111111112, 3.9343796296296296, 2.604166666666667, 3.74207704959877, 3.05712326388889, 1.7032993827160496, 0.871621913580247, 0.0), # 48
(10.331543938348286, 9.555281149977136, 8.507759487882945, 9.163666473765433, 7.485730667454405, 3.6458333333333335, 3.9219124505769383, 3.4495781893004116, 3.987625205761317, 1.8637817672610888, 1.3792236043563206, 0.7940901539399483, 0.0, 10.125, 8.73499169333943, 6.896118021781603, 5.5913453017832655, 7.975250411522634, 4.829409465020577, 3.9219124505769383, 2.604166666666667, 3.7428653337272024, 3.054555491255145, 1.7015518975765893, 0.8686619227251944, 0.0), # 49
(10.336074298936616, 9.522283664837678, 8.49887117055327, 9.155791859567902, 7.4872253594688765, 3.6458333333333335, 3.909285988057775, 3.4262474279835393, 3.9840920781893, 1.85740454961134, 1.3780721307727481, 0.7929050449626583, 0.0, 10.125, 8.72195549458924, 6.89036065386374, 5.572213648834019, 7.9681841563786, 4.796746399176955, 3.909285988057775, 2.604166666666667, 3.7436126797344382, 3.051930619855968, 1.6997742341106543, 0.86566215134888, 0.0), # 50
(10.34035973551191, 9.488908950617283, 8.489845679012346, 9.147755208333333, 7.488638052873998, 3.6458333333333335, 3.896523965141612, 3.4029166666666666, 3.9804972222222226, 1.8509793209876546, 1.3768919753086422, 0.7917016460905352, 0.0, 10.125, 8.708718106995885, 6.884459876543211, 5.552937962962963, 7.960994444444445, 4.764083333333334, 3.896523965141612, 2.604166666666667, 3.744319026436999, 3.049251736111112, 1.6979691358024693, 0.8626280864197532, 0.0), # 51
(10.344399452247279, 9.455217363968908, 8.480696730681299, 9.139565779320987, 7.489968625302809, 3.6458333333333335, 3.883650104897926, 3.3796476337448556, 3.976845576131687, 1.8445195244627348, 1.3756841355946297, 0.7904817863130622, 0.0, 10.125, 8.695299649443683, 6.878420677973147, 5.533558573388203, 7.953691152263374, 4.731506687242798, 3.883650104897926, 2.604166666666667, 3.7449843126514044, 3.04652192644033, 1.69613934613626, 0.8595652149062645, 0.0), # 52
(10.348192653315843, 9.421269261545497, 8.471438042981255, 9.131232831790122, 7.491216954388353, 3.6458333333333335, 3.8706881303961915, 3.3565020576131688, 3.9731420781893005, 1.8380386031092826, 1.3744496092613379, 0.7892472946197227, 0.0, 10.125, 8.681720240816947, 6.872248046306688, 5.514115809327846, 7.946284156378601, 4.699102880658437, 3.8706881303961915, 2.604166666666667, 3.7456084771941764, 3.043744277263375, 1.694287608596251, 0.8564790237768635, 0.0), # 53
(10.351738542890716, 9.387125000000001, 8.462083333333332, 9.122765625, 7.492382917763668, 3.6458333333333335, 3.8576617647058824, 3.333541666666666, 3.9693916666666667, 1.8315500000000005, 1.3731893939393938, 0.788, 0.0, 10.125, 8.668, 6.865946969696969, 5.49465, 7.938783333333333, 4.666958333333333, 3.8576617647058824, 2.604166666666667, 3.746191458881834, 3.040921875000001, 1.6924166666666667, 0.8533750000000002, 0.0), # 54
(10.355036325145022, 9.352844935985367, 8.452646319158665, 9.114173418209877, 7.493466393061793, 3.6458333333333335, 3.844594730896474, 3.3108281893004117, 3.9655992798353905, 1.8250671582075908, 1.3719044872594257, 0.7867417314433777, 0.0, 10.125, 8.654159045877153, 6.859522436297127, 5.4752014746227715, 7.931198559670781, 4.6351594650205765, 3.844594730896474, 2.604166666666667, 3.7467331965308963, 3.0380578060699595, 1.6905292638317333, 0.8502586305441244, 0.0), # 55
(10.358085204251871, 9.31848942615455, 8.443140717878373, 9.105465470679011, 7.4944672579157725, 3.6458333333333335, 3.8315107520374405, 3.288423353909465, 3.961769855967078, 1.818603520804756, 1.3705958868520598, 0.7854743179393385, 0.0, 10.125, 8.640217497332722, 6.852979434260299, 5.455810562414267, 7.923539711934156, 4.603792695473251, 3.8315107520374405, 2.604166666666667, 3.7472336289578863, 3.035155156893005, 1.6886281435756747, 0.8471354023776865, 0.0), # 56
(10.360884384384383, 9.284118827160494, 8.433580246913582, 9.096651041666666, 7.495385389958644, 3.6458333333333335, 3.818433551198257, 3.2663888888888892, 3.957908333333333, 1.812172530864198, 1.369264590347924, 0.7841995884773663, 0.0, 10.125, 8.626195473251027, 6.8463229517396185, 5.436517592592593, 7.915816666666666, 4.572944444444445, 3.818433551198257, 2.604166666666667, 3.747692694979322, 3.0322170138888898, 1.6867160493827165, 0.844010802469136, 0.0), # 57
(10.36343306971568, 9.24979349565615, 8.423978623685414, 9.087739390432098, 7.496220666823449, 3.6458333333333335, 3.8053868514483984, 3.2447865226337447, 3.954019650205761, 1.8057876314586196, 1.367911595377645, 0.7829193720469442, 0.0, 10.125, 8.612113092516385, 6.8395579768882255, 5.417362894375858, 7.908039300411522, 4.5427011316872425, 3.8053868514483984, 2.604166666666667, 3.7481103334117245, 3.029246463477367, 1.684795724737083, 0.8408903177869229, 0.0), # 58
(10.36573046441887, 9.215573788294467, 8.414349565614998, 9.078739776234567, 7.49697296614323, 3.6458333333333335, 3.792394375857339, 3.2236779835390945, 3.9501087448559673, 1.799462265660723, 1.3665378995718502, 0.7816354976375554, 0.0, 10.125, 8.597990474013107, 6.83268949785925, 5.398386796982168, 7.900217489711935, 4.513149176954733, 3.792394375857339, 2.604166666666667, 3.748486483071615, 3.02624659207819, 1.6828699131229998, 0.8377794352994972, 0.0), # 59
(10.367775772667077, 9.181520061728396, 8.404706790123456, 9.069661458333334, 7.497642165551024, 3.6458333333333335, 3.779479847494553, 3.203125, 3.946180555555556, 1.7932098765432103, 1.3651445005611673, 0.7803497942386832, 0.0, 10.125, 8.583847736625515, 6.825722502805837, 5.37962962962963, 7.892361111111112, 4.484375, 3.779479847494553, 2.604166666666667, 3.748821082775512, 3.023220486111112, 1.6809413580246915, 0.8346836419753088, 0.0), # 60
(10.369568198633415, 9.147692672610884, 8.395064014631917, 9.060513695987654, 7.498228142679874, 3.6458333333333335, 3.7666669894295164, 3.183189300411523, 3.9422400205761314, 1.7870439071787843, 1.3637323959762233, 0.7790640908398111, 0.0, 10.125, 8.56970499923792, 6.818661979881115, 5.361131721536351, 7.884480041152263, 4.456465020576132, 3.7666669894295164, 2.604166666666667, 3.749114071339937, 3.0201712319958856, 1.6790128029263836, 0.8316084247828076, 0.0), # 61
(10.371106946491004, 9.114151977594878, 8.385434956561502, 9.051305748456791, 7.498730775162823, 3.6458333333333335, 3.753979524731703, 3.1639326131687247, 3.9382920781893, 1.7809778006401469, 1.3623025834476452, 0.7777802164304223, 0.0, 10.125, 8.555582380734645, 6.811512917238226, 5.3429334019204395, 7.8765841563786, 4.429505658436215, 3.753979524731703, 2.604166666666667, 3.7493653875814115, 3.0171019161522645, 1.6770869913123003, 0.8285592706904436, 0.0), # 62
(10.37239122041296, 9.080958333333333, 8.375833333333334, 9.042046875, 7.499149940632904, 3.6458333333333335, 3.741441176470588, 3.1454166666666667, 3.9343416666666666, 1.7750250000000003, 1.360856060606061, 0.7765000000000001, 0.0, 10.125, 8.5415, 6.804280303030303, 5.325075, 7.868683333333333, 4.403583333333334, 3.741441176470588, 2.604166666666667, 3.749574970316452, 3.014015625000001, 1.675166666666667, 0.8255416666666667, 0.0), # 63
(10.373420224572397, 9.048172096479195, 8.366272862368541, 9.032746334876544, 7.4994855167231655, 3.6458333333333335, 3.729075667715646, 3.127703189300412, 3.9303937242798352, 1.7691989483310475, 1.3593938250820965, 0.7752252705380279, 0.0, 10.125, 8.527477975918305, 6.796969125410483, 5.307596844993141, 7.8607874485596705, 4.378784465020577, 3.729075667715646, 2.604166666666667, 3.7497427583615828, 3.0109154449588487, 1.6732545724737085, 0.822561099679927, 0.0), # 64
(10.374193163142438, 9.015853623685413, 8.35676726108825, 9.023413387345679, 7.499737381066645, 3.6458333333333335, 3.7169067215363514, 3.1108539094650207, 3.9264531893004113, 1.7635130887059902, 1.357916874506381, 0.7739578570339887, 0.0, 10.125, 8.513536427373873, 6.7895843725319045, 5.290539266117969, 7.852906378600823, 4.355195473251029, 3.7169067215363514, 2.604166666666667, 3.7498686905333223, 3.0078044624485605, 1.67135345221765, 0.819623056698674, 0.0), # 65
(10.374709240296196, 8.984063271604938, 8.34733024691358, 9.014057291666667, 7.499905411296382, 3.6458333333333335, 3.7049580610021784, 3.094930555555556, 3.9225250000000003, 1.7579808641975312, 1.3564262065095398, 0.7726995884773664, 0.0, 10.125, 8.499695473251029, 6.782131032547699, 5.273942592592592, 7.8450500000000005, 4.332902777777778, 3.7049580610021784, 2.604166666666667, 3.749952705648191, 3.0046857638888897, 1.6694660493827165, 0.8167330246913582, 0.0), # 66
(10.374967660206792, 8.952861396890716, 8.337975537265661, 9.004687307098765, 7.499989485045419, 3.6458333333333335, 3.693253409182603, 3.0799948559670787, 3.9186140946502057, 1.7526157178783728, 1.3549228187222018, 0.7714522938576437, 0.0, 10.125, 8.485975232434079, 6.774614093611008, 5.257847153635117, 7.837228189300411, 4.31199279835391, 3.693253409182603, 2.604166666666667, 3.7499947425227096, 3.001562435699589, 1.6675951074531323, 0.8138964906264289, 0.0), # 67
(10.374791614480825, 8.922144586043629, 8.328671624942844, 8.995231305354269, 7.499918636864896, 3.645765673423767, 3.681757597414823, 3.0659766041761927, 3.9146959495503735, 1.747405110411792, 1.3533809980900628, 0.770210835158312, 0.0, 10.124875150034294, 8.47231918674143, 6.766904990450313, 5.242215331235375, 7.829391899100747, 4.29236724584667, 3.681757597414823, 2.604118338159833, 3.749959318432448, 2.99841043511809, 1.6657343249885688, 0.8111040532766937, 0.0), # 68
(10.373141706924315, 8.890975059737157, 8.319157021604937, 8.985212635869564, 7.499273783587508, 3.6452307956104257, 3.6701340906733066, 3.052124485596708, 3.910599279835391, 1.7422015976761076, 1.3516438064859118, 0.7689349144466104, 0.0, 10.12388599537037, 8.458284058912714, 6.758219032429559, 5.226604793028321, 7.821198559670782, 4.272974279835391, 3.6701340906733066, 2.6037362825788755, 3.749636891793754, 2.9950708786231885, 1.6638314043209876, 0.8082704599761052, 0.0), # 69
(10.369885787558895, 8.859209754856408, 8.309390360653863, 8.974565343196456, 7.497999542752628, 3.6441773992785653, 3.658330067280685, 3.0383135192805977, 3.9063009640298736, 1.736979881115684, 1.3496914810876801, 0.7676185634410675, 0.0, 10.121932334533609, 8.44380419785174, 6.7484574054383994, 5.210939643347051, 7.812601928059747, 4.253638926992837, 3.658330067280685, 2.6029838566275467, 3.748999771376314, 2.991521781065486, 1.6618780721307727, 0.8053827049869463, 0.0), # 70
(10.365069660642929, 8.826867654542236, 8.299375071444901, 8.963305127818035, 7.496112052502757, 3.6426225549966977, 3.646350829769494, 3.0245482777015704, 3.9018074035970125, 1.7317400898356603, 1.347531228463977, 0.7662627447677263, 0.0, 10.119039887688615, 8.428890192444989, 6.737656142319885, 5.195220269506979, 7.803614807194025, 4.234367588782199, 3.646350829769494, 2.6018732535690696, 3.7480560262513785, 2.987768375939346, 1.6598750142889804, 0.8024425140492942, 0.0), # 71
(10.358739130434783, 8.793967741935482, 8.289114583333333, 8.95144769021739, 7.493627450980392, 3.6405833333333337, 3.634201680672269, 3.0108333333333333, 3.897125, 1.7264823529411768, 1.3451702551834133, 0.7648684210526316, 0.0, 10.115234375, 8.413552631578947, 6.7258512759170666, 5.179447058823529, 7.79425, 4.215166666666667, 3.634201680672269, 2.600416666666667, 3.746813725490196, 2.983815896739131, 1.6578229166666667, 0.7994516129032258, 0.0), # 72
(10.35094000119282, 8.760529000176998, 8.27861232567444, 8.939008730877617, 7.490561876328034, 3.638076804856983, 3.621887922521546, 2.9971732586495965, 3.8922601547020275, 1.7212067995373737, 1.3426157678145982, 0.7634365549218266, 0.0, 10.110541516632374, 8.397802104140093, 6.71307883907299, 5.163620398612119, 7.784520309404055, 4.196042562109435, 3.621887922521546, 2.598626289183559, 3.745280938164017, 2.979669576959206, 1.655722465134888, 0.7964117272888181, 0.0), # 73
(10.341718077175404, 8.726570412407629, 8.267871727823502, 8.926003950281803, 7.486931466688183, 3.6351200401361585, 3.609414857849861, 2.9835726261240665, 3.8872192691662857, 1.7159135587293908, 1.3398749729261428, 0.7619681090013557, 0.0, 10.104987032750344, 8.38164919901491, 6.699374864630713, 5.147740676188171, 7.774438538332571, 4.177001676573693, 3.609414857849861, 2.5965143143829703, 3.7434657333440917, 2.975334650093935, 1.6535743455647005, 0.7933245829461482, 0.0), # 74
(10.331119162640901, 8.692110961768218, 8.256896219135802, 8.912449048913043, 7.482752360203341, 3.6317301097393697, 3.59678778918975, 2.9700360082304527, 3.8820087448559666, 1.7106027596223679, 1.336955077086656, 0.7604640459172624, 0.0, 10.098596643518519, 8.365104505089885, 6.684775385433279, 5.131808278867102, 7.764017489711933, 4.158050411522634, 3.59678778918975, 2.594092935528121, 3.7413761801016703, 2.9708163496376816, 1.6513792438271604, 0.7901919056152927, 0.0), # 75
(10.319189061847677, 8.65716963139962, 8.245689228966622, 8.898359727254428, 7.478040695016003, 3.6279240842351275, 3.5840120190737474, 2.956567977442463, 3.876634983234263, 1.7052745313214452, 1.3338632868647486, 0.7589253282955902, 0.0, 10.091396069101508, 8.348178611251491, 6.669316434323743, 5.115823593964334, 7.753269966468526, 4.139195168419449, 3.5840120190737474, 2.5913743458822336, 3.7390203475080015, 2.96611990908481, 1.6491378457933243, 0.7870154210363293, 0.0), # 76
(10.305973579054093, 8.621765404442675, 8.234254186671238, 8.883751685789049, 7.472812609268672, 3.6237190341919425, 3.5710928500343897, 2.9431731062338065, 3.871104385764365, 1.699929002931763, 1.3306068088290313, 0.7573529187623839, 0.0, 10.083411029663925, 8.330882106386222, 6.653034044145156, 5.099787008795288, 7.74220877152873, 4.120442348727329, 3.5710928500343897, 2.58837073870853, 3.736406304634336, 2.9612505619296834, 1.6468508373342476, 0.7837968549493343, 0.0), # 77
(10.291518518518519, 8.585917264038233, 8.222594521604938, 8.868640625, 7.467084241103849, 3.6191320301783265, 3.5580355846042124, 2.9298559670781894, 3.8654233539094642, 1.6945663035584608, 1.327192849548113, 0.7557477799436866, 0.0, 10.074667245370371, 8.313225579380552, 6.635964247740564, 5.083698910675381, 7.7308467078189285, 4.101798353909466, 3.5580355846042124, 2.585094307270233, 3.7335421205519244, 2.956213541666667, 1.6445189043209878, 0.7805379330943849, 0.0), # 78
(10.275869684499314, 8.549644193327138, 8.210713663123, 8.85304224537037, 7.460871728664031, 3.61418014276279, 3.5448455253157505, 2.916621132449322, 3.859598289132754, 1.6891865623066789, 1.3236286155906039, 0.7541108744655421, 0.0, 10.065190436385459, 8.295219619120962, 6.618143077953018, 5.067559686920035, 7.719196578265508, 4.083269585429051, 3.5448455253157505, 2.5815572448305644, 3.7304358643320157, 2.951014081790124, 1.6421427326246, 0.7772403812115581, 0.0), # 79
(10.259072881254847, 8.51296517545024, 8.198615040580703, 8.836972247383253, 7.454191210091719, 3.6088804425138448, 3.5315279747015405, 2.9034731748209115, 3.853635592897424, 1.683789908281557, 1.3199213135251149, 0.7524431649539947, 0.0, 10.0550063228738, 8.27687481449394, 6.599606567625574, 5.05136972484467, 7.707271185794848, 4.064862444749276, 3.5315279747015405, 2.577771744652746, 3.7270956050458595, 2.945657415794418, 1.639723008116141, 0.7739059250409311, 0.0), # 80
(10.241173913043479, 8.475899193548386, 8.186302083333333, 8.82044633152174, 7.447058823529411, 3.60325, 3.5180882352941176, 2.890416666666667, 3.8475416666666664, 1.6783764705882358, 1.3160781499202554, 0.7507456140350878, 0.0, 10.044140624999999, 8.258201754385965, 6.580390749601277, 5.035129411764706, 7.695083333333333, 4.046583333333333, 3.5180882352941176, 2.57375, 3.7235294117647055, 2.940148777173914, 1.6372604166666667, 0.7705362903225808, 0.0), # 81
(10.222218584123576, 8.438465230762423, 8.17377822073617, 8.803480198268922, 7.43949070711961, 3.5973058857897686, 3.504531609626018, 2.8774561804602956, 3.841322911903673, 1.6729463783318543, 1.3121063313446355, 0.7490191843348656, 0.0, 10.03261906292867, 8.23921102768352, 6.560531656723177, 5.018839134995561, 7.682645823807346, 4.0284386526444145, 3.504531609626018, 2.5695042041355487, 3.719745353559805, 2.934493399422974, 1.634755644147234, 0.767133202796584, 0.0), # 82
(10.202252698753504, 8.400682270233196, 8.16104688214449, 8.78608954810789, 7.431502999004814, 3.591065170451659, 3.4908634002297765, 2.8645962886755068, 3.8349857300716352, 1.6674997606175532, 1.3080130643668657, 0.7472648384793719, 0.0, 10.020467356824417, 8.219913223273089, 6.540065321834328, 5.002499281852659, 7.6699714601432705, 4.01043480414571, 3.4908634002297765, 2.5650465503226134, 3.715751499502407, 2.9286965160359637, 1.632209376428898, 0.7636983882030178, 0.0), # 83
(10.181322061191626, 8.362569295101553, 8.14811149691358, 8.768290081521739, 7.423111837327523, 3.584544924554184, 3.477088909637929, 2.851841563786008, 3.8285365226337444, 1.6620367465504726, 1.3038055555555557, 0.7454835390946503, 0.0, 10.007711226851852, 8.200318930041153, 6.519027777777778, 4.986110239651417, 7.657073045267489, 3.9925781893004113, 3.477088909637929, 2.5603892318244172, 3.7115559186637617, 2.922763360507247, 1.629622299382716, 0.7602335722819594, 0.0), # 84
(10.159472475696308, 8.32414528850834, 8.13497549439872, 8.75009749899356, 7.414333360230238, 3.577762218665854, 3.463213440383012, 2.8391965782655086, 3.8219816910531925, 1.6565574652357518, 1.2994910114793157, 0.7436762488067449, 0.0, 9.994376393175584, 8.180438736874192, 6.497455057396579, 4.969672395707254, 7.643963382106385, 3.9748752095717124, 3.463213440383012, 2.5555444419041815, 3.707166680115119, 2.916699166331187, 1.626995098879744, 0.7567404807734855, 0.0), # 85
(10.136749746525913, 8.285429233594407, 8.121642303955191, 8.731527501006443, 7.405183705855455, 3.57073412335518, 3.44924229499756, 2.826665904587715, 3.815327636793172, 1.6510620457785314, 1.2950766387067558, 0.7418439302416996, 0.0, 9.98048857596022, 8.160283232658694, 6.475383193533778, 4.953186137335593, 7.630655273586344, 3.9573322664228017, 3.44924229499756, 2.550524373825129, 3.7025918529277275, 2.910509167002148, 1.6243284607910382, 0.7532208394176735, 0.0), # 86
(10.113199677938807, 8.246440113500597, 8.10811535493827, 8.712595788043478, 7.3956790123456795, 3.563477709190672, 3.4351807760141093, 2.8142541152263374, 3.8085807613168727, 1.645550617283951, 1.290569643806486, 0.7399875460255577, 0.0, 9.96607349537037, 8.139863006281134, 6.452848219032429, 4.936651851851852, 7.6171615226337455, 3.9399557613168725, 3.4351807760141093, 2.54534122085048, 3.6978395061728397, 2.904198596014493, 1.6216230709876542, 0.7496763739545999, 0.0), # 87
(10.088868074193357, 8.207196911367758, 8.094398076703246, 8.693318060587762, 7.385835417843406, 3.5560100467408424, 3.4210341859651954, 2.801965782655083, 3.8017474660874866, 1.6400233088571508, 1.2859772333471164, 0.7381080587843638, 0.0, 9.951156871570646, 8.119188646628, 6.429886166735582, 4.9200699265714505, 7.603494932174973, 3.9227520957171165, 3.4210341859651954, 2.540007176243459, 3.692917708921703, 2.897772686862588, 1.6188796153406495, 0.7461088101243417, 0.0), # 88
(10.063800739547922, 8.16771861033674, 8.080493898605397, 8.673710019122383, 7.375669060491138, 3.5483482065742016, 3.406807827383354, 2.7898054793476605, 3.794834152568206, 1.634480249603271, 1.2813066138972575, 0.7362064311441613, 0.0, 9.935764424725651, 8.098270742585774, 6.4065330694862865, 4.903440748809812, 7.589668305136412, 3.905727671086725, 3.406807827383354, 2.534534433267287, 3.687834530245569, 2.891236673040795, 1.6160987797210793, 0.7425198736669765, 0.0), # 89
(10.03804347826087, 8.128024193548386, 8.06640625, 8.653787364130435, 7.365196078431373, 3.5405092592592595, 3.3925070028011204, 2.7777777777777777, 3.7878472222222226, 1.6289215686274514, 1.2765649920255184, 0.7342836257309943, 0.0, 9.919921875, 8.077119883040936, 6.382824960127592, 4.886764705882353, 7.575694444444445, 3.888888888888889, 3.3925070028011204, 2.5289351851851856, 3.6825980392156863, 2.884595788043479, 1.6132812500000002, 0.7389112903225807, 0.0), # 90
(10.011642094590563, 8.088132644143545, 8.05213856024234, 8.63356579609501, 7.35443260980661, 3.532510275364528, 3.378137014751031, 2.7658872504191434, 3.780793076512727, 1.6233473950348318, 1.2717595743005101, 0.7323406051709063, 0.0, 9.903654942558298, 8.055746656879968, 6.35879787150255, 4.870042185104494, 7.561586153025454, 3.872242150586801, 3.378137014751031, 2.5232216252603767, 3.677216304903305, 2.8778552653650036, 1.6104277120484682, 0.7352847858312315, 0.0), # 91
(9.984642392795372, 8.048062945263066, 8.0376942586877, 8.613061015499195, 7.343394792759352, 3.524368325458518, 3.363703165765621, 2.754138469745466, 3.773678116902911, 1.6177578579305527, 1.2668975672908422, 0.7303783320899415, 0.0, 9.886989347565157, 8.034161652989356, 6.334487836454211, 4.853273573791657, 7.547356233805822, 3.8557938576436523, 3.363703165765621, 2.517405946756084, 3.671697396379676, 2.871020338499732, 1.6075388517375402, 0.7316420859330061, 0.0), # 92
(9.957090177133654, 8.00783408004779, 8.023076774691358, 8.592288722826089, 7.332098765432098, 3.5161004801097393, 3.349210758377425, 2.742536008230453, 3.766508744855967, 1.6121530864197533, 1.261986177565125, 0.7283977691141434, 0.0, 9.869950810185184, 8.012375460255576, 6.309930887825625, 4.836459259259259, 7.533017489711934, 3.839550411522634, 3.349210758377425, 2.5115003429355283, 3.666049382716049, 2.86409624094203, 1.6046153549382718, 0.727984916367981, 0.0), # 93
(9.92903125186378, 7.967465031638567, 8.008289537608597, 8.571264618558777, 7.320560665967347, 3.5077238098867043, 3.3346650951189805, 2.7310844383478132, 3.759291361835086, 1.6065332096075746, 1.2570326116919686, 0.7263998788695563, 0.0, 9.85256505058299, 7.990398667565118, 6.285163058459842, 4.819599628822722, 7.518582723670172, 3.823518213686939, 3.3346650951189805, 2.5055170070619317, 3.6602803329836733, 2.8570882061862592, 1.6016579075217197, 0.7243150028762335, 0.0), # 94
(9.90051142124411, 7.926974783176247, 7.993335976794697, 8.550004403180354, 7.308796632507598, 3.499255385357923, 3.320071478522822, 2.719788332571255, 3.7520323693034596, 1.6008983565991557, 1.2520440762399827, 0.7243856239822234, 0.0, 9.834857788923182, 7.968241863804456, 6.260220381199914, 4.8026950697974655, 7.504064738606919, 3.8077036655997567, 3.320071478522822, 2.4994681323985164, 3.654398316253799, 2.850001467726785, 1.5986671953589393, 0.7206340711978407, 0.0), # 95
(9.871576489533012, 7.886382317801674, 7.978219521604939, 8.528523777173913, 7.296822803195352, 3.4907122770919066, 3.3054352111214853, 2.708652263374486, 3.7447381687242793, 1.5952486564996373, 1.247027777777778, 0.7223559670781895, 0.0, 9.816854745370371, 7.945915637860083, 6.23513888888889, 4.785745969498911, 7.489476337448559, 3.7921131687242804, 3.3054352111214853, 2.4933659122085046, 3.648411401597676, 2.8428412590579715, 1.595643904320988, 0.7169438470728796, 0.0), # 96
(9.842272260988848, 7.845706618655694, 7.962943601394604, 8.506838441022543, 7.284655316173109, 3.482111555657166, 3.2907615954475067, 2.697680803231215, 3.7374151615607376, 1.589584238414159, 1.2419909228739638, 0.7203118707834976, 0.0, 9.798581640089164, 7.923430578618472, 6.209954614369819, 4.768752715242476, 7.474830323121475, 3.7767531245237014, 3.2907615954475067, 2.4872225397551184, 3.6423276580865545, 2.8356128136741816, 1.5925887202789208, 0.7132460562414268, 0.0), # 97
(9.812644539869984, 7.804966668879153, 7.947511645518976, 8.48496409520934, 7.272310309583368, 3.4734702916222124, 3.276055934033421, 2.68687852461515, 3.7300697492760246, 1.5839052314478608, 1.236940718097151, 0.7182542977241916, 0.0, 9.78006419324417, 7.900797274966106, 6.184703590485755, 4.751715694343581, 7.460139498552049, 3.7616299344612103, 3.276055934033421, 2.48105020830158, 3.636155154791684, 2.8283213650697805, 1.589502329103795, 0.7095424244435595, 0.0), # 98
(9.782739130434782, 7.764181451612902, 7.931927083333334, 8.462916440217391, 7.259803921568627, 3.464805555555556, 3.261323529411765, 2.67625, 3.7227083333333333, 1.5782117647058826, 1.2318843700159492, 0.7161842105263159, 0.0, 9.761328125, 7.878026315789473, 6.159421850079745, 4.734635294117647, 7.445416666666667, 3.7467500000000005, 3.261323529411765, 2.474861111111111, 3.6299019607843137, 2.820972146739131, 1.5863854166666669, 0.7058346774193549, 0.0), # 99
(9.752601836941611, 7.723369949997786, 7.916193344192958, 8.44071117652979, 7.247152290271389, 3.4561344180257074, 3.2465696841150726, 2.665799801859473, 3.715337315195854, 1.572503967293365, 1.2268290851989685, 0.714102571815914, 0.0, 9.742399155521262, 7.8551282899750525, 6.134145425994841, 4.717511901880093, 7.430674630391708, 3.732119722603262, 3.2465696841150726, 2.468667441446934, 3.6235761451356945, 2.8135703921765973, 1.5832386688385918, 0.7021245409088898, 0.0), # 100
(9.722278463648834, 7.682551147174654, 7.900313857453133, 8.41836400462963, 7.234371553834153, 3.4474739496011786, 3.231799700675881, 2.6555325026672763, 3.7079630963267793, 1.5667819683154474, 1.2217820702148188, 0.7120103442190294, 0.0, 9.723303004972564, 7.832113786409323, 6.108910351074094, 4.7003459049463405, 7.415926192653559, 3.7177455037341867, 3.231799700675881, 2.4624813925722706, 3.6171857769170765, 2.806121334876544, 1.5800627714906266, 0.6984137406522414, 0.0), # 101
(9.691814814814816, 7.641744026284349, 7.884292052469135, 8.395890625, 7.221477850399419, 3.4388412208504806, 3.217018881626725, 2.645452674897119, 3.7005920781893, 1.56104589687727, 1.2167505316321108, 0.7099084903617069, 0.0, 9.704065393518519, 7.808993393978774, 6.083752658160553, 4.683137690631809, 7.4011841563786, 3.703633744855967, 3.217018881626725, 2.4563151577503435, 3.6107389251997093, 2.798630208333334, 1.5768584104938272, 0.6947040023894864, 0.0), # 102
(9.661256694697919, 7.60096757046772, 7.8681313585962505, 8.373306738123993, 7.208487318109686, 3.430253302342123, 3.20223252950014, 2.63556489102271, 3.6932306622466085, 1.5552958820839726, 1.211741676019454, 0.7077979728699895, 0.0, 9.68471204132373, 7.785777701569883, 6.058708380097269, 4.6658876462519165, 7.386461324493217, 3.689790847431794, 3.20223252950014, 2.4501809302443736, 3.604243659054843, 2.7911022460413317, 1.5736262717192502, 0.6909970518607019, 0.0), # 103
(9.63064990755651, 7.560240762865614, 7.851835205189758, 8.350628044484703, 7.195416095107452, 3.421727264644617, 3.187445946828663, 2.6258737235177567, 3.685885249961896, 1.5495320530406955, 1.2067627099454585, 0.7056797543699213, 0.0, 9.665268668552812, 7.762477298069133, 6.033813549727292, 4.648596159122086, 7.371770499923792, 3.6762232129248593, 3.187445946828663, 2.4440909033175835, 3.597708047553726, 2.783542681494901, 1.5703670410379515, 0.687294614805965, 0.0), # 104
(9.600040257648953, 7.519582586618876, 7.835407021604938, 8.327870244565217, 7.182280319535221, 3.4132801783264752, 3.172664436144829, 2.6163837448559675, 3.6785622427983538, 1.5437545388525786, 1.201820839978735, 0.7035547974875461, 0.0, 9.64576099537037, 7.739102772363006, 6.009104199893674, 4.631263616557734, 7.3571244855967075, 3.662937242798354, 3.172664436144829, 2.4380572702331964, 3.5911401597676105, 2.775956748188406, 1.5670814043209877, 0.6835984169653525, 0.0), # 105
(9.569473549233614, 7.479012024868357, 7.818850237197074, 8.305049038848631, 7.1690961295354905, 3.404929113956206, 3.1578932999811724, 2.6070995275110502, 3.6712680422191735, 1.5379634686247616, 1.1969232726878927, 0.701424064848908, 0.0, 9.626214741941014, 7.715664713337986, 5.9846163634394625, 4.613890405874283, 7.342536084438347, 3.6499393385154706, 3.1578932999811724, 2.4320922242544327, 3.5845480647677452, 2.768349679616211, 1.5637700474394147, 0.6799101840789417, 0.0), # 106
(9.538995586568856, 7.438548060754901, 7.802168281321446, 8.282180127818036, 7.155879663250759, 3.3966911421023225, 3.1431378408702306, 2.5980256439567144, 3.6640090496875475, 1.532158971462385, 1.1920772146415421, 0.6992885190800504, 0.0, 9.606655628429355, 7.692173709880553, 5.96038607320771, 4.596476914387154, 7.328018099375095, 3.6372359015394005, 3.1431378408702306, 2.426207958644516, 3.5779398316253794, 2.760726709272679, 1.5604336562642893, 0.6762316418868093, 0.0), # 107
(9.508652173913044, 7.398209677419356, 7.785364583333334, 8.259279211956523, 7.1426470588235285, 3.3885833333333335, 3.1284033613445374, 2.589166666666667, 3.656791666666667, 1.5263411764705888, 1.1872898724082936, 0.6971491228070177, 0.0, 9.587109375, 7.668640350877193, 5.936449362041468, 4.579023529411765, 7.313583333333334, 3.624833333333334, 3.1284033613445374, 2.4204166666666667, 3.5713235294117642, 2.7530930706521746, 1.557072916666667, 0.6725645161290325, 0.0), # 108
(9.478489115524543, 7.358015858002567, 7.768442572588021, 8.23636199174718, 7.129414454396299, 3.3806227582177515, 3.113695163936631, 2.580527168114617, 3.6496222946197223, 1.5205102127545123, 1.1825684525567568, 0.6950068386558532, 0.0, 9.567601701817559, 7.645075225214384, 5.9128422627837836, 4.561530638263536, 7.299244589239445, 3.612738035360464, 3.113695163936631, 2.4147305415841083, 3.5647072271981495, 2.7454539972490606, 1.5536885145176043, 0.668910532545688, 0.0), # 109
(9.448552215661715, 7.317985585645383, 7.751405678440788, 8.213444167673108, 7.116197988111569, 3.3728264873240867, 3.0990185511790447, 2.5721117207742723, 3.6425073350099066, 1.5146662094192962, 1.177920161655542, 0.6928626292526012, 0.0, 9.54815832904664, 7.621488921778612, 5.8896008082777085, 4.543998628257887, 7.285014670019813, 3.600956409083981, 3.0990185511790447, 2.409161776660062, 3.5580989940557846, 2.737814722557703, 1.5502811356881578, 0.6652714168768531, 0.0), # 110
(9.41888727858293, 7.278137843488651, 7.7342573302469155, 8.190541440217391, 7.103013798111837, 3.365211591220851, 3.0843788256043156, 2.5639248971193416, 3.635453189300412, 1.5088092955700803, 1.173352206273259, 0.6907174572233054, 0.0, 9.528804976851852, 7.597892029456357, 5.866761031366295, 4.526427886710239, 7.270906378600824, 3.5894948559670783, 3.0843788256043156, 2.4037225651577505, 3.5515068990559184, 2.7301804800724643, 1.546851466049383, 0.6616488948626047, 0.0), # 111
(9.38954010854655, 7.238491614673214, 7.717000957361684, 8.167669509863124, 7.089878022539605, 3.357795140476554, 3.069781289744979, 2.5559712696235333, 3.628466258954427, 1.5029396003120044, 1.1688717929785184, 0.6885722851940093, 0.0, 9.509567365397805, 7.574295137134101, 5.844358964892591, 4.5088188009360115, 7.256932517908854, 3.5783597774729463, 3.069781289744979, 2.3984251003403956, 3.5449390112698027, 2.7225565032877084, 1.543400191472337, 0.6580446922430195, 0.0), # 112
(9.360504223703044, 7.1991320672204555, 7.699681523543391, 8.14487541186903, 7.076783786782469, 3.3505906987084666, 3.0552629818283847, 2.548271903658586, 3.6215709370862066, 1.4970761841531826, 1.1644873176921446, 0.6864327447087024, 0.0, 9.490443900843221, 7.550760191795725, 5.8224365884607225, 4.491228552459547, 7.243141874172413, 3.5675806651220205, 3.0552629818283847, 2.3932790705060474, 3.5383918933912346, 2.7149584706230105, 1.5399363047086783, 0.654466551565496, 0.0), # 113
(9.331480897900065, 7.16044741823174, 7.682538062518016, 8.122342065958001, 7.063595569710884, 3.343581854975776, 3.0410091042052896, 2.5409213581271333, 3.6148730119043533, 1.491328791978196, 1.1602073895188663, 0.684326014342748, 0.0, 9.471275414160035, 7.5275861577702265, 5.801036947594331, 4.473986375934587, 7.229746023808707, 3.557289901377987, 3.0410091042052896, 2.3882727535541255, 3.531797784855442, 2.7074473553193346, 1.5365076125036032, 0.6509497652937947, 0.0), # 114
(9.302384903003995, 7.122451598792792, 7.665580777256098, 8.100063378886334, 7.050271785259067, 3.3367503822909463, 3.027029825095781, 2.533917772616129, 3.6083749928895963, 1.4857063319970194, 1.1560257519045158, 0.6822531318799043, 0.0, 9.452006631660376, 7.5047844506789465, 5.7801287595225785, 4.457118995991058, 7.216749985779193, 3.5474848816625806, 3.027029825095781, 2.3833931302078186, 3.5251358926295335, 2.700021126295445, 1.5331161554512198, 0.647495599890254, 0.0), # 115
(9.273179873237634, 7.0850892578507265, 7.648776824986561, 8.077999612699802, 7.036792350922519, 3.330080178417474, 3.0133024087639466, 2.5272417970412473, 3.6020604464092765, 1.480198339612387, 1.1519343218785802, 0.6802102664572789, 0.0, 9.43260725975589, 7.482312931030067, 5.7596716093929015, 4.44059501883716, 7.204120892818553, 3.5381385158577463, 3.0133024087639466, 2.3786286988696244, 3.5183961754612594, 2.6926665375666015, 1.5297553649973124, 0.6440990234409752, 0.0), # 116
(9.243829442823772, 7.04830504435266, 7.632093362938321, 8.056111029444182, 7.02313718419674, 3.323555141118853, 2.9998041194738763, 2.5208740813181603, 3.5959129388307343, 1.4747943502270324, 1.1479250164705472, 0.6781935872119792, 0.0, 9.413047004858225, 7.46012945933177, 5.739625082352736, 4.424383050681096, 7.1918258776614685, 3.5292237138454245, 2.9998041194738763, 2.3739679579420376, 3.51156859209837, 2.6853703431480613, 1.5264186725876645, 0.6407550040320601, 0.0), # 117
(9.214297245985211, 7.0120436072457135, 7.615497548340306, 8.03435789116525, 7.009286202577227, 3.317159168158581, 2.9865122214896576, 2.51479527536254, 3.5899160365213114, 1.46948389924369, 1.143989752709904, 0.6761992632811126, 0.0, 9.393295573379024, 7.438191896092237, 5.71994876354952, 4.40845169773107, 7.179832073042623, 3.5207133855075567, 2.9865122214896576, 2.369399405827558, 3.5046431012886137, 2.678119297055084, 1.5230995096680613, 0.6374585097496104, 0.0), # 118
(9.184546916944742, 6.976249595477001, 7.598956538421437, 8.012700459908778, 6.99521932355948, 3.3108761573001524, 2.973403979075378, 2.5089860290900607, 3.5840533058483475, 1.4642565220650932, 1.1401204476261382, 0.6742234638017862, 0.0, 9.373322671729932, 7.416458101819647, 5.70060223813069, 4.392769566195279, 7.168106611696695, 3.5125804407260848, 2.973403979075378, 2.3649115409286803, 3.49760966177974, 2.670900153302927, 1.5197913076842873, 0.6342045086797276, 0.0), # 119
(9.154542089925162, 6.940867657993644, 7.582437490410635, 7.991098997720545, 6.980916464638998, 3.304690006307063, 2.9604566564951265, 2.5034269924163928, 3.578308313179186, 1.4591017540939766, 1.136309018248736, 0.6722623579111081, 0.0, 9.353098006322597, 7.394885937022188, 5.68154509124368, 4.377305262281929, 7.156616626358372, 3.50479778938295, 2.9604566564951265, 2.360492861647902, 3.490458232319499, 2.663699665906849, 1.516487498082127, 0.6309879689085133, 0.0), # 120
(9.124246399149268, 6.90584244374276, 7.565907561536823, 7.969513766646325, 6.966357543311279, 3.29858461294281, 2.94764751801299, 2.4980988152572112, 3.572664624881166, 1.4540091307330743, 1.1325473816071863, 0.6703121147461852, 0.0, 9.33259128356866, 7.373433262208036, 5.662736908035931, 4.362027392199222, 7.145329249762332, 3.497338341360096, 2.94764751801299, 2.356131866387721, 3.4831787716556395, 2.656504588882109, 1.5131815123073646, 0.6278038585220692, 0.0), # 121
(9.093623478839854, 6.871118601671464, 7.549333909028926, 7.947905028731892, 6.951522477071823, 3.292543874970886, 2.9349538278930587, 2.492982147528187, 3.5671058073216297, 1.4489681873851195, 1.1288274547309753, 0.6683689034441251, 0.0, 9.31177220987977, 7.352057937885375, 5.644137273654876, 4.346904562155357, 7.1342116146432595, 3.490175006539462, 2.9349538278930587, 2.351817053550633, 3.4757612385359113, 2.6493016762439643, 1.5098667818057854, 0.6246471456064968, 0.0), # 122
(9.062636963219719, 6.836640780726876, 7.532683690115864, 7.92623304602302, 6.936391183416127, 3.28655169015479, 2.9223528503994194, 2.4880576391449933, 3.5616154268679177, 1.443968459452847, 1.1251411546495909, 0.6664288931420351, 0.0, 9.290610491667572, 7.330717824562385, 5.625705773247954, 4.33190537835854, 7.123230853735835, 3.4832806948029904, 2.9223528503994194, 2.3475369215391355, 3.4681955917080636, 2.642077682007674, 1.5065367380231727, 0.621512798247898, 0.0), # 123
(9.031250486511654, 6.802353629856113, 7.515924062026559, 7.90445808056549, 6.920943579839691, 3.2805919562580144, 2.9098218497961597, 2.483305940023303, 3.5561770498873715, 1.4389994823389904, 1.1214803983925201, 0.664488252977023, 0.0, 9.269075835343711, 7.309370782747252, 5.6074019919625995, 4.316998447016971, 7.112354099774743, 3.476628316032624, 2.9098218497961597, 2.3432799687557244, 3.4604717899198456, 2.634819360188497, 1.5031848124053118, 0.618395784532374, 0.0), # 124
(8.999427682938459, 6.768201798006293, 7.499022181989936, 7.88254039440507, 6.905159583838015, 3.274648571044058, 2.8973380903473696, 2.478707700078788, 3.5507742427473308, 1.4340507914462837, 1.1178371029892504, 0.6625431520861957, 0.0, 9.247137947319828, 7.2879746729481525, 5.5891855149462515, 4.30215237433885, 7.1015484854946616, 3.470190780110303, 2.8973380903473696, 2.3390346936028985, 3.4525797919190073, 2.6275134648016905, 1.4998044363979874, 0.6152910725460268, 0.0), # 125
(8.967132186722928, 6.734129934124536, 7.481945207234916, 7.8604402495875405, 6.889019112906595, 3.2687054322764144, 2.884878836317135, 2.474243569227122, 3.545390571815139, 1.4291119221774609, 1.1142031854692689, 0.6605897596066612, 0.0, 9.224766534007578, 7.266487355673273, 5.571015927346345, 4.287335766532382, 7.090781143630278, 3.463940996917971, 2.884878836317135, 2.334789594483153, 3.4445095564532977, 2.620146749862514, 1.4963890414469831, 0.6121936303749579, 0.0), # 126
(8.93432763208786, 6.7000826871579555, 7.464660294990421, 7.838117908158674, 6.8725020845409315, 3.26274643771858, 2.872421351969547, 2.469894197383977, 3.5400096034581354, 1.4241724099352562, 1.1105705628620632, 0.6586242446755264, 0.0, 9.201931301818599, 7.244866691430789, 5.552852814310316, 4.272517229805768, 7.080019206916271, 3.457851876337568, 2.872421351969547, 2.3305331697989855, 3.4362510422704657, 2.612705969386225, 1.4929320589980841, 0.6090984261052688, 0.0), # 127
(8.900977653256046, 6.666004706053673, 7.447134602485375, 7.815533632164248, 6.855588416236526, 3.2567554851340508, 2.859942901568691, 2.465640234465026, 3.534614904043661, 1.4192217901224033, 1.1069311521971208, 0.6566427764298991, 0.0, 9.178601957164537, 7.223070540728888, 5.534655760985604, 4.257665370367209, 7.069229808087322, 3.4518963282510366, 2.859942901568691, 2.3262539179528936, 3.427794208118263, 2.6051778773880834, 1.4894269204970751, 0.6060004278230613, 0.0), # 128
(8.867045884450281, 6.631840639758805, 7.4293352869486995, 7.792647683650037, 6.838258025488874, 3.250716472286322, 2.8474207493786565, 2.4614623303859418, 3.529190039939058, 1.4142495981416365, 1.1032768705039286, 0.6546415240068865, 0.0, 9.154748206457038, 7.20105676407575, 5.516384352519642, 4.242748794424909, 7.058380079878116, 3.4460472625403185, 2.8474207493786565, 2.321940337347373, 3.419129012744437, 2.597549227883346, 1.4858670573897401, 0.6028946036144368, 0.0), # 129
(8.832495959893366, 6.5975351372204685, 7.411229505609316, 7.769420324661814, 6.820490829793475, 3.2446132969388883, 2.8348321596635313, 2.457341135062396, 3.5237185775116666, 1.4092453693956895, 1.0995996348119743, 0.6526166565435961, 0.0, 9.130339756107748, 7.178783221979556, 5.4979981740598705, 4.2277361081870675, 7.047437155023333, 3.4402775890873545, 2.8348321596635313, 2.3175809263849203, 3.4102454148967376, 2.589806774887272, 1.4822459011218634, 0.5997759215654973, 0.0), # 130
(8.797291513808094, 6.563032847385783, 7.392784415696151, 7.7458118172453565, 6.802266746645829, 3.238429856855247, 2.8221543966874045, 2.4532572984100627, 3.5181840831288285, 1.4041986392872965, 1.0958913621507447, 0.6505643431771354, 0.0, 9.105346312528312, 7.156207774948489, 5.479456810753724, 4.212595917861889, 7.036368166257657, 3.4345602177740875, 2.8221543966874045, 2.3131641834680337, 3.4011333733229145, 2.5819372724151193, 1.4785568831392302, 0.596639349762344, 0.0), # 131
(8.76139618041726, 6.528278419201865, 7.373967174438122, 7.72178242344644, 6.783565693541435, 3.2321500497988933, 2.8093647247143627, 2.449191470344614, 3.5125701231578845, 1.3990989432191914, 1.0921439695497275, 0.6484807530446118, 0.0, 9.079737582130376, 7.13328828349073, 5.460719847748638, 4.1972968296575734, 7.025140246315769, 3.4288680584824593, 2.8093647247143627, 2.3086786069992096, 3.3917828467707176, 2.573927474482147, 1.4747934348876244, 0.5934798562910787, 0.0), # 132
(8.724773593943663, 6.493216501615832, 7.354744939064153, 7.697292405310838, 6.764367587975791, 3.225757773533322, 2.7964404080084946, 2.445124300781722, 3.5068602639661752, 1.3939358165941083, 1.0883493740384103, 0.6463620552831327, 0.0, 9.053483271325586, 7.10998260811446, 5.44174687019205, 4.181807449782324, 7.0137205279323505, 3.4231740210944106, 2.7964404080084946, 2.3041126953809443, 3.3821837939878954, 2.5657641351036133, 1.4709489878128308, 0.590292409237803, 0.0), # 133
(8.687387388610095, 6.457791743574804, 7.33508486680317, 7.672302024884328, 6.7446523474443945, 3.2192369258220297, 2.7833587108338893, 2.44103643963706, 3.5010380719210428, 1.388698794814781, 1.0844994926462799, 0.6442044190298056, 0.0, 9.026553086525583, 7.0862486093278605, 5.422497463231399, 4.166096384444343, 7.0020761438420855, 3.417451015491884, 2.7833587108338893, 2.2994549470157355, 3.3723261737221972, 2.557434008294776, 1.4670169733606342, 0.5870719766886187, 0.0), # 134
(8.649201198639354, 6.421948794025897, 7.314954114884091, 7.646771544212684, 6.724399889442747, 3.212571404428512, 2.770096897454634, 2.4369085368263, 3.4950871133898262, 1.3833774132839443, 1.0805862424028239, 0.6420040134217377, 0.0, 8.99891673414202, 7.0620441476391145, 5.402931212014119, 4.150132239851832, 6.9901742267796525, 3.41167195155682, 2.770096897454634, 2.2946938603060802, 3.3621999447213735, 2.548923848070895, 1.4629908229768183, 0.583813526729627, 0.0), # 135
(8.610178658254235, 6.385632301916229, 7.294319840535841, 7.62066122534168, 6.703590131466344, 3.205745107116265, 2.7566322321348173, 2.4327212422651154, 3.4889909547398688, 1.3779612074043308, 1.0766015403375297, 0.6397570075960368, 0.0, 8.970543920586536, 7.037327083556404, 5.383007701687648, 4.133883622212991, 6.9779819094797375, 3.4058097391711617, 2.7566322321348173, 2.289817933654475, 3.351795065733172, 2.540220408447227, 1.4588639681071682, 0.58051202744693, 0.0), # 136
(8.570283401677534, 6.348786916192918, 7.273149200987342, 7.593931330317094, 6.682202991010689, 3.1987419316487826, 2.7429419791385277, 2.428455205869179, 3.4827331623385107, 1.3724397125786756, 1.0725373034798844, 0.63745957068981, 0.0, 8.941404352270776, 7.012055277587909, 5.362686517399421, 4.117319137736026, 6.965466324677021, 3.3998372882168506, 2.7429419791385277, 2.284815665463416, 3.3411014955053444, 2.5313104434390317, 1.4546298401974684, 0.577162446926629, 0.0), # 137
(8.529479063132047, 6.311357285803083, 7.251409353467515, 7.566542121184698, 6.660218385571278, 3.1915457757895624, 2.729003402729852, 2.4240910775541624, 3.4762973025530934, 1.3668024642097119, 1.0683854488593754, 0.6351078718401649, 0.0, 8.91146773560639, 6.986186590241813, 5.341927244296877, 4.100407392629135, 6.952594605106187, 3.3937275085758274, 2.729003402729852, 2.2796755541354017, 3.330109192785639, 2.5221807070615663, 1.450281870693503, 0.5737597532548258, 0.0), # 138
(8.487729276840568, 6.273288059693839, 7.229067455205284, 7.538453859990269, 6.63761623264361, 3.184140537302099, 2.7147937671728797, 2.4196095072357395, 3.469666941750957, 1.3610389977001744, 1.0641378935054902, 0.6326980801842089, 0.0, 8.880703777005019, 6.959678882026297, 5.32068946752745, 4.083116993100523, 6.939333883501914, 3.3874533101300353, 2.7147937671728797, 2.274386098072928, 3.318808116321805, 2.51281795333009, 1.4458134910410567, 0.5702989145176218, 0.0), # 139
(8.444997677025897, 6.234523886812306, 7.206090663429573, 7.509626808779583, 6.614376449723186, 3.176510113949888, 2.7002903367316984, 2.4149911448295818, 3.462825646299444, 1.3551388484527966, 1.0597865544477159, 0.6302263648590494, 0.0, 8.849082182878314, 6.932490013449542, 5.298932772238579, 4.0654165453583895, 6.925651292598888, 3.3809876027614147, 2.7002903367316984, 2.2689357956784915, 3.307188224861593, 2.5032089362598615, 1.4412181326859146, 0.5667748988011189, 0.0), # 140
(8.40124789791083, 6.195009416105602, 7.1824461353693, 7.480021229598415, 6.590478954305501, 3.1686384034964257, 2.6854703756703975, 2.4102166402513627, 3.455756982565893, 1.349091551870313, 1.0553233487155398, 0.6276888950017938, 0.0, 8.816572659637913, 6.904577845019731, 5.276616743577699, 4.047274655610939, 6.911513965131786, 3.3743032963519077, 2.6854703756703975, 2.26331314535459, 3.2952394771527507, 2.4933404098661387, 1.4364892270738603, 0.5631826741914184, 0.0), # 141
(8.356443573718156, 6.154689296520844, 7.158101028253392, 7.44959738449254, 6.565903663886058, 3.1605093037052074, 2.670311148253063, 2.4052666434167547, 3.448444516917647, 1.3428866433554572, 1.0507401933384497, 0.6250818397495496, 0.0, 8.783144913695466, 6.875900237245045, 5.253700966692247, 4.028659930066371, 6.896889033835294, 3.3673733007834565, 2.670311148253063, 2.2575066455037196, 3.282951831943029, 2.4831991281641805, 1.4316202056506786, 0.5595172087746222, 0.0), # 142
(8.310548338670674, 6.113508177005149, 7.133022499310772, 7.418315535507731, 6.540630495960352, 3.152106712339729, 2.6547899187437842, 2.4001218042414303, 3.4408718157220486, 1.3365136583109634, 1.0460290053459322, 0.6224013682394242, 0.0, 8.748768651462617, 6.846415050633665, 5.230145026729661, 4.009540974932889, 6.881743631444097, 3.360170525938002, 2.6547899187437842, 2.251504794528378, 3.270315247980176, 2.472771845169244, 1.4266044998621543, 0.5557734706368318, 0.0), # 143
(8.263525826991184, 6.071410706505636, 7.107177705770357, 7.386135944689768, 6.514639368023886, 3.1434145271634857, 2.6388839514066493, 2.3947627726410623, 3.4330224453464364, 1.3299621321395652, 1.0411817017674754, 0.619643649608525, 0.0, 8.713413579351014, 6.816080145693774, 5.205908508837376, 3.9898863964186946, 6.866044890692873, 3.3526678816974873, 2.6388839514066493, 2.245296090831061, 3.257319684011943, 2.4620453148965895, 1.4214355411540713, 0.5519464278641489, 0.0), # 144
(8.215339672902477, 6.0283415339694235, 7.080533804861075, 7.353018874084421, 6.487910197572155, 3.134416645939974, 2.6225705105057466, 2.3891701985313234, 3.424879972158151, 1.3232216002439972, 1.036190199632566, 0.6168048529939595, 0.0, 8.6770494037723, 6.784853382933553, 5.180950998162829, 3.969664800731991, 6.849759944316302, 3.344838277943853, 2.6225705105057466, 2.238869032814267, 3.2439550987860777, 2.451006291361474, 1.4161067609722149, 0.548031048542675, 0.0), # 145
(8.16595351062735, 5.984245308343629, 7.053057953811847, 7.318924585737469, 6.460422902100661, 3.1250969664326886, 2.605826860305165, 2.3833247318278863, 3.4164279625245353, 1.3162815980269928, 1.0310464159706916, 0.6138811475328351, 0.0, 8.639645831138118, 6.7526926228611845, 5.155232079853457, 3.948844794080978, 6.832855925049071, 3.3366546245590407, 2.605826860305165, 2.2322121188804918, 3.2302114510503306, 2.439641528579157, 1.4106115907623695, 0.5440223007585119, 0.0), # 146
(8.1153309743886, 5.93906667857537, 7.024717309851591, 7.283813341694685, 6.4321573991049, 3.1154393864051255, 2.5886302650689905, 2.3772070224464232, 3.40764998281293, 1.3091316608912866, 1.0257422678113395, 0.6108687023622593, 0.0, 8.601172567860118, 6.719555725984851, 5.1287113390566965, 3.9273949826738592, 6.81529996562586, 3.3280898314249923, 2.5886302650689905, 2.2253138474322327, 3.21607869955245, 2.4279377805648954, 1.4049434619703185, 0.5399151525977609, 0.0), # 147
(8.063435698409021, 5.892750293611764, 6.9954790302092364, 7.247645404001847, 6.403093606080374, 3.105427803620781, 2.5709579890613132, 2.3707977203026074, 3.398529599390676, 1.301761324239612, 1.0202696721839972, 0.6077636866193392, 0.0, 8.561599320349941, 6.68540055281273, 5.101348360919985, 3.905283972718835, 6.797059198781352, 3.3191168084236504, 2.5709579890613132, 2.2181627168719866, 3.201546803040187, 2.4158818013339496, 1.3990958060418472, 0.535704572146524, 0.0), # 148
(8.010231316911412, 5.845240802399927, 6.965310272113703, 7.210381034704727, 6.37321144052258, 3.0950461158431497, 2.5527872965462204, 2.3640774753121114, 3.3890503786251127, 1.2941601234747035, 1.0146205461181517, 0.6045622694411826, 0.0, 8.520895795019237, 6.650184963853008, 5.073102730590758, 3.88248037042411, 6.778100757250225, 3.3097084654369557, 2.5527872965462204, 2.21074722560225, 3.18660572026129, 2.403460344901576, 1.3930620544227408, 0.5313855274909026, 0.0), # 149
(7.955681464118564, 5.796482853886981, 6.934178192793912, 7.171980495849104, 6.342490819927017, 3.0842782208357287, 2.5340954517878003, 2.3570269373906068, 3.3791958868835836, 1.2863175939992944, 1.0087868066432906, 0.601260619964897, 0.0, 8.479031698279647, 6.6138668196138655, 5.043934033216452, 3.8589527819978824, 6.758391773767167, 3.2998377123468496, 2.5340954517878003, 2.2030558720255207, 3.1712454099635083, 2.390660165283035, 1.3868356385587826, 0.5269529867169983, 0.0), # 150
(7.899749774253275, 5.746421097020041, 6.902049949478785, 7.132404049480748, 6.310911661789184, 3.0731080163620113, 2.5148597190501416, 2.3496267564537683, 3.3689496905334293, 1.2782232712161197, 1.002760370788901, 0.5978549073275894, 0.0, 8.435976736542818, 6.576403980603482, 5.013801853944504, 3.8346698136483583, 6.737899381066859, 3.2894774590352753, 2.5148597190501416, 2.1950771545442938, 3.155455830894592, 2.377468016493583, 1.3804099898957571, 0.5224019179109128, 0.0), # 151
(7.842399881538343, 5.6950001807462245, 6.868892699397251, 7.091611957645439, 6.278453883604579, 3.0615194001854955, 2.4950573625973322, 2.3418575824172674, 3.3582953559419897, 1.2698666905279126, 0.9965331555844703, 0.5943413006663675, 0.0, 8.391700616220398, 6.537754307330042, 4.982665777922351, 3.809600071583737, 6.716590711883979, 3.2786006153841742, 2.4950573625973322, 2.1867995715610684, 3.1392269418022893, 2.36387065254848, 1.3737785398794504, 0.5177272891587478, 0.0), # 152
(7.78359542019656, 5.642164754012652, 6.834673599778224, 7.049564482388949, 6.245097402868703, 3.049496270069676, 2.4746656466934596, 2.333700065196776, 3.3472164494766075, 1.2612373873374074, 0.9900970780594861, 0.5907159691183387, 0.0, 8.346173043724027, 6.497875660301725, 4.95048539029743, 3.783712162012222, 6.694432898953215, 3.2671800912754865, 2.4746656466934596, 2.17821162147834, 3.1225487014343516, 2.3498548274629836, 1.3669347199556448, 0.5129240685466048, 0.0), # 153
(7.723300024450729, 5.587859465766439, 6.7993598078506325, 7.006221885757057, 6.210822137077053, 3.0370225237780484, 2.453661835602614, 2.325134854707968, 3.3356965375046217, 1.2523248970473384, 0.9834440552434354, 0.5869750818206104, 0.0, 8.299363725465357, 6.456725900026714, 4.917220276217177, 3.7569746911420143, 6.671393075009243, 3.2551887965911552, 2.453661835602614, 2.169301802698606, 3.1054110685385266, 2.335407295252353, 1.3598719615701265, 0.5079872241605854, 0.0), # 154
(7.6614773285236355, 5.532028964954703, 6.762918480843396, 6.961544429795533, 6.175608003725131, 3.0240820590741087, 2.4320231935888805, 2.316142600866515, 3.323719186393376, 1.2431187550604388, 0.9765660041658056, 0.5831148079102902, 0.0, 8.251242367856026, 6.414262887013191, 4.882830020829028, 3.7293562651813157, 6.647438372786752, 3.242599641213121, 2.4320231935888805, 2.160058613624363, 3.0878040018625654, 2.320514809931845, 1.3525836961686795, 0.5029117240867913, 0.0), # 155
(7.598090966638081, 5.474617900524564, 6.725316775985439, 6.915492376550157, 6.139434920308432, 3.0106587737213526, 2.40972698491635, 2.3067039535880913, 3.3112679625102084, 1.2336084967794434, 0.9694548418560842, 0.5791313165244852, 0.0, 8.201778677307685, 6.370444481769337, 4.84727420928042, 3.7008254903383295, 6.622535925020417, 3.2293855350233276, 2.40972698491635, 2.150470552658109, 3.069717460154216, 2.3051641255167192, 1.3450633551970879, 0.49769253641132405, 0.0), # 156
(7.533104573016862, 5.415570921423138, 6.686521850505682, 6.868025988066703, 6.102282804322456, 2.9967365654832747, 2.3867504738491094, 2.2967995627883675, 3.2983264322224626, 1.2237836576070855, 0.9621024853437583, 0.5750207768003032, 0.0, 8.150942360231976, 6.325228544803333, 4.810512426718791, 3.671350972821256, 6.596652864444925, 3.2155193879037145, 2.3867504738491094, 2.140526118202339, 3.051141402161228, 2.2893419960222348, 1.3373043701011365, 0.4923246292202853, 0.0), # 157
(7.464680946405239, 5.353748694041236, 6.644659961585297, 6.817327186238432, 6.062454070580665, 2.9814309445183143, 2.3625533604639286, 2.285748730145572, 3.2838873638663655, 1.213341479072786, 0.9542659587564906, 0.570633297016195, 0.0, 8.096485859415345, 6.276966267178143, 4.771329793782452, 3.640024437218358, 6.567774727732731, 3.200048222203801, 2.3625533604639286, 2.129593531798796, 3.0312270352903323, 2.2724423954128112, 1.3289319923170593, 0.48670442673102154, 0.0), # 158
(7.382286766978402, 5.282809876299521, 6.58894818200249, 6.7529828690913405, 6.010127539854418, 2.95965229467081, 2.334106381692858, 2.2696723053184926, 3.2621424204073812, 1.2005702485246865, 0.9445694892698324, 0.5651135436402591, 0.0, 8.025427646920194, 6.216248980042849, 4.722847446349162, 3.601710745574059, 6.5242848408147625, 3.17754122744589, 2.334106381692858, 2.114037353336293, 3.005063769927209, 2.250994289697114, 1.3177896364004982, 0.4802554432999565, 0.0), # 159
(7.284872094904309, 5.202172001162321, 6.51826746496324, 6.673933132806645, 5.94428008756453, 2.9308657560278157, 2.301121874191892, 2.248166328969728, 3.2324750757428835, 1.1853014129657236, 0.9328765847682567, 0.5583751624073207, 0.0, 7.93642060889358, 6.142126786480525, 4.664382923841283, 3.55590423889717, 6.464950151485767, 3.147432860557619, 2.301121874191892, 2.0934755400198686, 2.972140043782265, 2.2246443776022153, 1.3036534929926482, 0.47292472737839286, 0.0), # 160
(7.17322205458596, 5.11236079574043, 6.4333724765919245, 6.5809293778175455, 5.865595416188075, 2.895420057582683, 2.263840723003438, 2.2215002221290754, 3.1952765889996724, 1.1676645482927346, 0.9192902757666179, 0.5504806224089643, 0.0, 7.830374044819097, 6.055286846498606, 4.596451378833089, 3.5029936448782033, 6.390553177999345, 3.1101003109807053, 2.263840723003438, 2.0681571839876307, 2.9327977080940375, 2.1936431259391824, 1.2866744953183848, 0.46476007234003913, 0.0), # 161
(7.048121770426357, 5.013901987144635, 6.335017883012913, 6.474723004557244, 5.7747572282021356, 2.853663928328766, 2.2225038131699044, 2.1899434058263343, 3.150938219304545, 1.147789230402558, 0.9039135927797701, 0.5414923927367745, 0.0, 7.708197254180333, 5.956416320104519, 4.519567963898851, 3.4433676912076736, 6.30187643860909, 3.065920768156868, 2.2225038131699044, 2.03833137737769, 2.8873786141010678, 2.158241001519082, 1.2670035766025827, 0.4558092715586033, 0.0), # 162
(6.9103563668284975, 4.90732130248573, 6.223958350350585, 6.35606541345895, 5.672449226083792, 2.8059460972594175, 2.1773520297337003, 2.153765301091302, 3.0998512257843016, 1.1258050351920315, 0.8868495663225682, 0.5314729424823361, 0.0, 7.570799536460879, 5.846202367305696, 4.43424783161284, 3.3774151055760937, 6.199702451568603, 3.015271421527823, 2.1773520297337003, 2.0042472123281554, 2.836224613041896, 2.118688471152984, 1.2447916700701172, 0.4461201184077937, 0.0), # 163
(6.760710968195384, 4.793144468874502, 6.100948544729314, 6.225708004955863, 5.559355112310126, 2.752615293367992, 2.128626257737233, 2.113235328953779, 3.0424068675657407, 1.1018415385579923, 0.8682012269098661, 0.5204847407372336, 0.0, 7.419090191144328, 5.725332148109569, 4.34100613454933, 3.305524615673976, 6.0848137351314815, 2.9585294605352903, 2.128626257737233, 1.9661537809771372, 2.779677556155063, 2.075236001651955, 1.2201897089458629, 0.43574040626131844, 0.0), # 164
(6.599970698930017, 4.671897213421746, 5.966743132273474, 6.084402179481189, 5.436158589358215, 2.694020245647842, 2.076567382222911, 2.068622910443561, 2.9789964037756596, 1.0760283163972786, 0.8480716050565187, 0.5085902565930517, 0.0, 7.25397851771427, 5.594492822523568, 4.2403580252825925, 3.2280849491918353, 5.957992807551319, 2.8960720746209856, 2.076567382222911, 1.9243001754627442, 2.7180792946791077, 2.0281340598270634, 1.1933486264546949, 0.42471792849288603, 0.0), # 165
(6.428920683435397, 4.54410526323825, 5.82209677910744, 5.932899337468126, 5.3035433597051425, 2.630509683092322, 2.021416288233143, 2.020197466590449, 2.9100110935408576, 1.0484949446067282, 0.8265637312773799, 0.49585195914137514, 0.0, 7.0763738156542955, 5.454371550555126, 4.1328186563869, 3.145484833820184, 5.820022187081715, 2.8282764532266285, 2.021416288233143, 1.8789354879230868, 2.6517716798525712, 1.9776331124893758, 1.1644193558214881, 0.41310047847620457, 0.0), # 166
(6.248346046114523, 4.410294345434805, 5.667764151355587, 5.771950879349882, 5.1621931258279865, 2.562432334694784, 1.9634138608103373, 1.9682284184242402, 2.835842195988133, 1.0193709990831787, 0.8037806360873045, 0.48233231747378824, 0.0, 6.887185384447996, 5.30565549221167, 4.0189031804365225, 3.058112997249536, 5.671684391976266, 2.755519785793936, 1.9634138608103373, 1.8303088104962744, 2.5810965629139933, 1.9239836264499612, 1.1335528302711175, 0.4009358495849823, 0.0), # 167
(6.059031911370395, 4.270990187122201, 5.50449991514229, 5.60230820555966, 5.012791590203827, 2.490136929448583, 1.902800984996902, 1.9129851869747332, 2.7568809702442847, 0.9887860557234682, 0.7798253500011468, 0.468093800681876, 0.0, 6.6873225235789615, 5.149031807500635, 3.8991267500057343, 2.9663581671704042, 5.513761940488569, 2.6781792617646265, 1.902800984996902, 1.7786692353204163, 2.5063957951019136, 1.867436068519887, 1.100899983028458, 0.3882718351929274, 0.0), # 168
(5.861763403606015, 4.1267185154112305, 5.333058736591924, 5.4247227165306615, 4.856022455309747, 2.413972196347072, 1.8398185458352458, 1.8547371932717271, 2.6735186754361124, 0.9568696904244344, 0.7548009035337614, 0.45319887785722274, 0.0, 6.477694532530785, 4.985187656429449, 3.774004517668807, 2.8706090712733023, 5.347037350872225, 2.596632070580418, 1.8398185458352458, 1.724265854533623, 2.4280112276548733, 1.808240905510221, 1.066611747318385, 0.3751562286737483, 0.0), # 169
(5.657325647224384, 3.978005057412684, 5.154195281828863, 5.23994581269609, 4.692569423622822, 2.334286864383604, 1.7747074283677764, 1.7937538583450197, 2.5861465706904125, 0.9237514790829147, 0.7288103272000027, 0.4377100180914133, 0.0, 6.259210710787055, 4.814810199005545, 3.6440516360000137, 2.7712544372487433, 5.172293141380825, 2.5112554016830275, 1.7747074283677764, 1.6673477602740028, 2.346284711811411, 1.7466486042320304, 1.0308390563657726, 0.36163682340115316, 0.0), # 170
(5.4465037666285, 3.82537554023735, 4.968664216977482, 5.048728894489152, 4.523116197620137, 2.2514296625515327, 1.7077085176369027, 1.7303046032244096, 2.495155915133985, 0.8895609975957474, 0.7019566515147247, 0.4216896904760322, 0.0, 6.032780357831365, 4.638586595236354, 3.509783257573624, 2.6686829927872413, 4.99031183026797, 2.4224264445141737, 1.7077085176369027, 1.6081640446796661, 2.2615580988100685, 1.6829096314963843, 0.9937328433954964, 0.3477614127488501, 0.0), # 171
(5.230082886221365, 3.6693556909960217, 4.777220208162156, 4.851823362343048, 4.348346479778769, 2.1657493198442115, 1.6390626986850327, 1.664658848939696, 2.4009379678936282, 0.8544278218597702, 0.6743429069927823, 0.4052003641026643, 0.0, 5.799312773147303, 4.457204005129307, 3.3717145349639117, 2.56328346557931, 4.8018759357872565, 2.3305223885155746, 1.6390626986850327, 1.5469637998887225, 2.1741732398893845, 1.6172744541143496, 0.9554440416324312, 0.3335777900905475, 0.0), # 172
(5.00884813040598, 3.510471236799489, 4.58061792150726, 4.649980616690982, 4.168943972575801, 2.077594565254994, 1.5690108565545748, 1.5970860165206766, 2.303883988096141, 0.8184815277718206, 0.6460721241490297, 0.3883045080628938, 0.0, 5.5597172562184625, 4.271349588691831, 3.2303606207451483, 2.4554445833154612, 4.607767976192282, 2.235920423128947, 1.5690108565545748, 1.483996118039281, 2.0844719862879004, 1.5499935388969943, 0.916123584301452, 0.31913374879995354, 0.0), # 173
(4.783584623585344, 3.349247904758541, 4.3796120231371685, 4.443952057966156, 3.9855923784883105, 1.987314127777233, 1.4977938762879377, 1.5278555269971503, 2.204385234868321, 0.7818516912287369, 0.6172473334983214, 0.37106459144830567, 0.0, 5.314903106528433, 4.081710505931362, 3.0862366674916064, 2.34555507368621, 4.408770469736642, 2.1389977377960103, 1.4977938762879377, 1.4195100912694523, 1.9927961892441552, 1.4813173526553853, 0.8759224046274336, 0.3044770822507765, 0.0), # 174
(4.555077490162455, 3.18621142198397, 4.174957179176257, 4.2344890866017755, 3.7989753999933793, 1.8952567364042834, 1.425652642927529, 1.457236801398915, 2.102832967336968, 0.7446678881273562, 0.5879715655555117, 0.35354308335048457, 0.0, 5.0657796235608075, 3.8889739168553294, 2.939857827777558, 2.234003664382068, 4.205665934673936, 2.040131521958481, 1.425652642927529, 1.3537548117173452, 1.8994876999966896, 1.411496362200592, 0.8349914358352515, 0.28965558381672457, 0.0), # 175
(4.324111854540319, 3.0218875155865668, 3.9674080557488987, 4.0223431030310435, 3.609776739568087, 1.8017711201294973, 1.3528280415157574, 1.3854992607557703, 1.9996184446288805, 0.7070596943645169, 0.558347850835455, 0.33580245286101496, 0.0, 4.813256106799174, 3.693826981471164, 2.791739254177275, 2.1211790830935504, 3.999236889257761, 1.9396989650580787, 1.3528280415157574, 1.2869793715210696, 1.8048883697840434, 1.3407810343436815, 0.7934816111497798, 0.2747170468715061, 0.0), # 176
(4.0914728411219325, 2.856801912677122, 3.7577193189794698, 3.808265507687162, 3.4186800996895155, 1.7072060079462288, 1.2795609570950313, 1.3129123260975137, 1.8951329258708567, 0.6691566858370562, 0.528479219853006, 0.3179051690714816, 0.0, 4.5582418557271245, 3.496956859786297, 2.6423960992650297, 2.0074700575111684, 3.7902658517417134, 1.838077256536519, 1.2795609570950313, 1.2194328628187348, 1.7093400498447577, 1.269421835895721, 0.751543863795894, 0.25970926478882933, 0.0), # 177
(3.8579455743102966, 2.6914803403664256, 3.5466456349923448, 3.593007701003337, 3.226369182834742, 1.6119101288478317, 1.2060922747077587, 1.239745418453944, 1.7897676701896952, 0.6310884384418126, 0.49846870312301883, 0.299913701073469, 0.0, 4.301646169828252, 3.299050711808158, 2.4923435156150937, 1.8932653153254375, 3.5795353403793904, 1.7356435858355217, 1.2060922747077587, 1.1513643777484512, 1.613184591417371, 1.1976692336677792, 0.7093291269984691, 0.24468003094240237, 0.0), # 178
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 179
)
passenger_allighting_rate = (
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 0
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 1
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 2
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 3
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 4
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 5
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 6
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 7
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 8
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 9
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 10
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 11
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 12
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 13
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 14
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 15
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 16
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 17
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 18
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 19
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 20
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 21
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 22
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 23
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 24
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 25
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 26
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 27
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 28
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 29
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 30
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 31
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 32
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 33
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 34
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 35
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 36
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 37
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 38
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 39
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 40
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 41
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 42
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 43
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 44
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 45
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 46
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 47
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 48
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 49
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 50
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 51
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 52
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 53
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 54
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 55
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 56
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 57
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 58
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 59
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 60
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 61
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 62
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 63
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 64
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 65
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 66
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 67
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 68
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 69
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 70
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 71
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 72
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 73
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 74
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 75
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 76
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 77
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 78
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 79
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 80
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 81
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 82
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 83
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 84
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 85
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 86
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 87
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 88
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 89
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 90
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 91
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 92
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 93
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 94
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 95
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 96
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 97
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 98
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 99
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 100
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 101
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 102
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 103
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 104
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 105
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 106
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 107
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 108
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 109
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 110
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 111
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 112
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 113
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 114
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 115
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 116
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 117
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 118
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 119
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 120
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 121
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 122
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 123
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 124
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 125
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 126
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 127
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 128
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 129
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 130
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 131
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 132
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 133
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 134
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 135
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 136
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 137
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 138
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 139
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 140
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 141
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 142
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 143
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 144
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 145
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 146
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 147
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 148
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 149
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 150
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 151
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 152
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 153
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 154
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 155
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 156
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 157
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 158
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 159
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 160
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 161
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 162
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 163
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 164
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 165
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 166
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 167
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 168
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 169
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 170
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 171
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 172
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 173
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 174
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 175
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 176
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 177
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 178
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 179
)
"""
parameters for reproducibility. More information: https://numpy.org/doc/stable/reference/random/parallel.html
"""
#initial entropy
entropy = 8991598675325360468762009371570610170
#index for seed sequence child
child_seed_index = (
1, # 0
73, # 1
)
| 276.272727
| 494
| 0.769568
| 32,987
| 258,315
| 6.02601
| 0.217389
| 0.358587
| 0.344099
| 0.651977
| 0.376064
| 0.367869
| 0.364946
| 0.364131
| 0.364051
| 0.364051
| 0
| 0.849817
| 0.095751
| 258,315
| 934
| 495
| 276.568522
| 0.001194
| 0.015528
| 0
| 0.200873
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.005459
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d3df2aee3e9083811285cf5664025fae378db142
| 199
|
py
|
Python
|
nmigen/hdl/mem.py
|
psumesh/nmigen
|
7d611b8fc1d9e58853ff268ec38ff8f4131a9774
|
[
"BSD-2-Clause"
] | 528
|
2020-01-28T18:21:00.000Z
|
2021-12-09T06:27:51.000Z
|
nmigen/hdl/mem.py
|
psumesh/nmigen
|
7d611b8fc1d9e58853ff268ec38ff8f4131a9774
|
[
"BSD-2-Clause"
] | 360
|
2020-01-28T18:34:30.000Z
|
2021-12-10T08:03:32.000Z
|
nmigen/hdl/mem.py
|
psumesh/nmigen
|
7d611b8fc1d9e58853ff268ec38ff8f4131a9774
|
[
"BSD-2-Clause"
] | 100
|
2020-02-06T21:55:46.000Z
|
2021-11-25T19:20:44.000Z
|
# Deprecated compatibility shim: the nmigen project was renamed to amaranth.
# Re-export the new module's public API under the old import path and warn
# importers so they migrate to amaranth.hdl.mem.
from amaranth.hdl.mem import *
from amaranth.hdl.mem import __all__  # re-export __all__ so star-imports match the new module
import warnings
# stacklevel=2 points the warning at the importing module, not this shim.
warnings.warn("instead of nmigen.hdl.mem, use amaranth.hdl.mem",
              DeprecationWarning, stacklevel=2)
| 24.875
| 64
| 0.743719
| 27
| 199
| 5.333333
| 0.555556
| 0.166667
| 0.291667
| 0.25
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006024
| 0.165829
| 199
| 7
| 65
| 28.428571
| 0.861446
| 0
| 0
| 0
| 0
| 0
| 0.236181
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
312109b4f3e3ca41ac70893ad2ff4f0aeb173241
| 26,710
|
py
|
Python
|
testing/unit/tp/atomic_swap/test_init.py
|
FerrySchuller/remme-core
|
ca58bfcc5ff0ce6d15c2871a4e03e39f1268d789
|
[
"Apache-2.0"
] | 129
|
2018-02-13T21:37:13.000Z
|
2020-11-01T23:33:52.000Z
|
testing/unit/tp/atomic_swap/test_init.py
|
FerrySchuller/remme-core
|
ca58bfcc5ff0ce6d15c2871a4e03e39f1268d789
|
[
"Apache-2.0"
] | 95
|
2018-03-27T15:57:36.000Z
|
2019-08-26T07:35:23.000Z
|
testing/unit/tp/atomic_swap/test_init.py
|
FerrySchuller/remme-core
|
ca58bfcc5ff0ce6d15c2871a4e03e39f1268d789
|
[
"Apache-2.0"
] | 30
|
2018-02-24T15:17:37.000Z
|
2020-11-14T11:35:25.000Z
|
"""
Provide tests for atomic swap handler initialization method implementation.
"""
import datetime
import time
import pytest
from sawtooth_sdk.processor.exceptions import InvalidTransaction
from sawtooth_sdk.protobuf.processor_pb2 import TpProcessRequest
from sawtooth_sdk.protobuf.setting_pb2 import Setting
from sawtooth_sdk.protobuf.transaction_pb2 import (
Transaction,
TransactionHeader,
)
from testing.conftest import create_signer
from testing.mocks.stub import StubContext
from testing.utils.client import proto_error_msg
from remme.clients.block_info import (
CONFIG_ADDRESS,
BlockInfoClient,
)
from remme.protos.account_pb2 import Account
from remme.protos.atomic_swap_pb2 import (
AtomicSwapInfo,
AtomicSwapInitPayload,
AtomicSwapMethod,
)
from remme.protos.block_info_pb2 import BlockInfo, BlockInfoConfig
from remme.protos.transaction_pb2 import TransactionPayload
from remme.shared.utils import hash512
from remme.settings import (
SETTINGS_KEY_ZERO_ADDRESS_OWNERS,
SETTINGS_SWAP_COMMISSION,
ZERO_ADDRESS,
)
from remme.settings.helper import _make_settings_key
from remme.tp.atomic_swap import AtomicSwapHandler
from remme.tp.basic import BasicHandler
# --- Shared fixtures for the atomic swap INIT handler tests. ---

# Token amounts used across the tests (commission is charged on top of the swap amount).
TOKENS_AMOUNT_TO_SWAP = 200
SWAP_COMMISSION_AMOUNT = 100

# Bot (swap initiator) identities: Ethereum-side address and Remme-side key pair/address.
BOT_ETHEREUM_ADDRESS = '0xe6ca0e7c974f06471759e9a05d18b538c5ced11e'
BOT_PRIVATE_KEY = '1cb15ecfe1b3dc02df0003ac396037f85b98cf9f99b0beae000dc5e9e8b6dab4'
BOT_PUBLIC_KEY = '03ecc5cb4094eb05319be6c7a63ebf17133d4ffaea48cdcfd1d5fc79dac7db7b6b'
BOT_ADDRESS = '112007b9433e1da5c624ff926477141abedfd57585a36590b0a8edc4104ef28093ee30'

# Alice (swap receiver) identities.
ALICE_ETHEREUM_ADDRESS = '0x8dfe0f55a1cf9b22b8c85a9ff7a85a28a3879f71'
ALICE_ADDRESS = '112007db8a00c010402e2e3a7d03491323e761e0ea612481c518605648ceeb5ed454f7'
ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR = '0x6f4d5666332f5a575a714d4245624455612f2b4345424f704b4256704f5'

# Empty secret lock marks the bot as the swap initiator (see handler semantics).
BOT_IT_IS_INITIATOR_MARK = ''
SWAP_ID = '033102e41346242476b15a3a7966eb5249271025fc7fb0b37ed3fdb4bcce3884'

# Settings-namespace addresses derived from setting keys.
ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY = _make_settings_key(SETTINGS_SWAP_COMMISSION)
ADDRESS_TO_GET_GENESIS_MEMBERS_AS_STRING_BY = _make_settings_key(SETTINGS_KEY_ZERO_ADDRESS_OWNERS)

# State address where the handler stores the AtomicSwapInfo record for SWAP_ID.
ADDRESS_TO_STORE_SWAP_INFO_BY = BasicHandler(
    name=AtomicSwapHandler().family_name, versions=AtomicSwapHandler()._family_versions[0]
).make_address_from_data(data=SWAP_ID)

# Transaction family identification used when building TransactionHeader objects.
TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS = {
    'family_name': AtomicSwapHandler().family_name,
    'family_version': AtomicSwapHandler()._family_versions[0],
}

RANDOM_NODE_PUBLIC_KEY = '039d6881f0a71d05659e1f40b443684b93c7b7c504ea23ea8949ef5216a2236940'
RANDOM_PUBLIC_KEY = '8c87d914a6cfeaf027413760ad359b5a56bfe0eda504d879b21872c7dc5b911c'

CURRENT_TIMESTAMP = int(datetime.datetime.now().timestamp())

# Block-info fixtures: config says the latest block is 1000, and that block's
# timestamp is CURRENT_TIMESTAMP, so freshly created swaps pass the time check.
BLOCK_INFO_CONFIG_ADDRESS = CONFIG_ADDRESS
BLOCK_INFO_ADDRESS = BlockInfoClient.create_block_address(1000)
block_info_config = BlockInfoConfig()
block_info_config.latest_block = 1000
SERIALIZED_BLOCK_INFO_CONFIG = block_info_config.SerializeToString()
block_info = BlockInfo()
block_info.timestamp = CURRENT_TIMESTAMP
SERIALIZED_BLOCK_INFO = block_info.SerializeToString()

# Default transaction input/output address lists for a well-formed INIT transaction.
INPUTS = [
    ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY,
    BLOCK_INFO_CONFIG_ADDRESS,
    BLOCK_INFO_ADDRESS,
    BOT_ADDRESS,
    ZERO_ADDRESS,
    ADDRESS_TO_STORE_SWAP_INFO_BY,
]
OUTPUTS = [
    ADDRESS_TO_STORE_SWAP_INFO_BY,
    ZERO_ADDRESS,
    BOT_ADDRESS,
]
def test_atomic_swap_init_with_empty_proto():
    """
    Case: send an empty AtomicSwapInitPayload proto for init.
    Expect: invalid transaction error listing every missing required field.
    """
    # This case also lists the genesis-members settings address in inputs/outputs.
    inputs = outputs = [
        ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY,
        BLOCK_INFO_CONFIG_ADDRESS,
        BLOCK_INFO_ADDRESS,
        BOT_ADDRESS,
        ZERO_ADDRESS,
        ADDRESS_TO_STORE_SWAP_INFO_BY,
        ADDRESS_TO_GET_GENESIS_MEMBERS_AS_STRING_BY,
    ]
    # Empty payload: no fields set, so handler-side validation must reject it.
    atomic_swap_init_payload = AtomicSwapInitPayload()
    transaction_payload = TransactionPayload()
    transaction_payload.method = AtomicSwapMethod.INIT
    transaction_payload.data = atomic_swap_init_payload.SerializeToString()
    serialized_transaction_payload = transaction_payload.SerializeToString()
    transaction_header = TransactionHeader(
        signer_public_key=BOT_PUBLIC_KEY,
        family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
        family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
        inputs=inputs,
        outputs=outputs,
        dependencies=[],
        payload_sha512=hash512(data=serialized_transaction_payload),
        batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
        nonce=time.time().hex().encode(),
    )
    serialized_header = transaction_header.SerializeToString()
    transaction_request = TpProcessRequest(
        header=transaction_header,
        payload=serialized_transaction_payload,
        signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header),
    )
    mock_context = StubContext(inputs=inputs, outputs=outputs, initial_state={})
    with pytest.raises(InvalidTransaction) as error:
        AtomicSwapHandler().apply(transaction=transaction_request, context=mock_context)
    # Every required payload field must be reported as missing.
    assert proto_error_msg(
        AtomicSwapInitPayload,
        {
            'receiver_address': ['Missed address'],
            'sender_address_non_local': ['This field is required.'],
            'amount': ['This field is required.'],
            'swap_id': ['Missed swap_id'],
            'created_at': ['This field is required.'],
        }
    ) == str(error.value)
def test_atomic_swap_init():
    """
    Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens.
    Expect: bot sends commission to the zero account address, swap amount is decreased from bot account.
    """
    atomic_swap_init_payload = AtomicSwapInitPayload(
        receiver_address=ALICE_ADDRESS,
        sender_address_non_local=BOT_ETHEREUM_ADDRESS,
        amount=TOKENS_AMOUNT_TO_SWAP,
        swap_id=SWAP_ID,
        secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK,
        email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR,
        created_at=CURRENT_TIMESTAMP,
    )
    transaction_payload = TransactionPayload()
    transaction_payload.method = AtomicSwapMethod.INIT
    transaction_payload.data = atomic_swap_init_payload.SerializeToString()
    serialized_transaction_payload = transaction_payload.SerializeToString()
    transaction_header = TransactionHeader(
        signer_public_key=BOT_PUBLIC_KEY,
        family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
        family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
        inputs=INPUTS,
        outputs=OUTPUTS,
        dependencies=[],
        payload_sha512=hash512(data=serialized_transaction_payload),
        batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
        nonce=time.time().hex().encode(),
    )
    serialized_header = transaction_header.SerializeToString()
    transaction_request = TpProcessRequest(
        header=transaction_header,
        payload=serialized_transaction_payload,
        signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header),
    )
    # Initial chain state: funded bot account, empty zero account, commission
    # setting, and genesis members including the bot's public key.
    bot_account = Account()
    bot_account.balance = 5000
    serialized_bot_account = bot_account.SerializeToString()
    zero_account = Account()
    zero_account.balance = 0
    serialized_zero_account = zero_account.SerializeToString()
    swap_commission_setting = Setting()
    swap_commission_setting.entries.add(key=SETTINGS_SWAP_COMMISSION, value=str(SWAP_COMMISSION_AMOUNT))
    serialized_swap_commission_setting = swap_commission_setting.SerializeToString()
    genesis_members_setting = Setting()
    genesis_members_setting.entries.add(key=SETTINGS_KEY_ZERO_ADDRESS_OWNERS, value=f'{BOT_PUBLIC_KEY},')
    serialized_genesis_members_setting = genesis_members_setting.SerializeToString()
    mock_context = StubContext(inputs=INPUTS, outputs=OUTPUTS, initial_state={
        BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,
        BLOCK_INFO_ADDRESS: SERIALIZED_BLOCK_INFO,
        BOT_ADDRESS: serialized_bot_account,
        ZERO_ADDRESS: serialized_zero_account,
        ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY: serialized_swap_commission_setting,
        ADDRESS_TO_GET_GENESIS_MEMBERS_AS_STRING_BY: serialized_genesis_members_setting,
    })
    # Expected stored swap record after a successful INIT.
    swap_info = AtomicSwapInfo()
    swap_info.swap_id = SWAP_ID
    swap_info.state = AtomicSwapInfo.OPENED
    swap_info.amount = TOKENS_AMOUNT_TO_SWAP
    swap_info.created_at = CURRENT_TIMESTAMP
    swap_info.email_address_encrypted_optional = ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR
    swap_info.sender_address = BOT_ADDRESS
    swap_info.sender_address_non_local = BOT_ETHEREUM_ADDRESS
    swap_info.receiver_address = ALICE_ADDRESS
    swap_info.is_initiator = True
    serialized_swap_info = swap_info.SerializeToString()
    # Bot pays swap amount + commission; commission lands on the zero account.
    expected_bot_account = Account()
    expected_bot_account.balance = 5000 - TOKENS_AMOUNT_TO_SWAP - SWAP_COMMISSION_AMOUNT
    serialized_expected_bot_account = expected_bot_account.SerializeToString()
    expected_zero_account = Account()
    expected_zero_account.balance = SWAP_COMMISSION_AMOUNT
    serialized_expected_zero_account = expected_zero_account.SerializeToString()
    expected_state = {
        BOT_ADDRESS: serialized_expected_bot_account,
        ZERO_ADDRESS: serialized_expected_zero_account,
        ADDRESS_TO_STORE_SWAP_INFO_BY: serialized_swap_info,
    }
    AtomicSwapHandler().apply(transaction=transaction_request, context=mock_context)
    state_as_list = mock_context.get_state(addresses=[
        ADDRESS_TO_STORE_SWAP_INFO_BY, BOT_ADDRESS, ZERO_ADDRESS,
    ])
    state_as_dict = {entry.address: entry.data for entry in state_as_list}
    assert expected_state == state_as_dict
def test_atomic_swap_init_already_taken_id():
    """
    Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens with already existing swap id.
    Expect: invalid transaction error is raised with atomic swap id has already been taken error message.
    """
    atomic_swap_init_payload = AtomicSwapInitPayload(
        receiver_address=ALICE_ADDRESS,
        sender_address_non_local=BOT_ETHEREUM_ADDRESS,
        amount=TOKENS_AMOUNT_TO_SWAP,
        swap_id=SWAP_ID,
        secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK,
        email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR,
        created_at=CURRENT_TIMESTAMP,
    )
    transaction_payload = TransactionPayload()
    transaction_payload.method = AtomicSwapMethod.INIT
    transaction_payload.data = atomic_swap_init_payload.SerializeToString()
    serialized_transaction_payload = transaction_payload.SerializeToString()
    transaction_header = TransactionHeader(
        signer_public_key=BOT_PUBLIC_KEY,
        family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
        family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
        inputs=INPUTS,
        outputs=OUTPUTS,
        dependencies=[],
        payload_sha512=hash512(data=serialized_transaction_payload),
        batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
        nonce=time.time().hex().encode(),
    )
    serialized_header = transaction_header.SerializeToString()
    transaction_request = TpProcessRequest(
        header=transaction_header,
        payload=serialized_transaction_payload,
        signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header),
    )
    # Pre-populate state with a swap record stored under the same swap id, so
    # the handler must refuse to initialize a duplicate.
    swap_info = AtomicSwapInfo()
    swap_info.swap_id = SWAP_ID
    swap_info.state = AtomicSwapInfo.OPENED
    swap_info.amount = TOKENS_AMOUNT_TO_SWAP
    swap_info.created_at = CURRENT_TIMESTAMP
    swap_info.email_address_encrypted_optional = ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR
    swap_info.sender_address = BOT_ADDRESS
    swap_info.sender_address_non_local = BOT_ETHEREUM_ADDRESS
    swap_info.receiver_address = ALICE_ADDRESS
    serialized_swap_info = swap_info.SerializeToString()
    mock_context = StubContext(inputs=INPUTS, outputs=OUTPUTS, initial_state={
        ADDRESS_TO_STORE_SWAP_INFO_BY: serialized_swap_info,
    })
    with pytest.raises(InvalidTransaction) as error:
        AtomicSwapHandler().apply(transaction=transaction_request, context=mock_context)
    assert 'Atomic swap ID has already been taken, please use a different one.' == str(error.value)
def test_atomic_swap_init_swap_no_block_config_info():
    """
    Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens when no block config settings.
    Expect: invalid transaction error is raised with block config not found error message.
    """
    atomic_swap_init_payload = AtomicSwapInitPayload(
        receiver_address=ALICE_ADDRESS,
        sender_address_non_local=BOT_ETHEREUM_ADDRESS,
        amount=TOKENS_AMOUNT_TO_SWAP,
        swap_id=SWAP_ID,
        secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK,
        email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR,
        created_at=CURRENT_TIMESTAMP,
    )
    transaction_payload = TransactionPayload()
    transaction_payload.method = AtomicSwapMethod.INIT
    transaction_payload.data = atomic_swap_init_payload.SerializeToString()
    serialized_transaction_payload = transaction_payload.SerializeToString()
    transaction_header = TransactionHeader(
        signer_public_key=BOT_PUBLIC_KEY,
        family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
        family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
        inputs=INPUTS,
        outputs=OUTPUTS,
        dependencies=[],
        payload_sha512=hash512(data=serialized_transaction_payload),
        batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
        nonce=time.time().hex().encode(),
    )
    serialized_header = transaction_header.SerializeToString()
    transaction_request = TpProcessRequest(
        header=transaction_header,
        payload=serialized_transaction_payload,
        signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header),
    )
    # Empty initial state: the block-info config entry is missing entirely.
    mock_context = StubContext(inputs=INPUTS, outputs=OUTPUTS, initial_state={})
    with pytest.raises(InvalidTransaction) as error:
        AtomicSwapHandler().apply(transaction=transaction_request, context=mock_context)
    assert 'Block config not found.' == str(error.value)
def test_atomic_swap_init_swap_no_block_info():
    """
    Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens when no needed block information.
    Expect: invalid transaction error is raised with block not found error message.
    """
    atomic_swap_init_payload = AtomicSwapInitPayload(
        receiver_address=ALICE_ADDRESS,
        sender_address_non_local=BOT_ETHEREUM_ADDRESS,
        amount=TOKENS_AMOUNT_TO_SWAP,
        swap_id=SWAP_ID,
        secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK,
        email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR,
        created_at=CURRENT_TIMESTAMP,
    )
    transaction_payload = TransactionPayload()
    transaction_payload.method = AtomicSwapMethod.INIT
    transaction_payload.data = atomic_swap_init_payload.SerializeToString()
    serialized_transaction_payload = transaction_payload.SerializeToString()
    transaction_header = TransactionHeader(
        signer_public_key=BOT_PUBLIC_KEY,
        family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
        family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
        inputs=INPUTS,
        outputs=OUTPUTS,
        dependencies=[],
        payload_sha512=hash512(data=serialized_transaction_payload),
        batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
        nonce=time.time().hex().encode(),
    )
    serialized_header = transaction_header.SerializeToString()
    transaction_request = TpProcessRequest(
        header=transaction_header,
        payload=serialized_transaction_payload,
        signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header),
    )
    # Config exists but the referenced block-info entry itself is absent.
    mock_context = StubContext(inputs=INPUTS, outputs=OUTPUTS, initial_state={
        BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,
    })
    with pytest.raises(InvalidTransaction) as error:
        AtomicSwapHandler().apply(transaction=transaction_request, context=mock_context)
    assert f'Block {block_info_config.latest_block + 1} not found.' == str(error.value)
def test_atomic_swap_init_swap_receiver_address_invalid_type():
    """
    Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens with invalid Alice node address.
    Expect: invalid transaction error is raised with address is not of a blockchain token type error message.
    """
    # Malformed Remme address: wrong characters and length, fails address validation.
    invalid_receiver_address = '112934y*(J#QJ3UH*PD(:9B&TYDB*I0b0a8edc4104ef28093ee30'
    atomic_swap_init_payload = AtomicSwapInitPayload(
        receiver_address=invalid_receiver_address,
        sender_address_non_local=BOT_ETHEREUM_ADDRESS,
        amount=TOKENS_AMOUNT_TO_SWAP,
        swap_id=SWAP_ID,
        secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK,
        email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR,
        created_at=CURRENT_TIMESTAMP,
    )
    transaction_payload = TransactionPayload()
    transaction_payload.method = AtomicSwapMethod.INIT
    transaction_payload.data = atomic_swap_init_payload.SerializeToString()
    serialized_transaction_payload = transaction_payload.SerializeToString()
    transaction_header = TransactionHeader(
        signer_public_key=BOT_PUBLIC_KEY,
        family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
        family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
        inputs=INPUTS,
        outputs=OUTPUTS,
        dependencies=[],
        payload_sha512=hash512(data=serialized_transaction_payload),
        batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
        nonce=time.time().hex().encode(),
    )
    serialized_header = transaction_header.SerializeToString()
    transaction_request = TpProcessRequest(
        header=transaction_header,
        payload=serialized_transaction_payload,
        signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header),
    )
    mock_context = StubContext(inputs=INPUTS, outputs=OUTPUTS, initial_state={
        BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,
        BLOCK_INFO_ADDRESS: SERIALIZED_BLOCK_INFO,
    })
    with pytest.raises(InvalidTransaction) as error:
        AtomicSwapHandler().apply(transaction=transaction_request, context=mock_context)
    assert proto_error_msg(
        AtomicSwapInitPayload,
        {'receiver_address': ['Address is not of a blockchain token type.']}
    ) == str(error.value)
def test_atomic_swap_init_swap_wrong_commission_address():
    """
    Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens with wrong commission settings.
    Expect: invalid transaction error is raised with wrong commission address error message.
    """
    atomic_swap_init_payload = AtomicSwapInitPayload(
        receiver_address=ALICE_ADDRESS,
        sender_address_non_local=BOT_ETHEREUM_ADDRESS,
        amount=TOKENS_AMOUNT_TO_SWAP,
        swap_id=SWAP_ID,
        secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK,
        email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR,
        created_at=CURRENT_TIMESTAMP,
    )
    transaction_payload = TransactionPayload()
    transaction_payload.method = AtomicSwapMethod.INIT
    transaction_payload.data = atomic_swap_init_payload.SerializeToString()
    serialized_transaction_payload = transaction_payload.SerializeToString()
    transaction_header = TransactionHeader(
        signer_public_key=BOT_PUBLIC_KEY,
        family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
        family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
        inputs=INPUTS,
        outputs=OUTPUTS,
        dependencies=[],
        payload_sha512=hash512(data=serialized_transaction_payload),
        batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
        nonce=time.time().hex().encode(),
    )
    serialized_header = transaction_header.SerializeToString()
    transaction_request = TpProcessRequest(
        header=transaction_header,
        payload=serialized_transaction_payload,
        signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header),
    )
    # Commission setting holds an invalid (negative) value.
    swap_commission_setting = Setting()
    swap_commission_setting.entries.add(key=SETTINGS_SWAP_COMMISSION, value='-1')
    serialized_swap_commission_setting = swap_commission_setting.SerializeToString()
    mock_context = StubContext(inputs=INPUTS, outputs=OUTPUTS, initial_state={
        BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,
        BLOCK_INFO_ADDRESS: SERIALIZED_BLOCK_INFO,
        ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY: serialized_swap_commission_setting,
    })
    with pytest.raises(InvalidTransaction) as error:
        AtomicSwapHandler().apply(transaction=transaction_request, context=mock_context)
    assert 'Wrong commission address.' == str(error.value)
def test_atomic_swap_init_swap_no_account_in_state():
    """
    Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens from non-existent bot address.
    Expect: invalid transaction error is raised with not enough balance error message.
    """
    atomic_swap_init_payload = AtomicSwapInitPayload(
        receiver_address=ALICE_ADDRESS,
        sender_address_non_local=BOT_ETHEREUM_ADDRESS,
        amount=TOKENS_AMOUNT_TO_SWAP,
        swap_id=SWAP_ID,
        secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK,
        email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR,
        created_at=CURRENT_TIMESTAMP,
    )
    transaction_payload = TransactionPayload()
    transaction_payload.method = AtomicSwapMethod.INIT
    transaction_payload.data = atomic_swap_init_payload.SerializeToString()
    serialized_transaction_payload = transaction_payload.SerializeToString()
    transaction_header = TransactionHeader(
        signer_public_key=BOT_PUBLIC_KEY,
        family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
        family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
        inputs=INPUTS,
        outputs=OUTPUTS,
        dependencies=[],
        payload_sha512=hash512(data=serialized_transaction_payload),
        batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
        nonce=time.time().hex().encode(),
    )
    serialized_header = transaction_header.SerializeToString()
    transaction_request = TpProcessRequest(
        header=transaction_header,
        payload=serialized_transaction_payload,
        signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header),
    )
    swap_commission_setting = Setting()
    swap_commission_setting.entries.add(key=SETTINGS_SWAP_COMMISSION, value=str(SWAP_COMMISSION_AMOUNT))
    serialized_swap_commission_setting = swap_commission_setting.SerializeToString()
    # Note: BOT_ADDRESS is deliberately absent from initial_state, so the
    # sender account does not exist and its balance is treated as insufficient.
    mock_context = StubContext(inputs=INPUTS, outputs=OUTPUTS, initial_state={
        BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,
        BLOCK_INFO_ADDRESS: SERIALIZED_BLOCK_INFO,
        ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY: serialized_swap_commission_setting,
    })
    with pytest.raises(InvalidTransaction) as error:
        AtomicSwapHandler().apply(transaction=transaction_request, context=mock_context)
    total_amount = TOKENS_AMOUNT_TO_SWAP + SWAP_COMMISSION_AMOUNT
    assert f'Not enough balance to perform the transaction in the amount (with a commission) {total_amount}.' \
        == str(error.value)
def test_atomic_swap_init_swap_not_enough_balance():
    """
    Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens with not enough bot address balance.
    Expect: invalid transaction error is raised with not enough balance error message.
    """
    swap_payload = AtomicSwapInitPayload(
        receiver_address=ALICE_ADDRESS,
        sender_address_non_local=BOT_ETHEREUM_ADDRESS,
        amount=TOKENS_AMOUNT_TO_SWAP,
        swap_id=SWAP_ID,
        secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK,
        email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR,
        created_at=CURRENT_TIMESTAMP,
    )

    # Wrap the swap payload into a generic transaction payload.
    tx_payload = TransactionPayload()
    tx_payload.method = AtomicSwapMethod.INIT
    tx_payload.data = swap_payload.SerializeToString()
    payload_bytes = tx_payload.SerializeToString()

    header = TransactionHeader(
        signer_public_key=BOT_PUBLIC_KEY,
        family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
        family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
        inputs=INPUTS,
        outputs=OUTPUTS,
        dependencies=[],
        payload_sha512=hash512(data=payload_bytes),
        batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
        nonce=time.time().hex().encode(),
    )
    header_bytes = header.SerializeToString()

    request = TpProcessRequest(
        header=header,
        payload=payload_bytes,
        signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(header_bytes),
    )

    # Bot account with a zero balance: the swap amount plus commission cannot be covered.
    empty_bot_account = Account()
    empty_bot_account.balance = 0

    commission_setting = Setting()
    commission_setting.entries.add(key=SETTINGS_SWAP_COMMISSION, value=str(SWAP_COMMISSION_AMOUNT))

    context = StubContext(inputs=INPUTS, outputs=OUTPUTS, initial_state={
        BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,
        BLOCK_INFO_ADDRESS: SERIALIZED_BLOCK_INFO,
        BOT_ADDRESS: empty_bot_account.SerializeToString(),
        ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY: commission_setting.SerializeToString(),
    })

    with pytest.raises(InvalidTransaction) as error:
        AtomicSwapHandler().apply(transaction=request, context=context)

    expected_total = TOKENS_AMOUNT_TO_SWAP + SWAP_COMMISSION_AMOUNT
    assert f'Not enough balance to perform the transaction in the amount (with a commission) {expected_total}.' \
        == str(error.value)
| 40.044978
| 119
| 0.774392
| 2,992
| 26,710
| 6.474265
| 0.072193
| 0.058541
| 0.019514
| 0.02256
| 0.812039
| 0.789376
| 0.775231
| 0.766971
| 0.752413
| 0.745031
| 0
| 0.023204
| 0.159379
| 26,710
| 666
| 120
| 40.105105
| 0.839531
| 0.065032
| 0
| 0.688129
| 0
| 0
| 0.060741
| 0.028918
| 0
| 0
| 0.005848
| 0
| 0.018109
| 1
| 0.018109
| false
| 0
| 0.040241
| 0
| 0.05835
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3132e2f36132e44fbd1c6af00fce234363a8d0a2
| 17
|
py
|
Python
|
utils/db_api/__init__.py
|
AleksZavg/Admin-telegram-bot
|
c671419ba9fd5e93df742ebe9443d72afa4c99aa
|
[
"MIT"
] | null | null | null |
utils/db_api/__init__.py
|
AleksZavg/Admin-telegram-bot
|
c671419ba9fd5e93df742ebe9443d72afa4c99aa
|
[
"MIT"
] | null | null | null |
utils/db_api/__init__.py
|
AleksZavg/Admin-telegram-bot
|
c671419ba9fd5e93df742ebe9443d72afa4c99aa
|
[
"MIT"
] | null | null | null |
from . import sql
| 17
| 17
| 0.764706
| 3
| 17
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 17
| 1
| 17
| 17
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3157ab626dece4b479698f85ed5115a820362716
| 1,763
|
py
|
Python
|
CodingInterview2/20_NumericStrings/test_numeric_strings.py
|
hscspring/TheAlgorithms-Python
|
5c2faea1d2d25a9a81a4786e053b0cc58ab46c6f
|
[
"MIT"
] | 10
|
2020-07-06T11:00:58.000Z
|
2022-01-29T09:25:24.000Z
|
CodingInterview2/20_NumericStrings/test_numeric_strings.py
|
hscspring/TheAlgorithms-Python
|
5c2faea1d2d25a9a81a4786e053b0cc58ab46c6f
|
[
"MIT"
] | null | null | null |
CodingInterview2/20_NumericStrings/test_numeric_strings.py
|
hscspring/TheAlgorithms-Python
|
5c2faea1d2d25a9a81a4786e053b0cc58ab46c6f
|
[
"MIT"
] | 3
|
2020-07-13T06:39:23.000Z
|
2020-08-15T16:29:48.000Z
|
from numeric_strings import is_num
# --- strings that must be accepted as numeric ------------------------------

def test_pos():
    assert is_num("100") == True

def test_operator_pos():
    assert is_num("+100") == True

def test_neg():
    assert is_num("-123") == True

def test_deci():
    assert is_num("3.14") == True

def test_pos_dot():
    # A trailing dot with no fraction digits is still a valid decimal.
    assert is_num("3.") == True

def test_neg_dot():
    # A leading dot with no integer digits is still a valid decimal.
    assert is_num("-.123") == True

def test_pos_exp():
    assert is_num("5e2") == True

def test_deci_exp():
    assert is_num("123.56e2") == True

def test_deci_exp_operator():
    assert is_num("1.79234234235235E+308") == True

def test_neg_deci():
    assert is_num("-1E-16") == True

def test_all0():
    assert is_num("00000") == True

def test_pos_all0():
    assert is_num("+0000") == True

def test_neg_all0():
    assert is_num("-0000") == True

def test_pos_0head_deci():
    # Leading zeros are allowed when a decimal point follows.
    assert is_num("00001.1") == True

def test_neg_0head_deci():
    assert is_num("-00001.") == True

# --- strings that must be rejected -----------------------------------------

def test_0head_not():
    # Leading zeros on a plain integer are rejected.
    assert is_num("001") == False

def test_pos_0head_not():
    assert is_num("+001") == False

def test_neg_0head_not():
    assert is_num("-001") == False

def test_pos_operator_pos_not():
    # Fix: the original name lacked the ``test_`` prefix, so pytest never
    # collected or ran this assertion.
    assert is_num("1+2") == False

# Backward-compatible alias so any external caller of the old (uncollected)
# helper name keeps working.
pos_operator_pos_not = test_pos_operator_pos_not

def test_exp_not():
    # An exponent marker with no exponent digits is invalid.
    assert is_num("12e") == False

def test_contain_letter_not():
    assert is_num("1a3.14") == False

def test_multi_dot_not():
    assert is_num("1.2.3") == False

def test_multi_operator_not():
    assert is_num("+-5") == False

def test_deci_exp_not():
    # The exponent part must be an integer, not a decimal.
    assert is_num("12e+5.4") == False

def test_dot_not():
    assert is_num(".") == False

def test_dot_exp_pos_not():
    assert is_num(".e1") == False

def test_exp_pos_not():
    assert is_num("e1") == False

def test_operator_dot_not():
    assert is_num("+.") == False

def test_none():
    # The empty string is not a number.
    assert is_num("") == False
| 18.175258
| 50
| 0.647192
| 285
| 1,763
| 3.659649
| 0.157895
| 0.143816
| 0.305849
| 0.174497
| 0.53116
| 0.490892
| 0.373921
| 0.325983
| 0.130393
| 0.065197
| 0
| 0.072626
| 0.187748
| 1,763
| 96
| 51
| 18.364583
| 0.655726
| 0
| 0
| 0
| 0
| 0
| 0.076836
| 0.011952
| 0
| 0
| 0
| 0
| 0.491525
| 1
| 0.491525
| true
| 0
| 0.016949
| 0
| 0.508475
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
9ed2b77dc14cf259dd4066bb59e60dcb9619622c
| 80
|
py
|
Python
|
batrises/__init__.py
|
persorkki/bat-rises
|
7bf38e5de118a9943106c3e70a7ab1934e76afc4
|
[
"MIT"
] | 1
|
2020-04-04T10:47:19.000Z
|
2020-04-04T10:47:19.000Z
|
batrises/__init__.py
|
persorkki/bat-rises
|
7bf38e5de118a9943106c3e70a7ab1934e76afc4
|
[
"MIT"
] | null | null | null |
batrises/__init__.py
|
persorkki/bat-rises
|
7bf38e5de118a9943106c3e70a7ab1934e76afc4
|
[
"MIT"
] | 1
|
2020-04-04T10:47:21.000Z
|
2020-04-04T10:47:21.000Z
|
from .core import *
from .logs import *
from .conf import *
from .utils import *
| 20
| 20
| 0.7125
| 12
| 80
| 4.75
| 0.5
| 0.526316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 80
| 4
| 20
| 20
| 0.876923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9ef7401526646303d76a1d2c30cb98f235914879
| 5,615
|
py
|
Python
|
checks/check_heatmaps.py
|
joybanerjee08/imgaug
|
b986ba8bf93b7847671e62b4636256e90245b340
|
[
"MIT"
] | 1
|
2019-05-22T09:33:33.000Z
|
2019-05-22T09:33:33.000Z
|
checks/check_heatmaps.py
|
HuuY/imgaug
|
e9d3515b52f2205cee1d3c9a913fcc638d15993b
|
[
"MIT"
] | null | null | null |
checks/check_heatmaps.py
|
HuuY/imgaug
|
e9d3515b52f2205cee1d3c9a913fcc638d15993b
|
[
"MIT"
] | 1
|
2019-03-07T13:58:25.000Z
|
2019-03-07T13:58:25.000Z
|
from __future__ import print_function, division
import numpy as np
import imgaug as ia
from imgaug import augmenters as iaa
def _check(title, aug, quokka, heatmaps, use_grid=False):
    """Augment the image and its heatmaps with a deterministic copy of `aug`
    and display the drawn heatmaps side by side.

    `to_deterministic()` guarantees the image and heatmap augmentations use
    identical sampled parameters (the original code skipped it only for the
    two fixed-parameter Affine checks, where it makes no difference).
    """
    print(title)
    aug_det = aug.to_deterministic()
    quokka_aug = aug_det.augment_image(quokka)
    heatmaps_aug = aug_det.augment_heatmaps([heatmaps])[0]
    heatmaps_drawn = heatmaps.draw_on_image(quokka)
    heatmaps_aug_drawn = heatmaps_aug.draw_on_image(quokka_aug)
    if use_grid:
        ia.imshow(ia.draw_grid([heatmaps_drawn[0], heatmaps_aug_drawn[0]], cols=2))
    else:
        ia.imshow(np.hstack([heatmaps_drawn[0], heatmaps_aug_drawn[0]]))


def main():
    """Visual sanity check: heatmap augmentation across a range of augmenters."""
    quokka = ia.quokka(size=0.5)
    h, w = quokka.shape[0:2]

    # Hand-crafted heatmap with a few rectangular hot regions of varying intensity.
    heatmap = np.zeros((h, w), dtype=np.float32)
    heatmap[70:120, 90:150] = 0.1
    heatmap[30:70, 50:65] = 0.5
    heatmap[20:50, 55:85] = 1.0
    heatmap[120:140, 0:20] = 0.75
    heatmaps = ia.HeatmapsOnImage(heatmap[..., np.newaxis], quokka.shape)

    _check("Affine...",
           iaa.Affine(translate_px={"x": 20}, mode="constant", cval=128),
           quokka, heatmaps)
    _check("Affine with mode=edge...",
           iaa.Affine(translate_px={"x": 20}, mode="edge"),
           quokka, heatmaps)
    _check("PiecewiseAffine...",
           iaa.PiecewiseAffine(scale=0.04),
           quokka, heatmaps)
    _check("PerspectiveTransform...",
           iaa.PerspectiveTransform(scale=0.04),
           quokka, heatmaps)
    _check("ElasticTransformation alpha=3, sig=0.5...",
           iaa.ElasticTransformation(alpha=3.0, sigma=0.5),
           quokka, heatmaps)
    _check("ElasticTransformation alpha=10, sig=3...",
           iaa.ElasticTransformation(alpha=10.0, sigma=3.0),
           quokka, heatmaps)
    # Typo fix: the original printed "CopAndPad" for the next two checks.
    _check("CropAndPad mode=constant...",
           iaa.CropAndPad(px=(-10, 10, 15, -15), pad_mode="constant", pad_cval=128),
           quokka, heatmaps)
    _check("CropAndPad mode=constant + percent...",
           iaa.CropAndPad(percent=(-0.05, 0.05, 0.1, -0.1), pad_mode="constant", pad_cval=128),
           quokka, heatmaps)
    _check("CropAndPad mode=edge...",
           iaa.CropAndPad(px=(-10, 10, 15, -15), pad_mode="edge"),
           quokka, heatmaps)
    _check("Scale...",
           iaa.Scale(0.5, interpolation="nearest"),
           quokka, heatmaps, use_grid=True)
    _check("Alpha...",
           iaa.Alpha(0.7, iaa.Affine(rotate=20)),
           quokka, heatmaps)
# Allow running this visual check directly: ``python check_heatmaps.py``.
if __name__ == "__main__":
    main()
| 31.022099
| 93
| 0.660374
| 749
| 5,615
| 4.639519
| 0.11749
| 0.139281
| 0.120288
| 0.139281
| 0.801727
| 0.801727
| 0.801727
| 0.792806
| 0.77554
| 0.77554
| 0
| 0.034037
| 0.215138
| 5,615
| 180
| 94
| 31.194444
| 0.754482
| 0
| 0
| 0.682119
| 0
| 0
| 0.054319
| 0.011576
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006623
| false
| 0
| 0.02649
| 0
| 0.033113
| 0.07947
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
73255a4f647e1f3b938de7352ceea5cd07766c6a
| 6,172
|
py
|
Python
|
test/test_SepiaPrediction.py
|
lanl/SEPIA
|
0a1e606e1d1072f49e4f3f358962bd8918a5d3a3
|
[
"BSD-3-Clause"
] | 19
|
2020-06-22T16:37:07.000Z
|
2022-02-18T22:50:59.000Z
|
test/test_SepiaPrediction.py
|
lanl/SEPIA
|
0a1e606e1d1072f49e4f3f358962bd8918a5d3a3
|
[
"BSD-3-Clause"
] | 41
|
2020-07-07T22:52:33.000Z
|
2021-11-04T14:05:03.000Z
|
test/test_SepiaPrediction.py
|
lanl/SEPIA
|
0a1e606e1d1072f49e4f3f358962bd8918a5d3a3
|
[
"BSD-3-Clause"
] | 6
|
2020-08-14T18:58:45.000Z
|
2022-03-01T21:00:14.000Z
|
import unittest
import numpy as np
import generate_data
from sepia.SepiaData import SepiaData
from sepia.SepiaModel import SepiaModel
from sepia.SepiaPredict import SepiaXvalEmulatorPrediction, SepiaEmulatorPrediction, SepiaFullPrediction
# Seed the global NumPy RNG so synthetic data built in setUp is reproducible.
np.random.seed(42)
class SepiaPredictionTestCase(unittest.TestCase):
    """Exercises Sepia prediction classes on univariate/multivariate models.

    Each test runs a short MCMC and then smoke-tests the emulator,
    cross-validation and (where applicable) full prediction paths.
    """

    def setUp(self, m=100, n=1, nt_sim=50, nt_obs=20, n_theta=3, n_basis=5, sig_n=0.1, seed=42):
        """Build the univariate/multivariate, sim-only/sim-and-obs models."""
        multi_data_dict = generate_data.generate_multi_sim_and_obs(m=m, n=n, nt_sim=nt_sim, nt_obs=nt_obs,
                                                                   n_theta=n_theta, n_basis=n_basis,
                                                                   sig_n=sig_n, seed=seed)
        univ_data_dict = generate_data.generate_univ_sim_and_obs(m=m, n=n, sig_n=sig_n, seed=seed)

        # Univariate, simulation data only.
        d = SepiaData(x_sim=univ_data_dict['t_sim'], y_sim=univ_data_dict['y_sim'])
        d.transform_xt()
        d.standardize_y()
        self.univ_sim_only_model = SepiaModel(d)

        # Univariate, simulation and observed data.
        d = SepiaData(t_sim=univ_data_dict['t_sim'], y_sim=univ_data_dict['y_sim'], y_obs=univ_data_dict['y_obs'])
        d.transform_xt()
        d.standardize_y()
        self.univ_sim_and_obs_model = SepiaModel(d)

        # Multivariate, simulation data only (5-component K basis).
        d = SepiaData(x_sim=multi_data_dict['t_sim'], y_sim=multi_data_dict['y_sim'],
                      y_ind_sim=multi_data_dict['y_ind_sim'])
        d.transform_xt()
        d.standardize_y()
        d.create_K_basis(5)
        self.multi_sim_only_model = SepiaModel(d)

        # Multivariate, sim and obs, no discrepancy (D) basis.
        d = SepiaData(t_sim=multi_data_dict['t_sim'], y_sim=multi_data_dict['y_sim'],
                      y_ind_sim=multi_data_dict['y_ind_sim'], y_obs=multi_data_dict['y_obs'],
                      y_ind_obs=multi_data_dict['y_ind_obs'])
        d.transform_xt()
        d.standardize_y()
        d.create_K_basis(5)
        self.multi_sim_and_obs_noD_model = SepiaModel(d)

        # Multivariate, sim and obs, with a linear discrepancy basis.
        d = SepiaData(t_sim=multi_data_dict['t_sim'], y_sim=multi_data_dict['y_sim'],
                      y_ind_sim=multi_data_dict['y_ind_sim'], y_obs=multi_data_dict['y_obs'],
                      y_ind_obs=multi_data_dict['y_ind_obs'])
        d.transform_xt()
        d.standardize_y()
        d.create_K_basis(5)
        d.create_D_basis('linear')
        self.multi_sim_and_obs_model = SepiaModel(d)

    def _run_emulator_predictions(self, model, with_x_pred=False):
        """Run a short MCMC, then exercise emulator and x-val predictions.

        Returns the MCMC samples so callers can reuse them for full prediction.
        """
        model.do_mcmc(50)
        samples = model.get_samples(numsamples=5)
        pred_kwargs = {'t_pred': model.data.sim_data.t, 'samples': samples, 'model': model}
        if with_x_pred:
            pred_kwargs['x_pred'] = model.data.sim_data.x
        pred = SepiaEmulatorPrediction(**pred_kwargs)
        pred.get_w()
        pred.get_y()
        cvpred = SepiaXvalEmulatorPrediction(samples=samples, model=model)
        cvpred.get_w()
        cvpred.get_y()
        return samples

    def _run_full_predictions(self, model, samples):
        """Exercise SepiaFullPrediction on a sim-and-obs model."""
        pred = SepiaFullPrediction(t_pred=model.data.sim_data.t,
                                   samples=samples, model=model)
        pred.get_u_v()
        pred.get_ysim()
        pred.get_ysim(as_obs=True)

    def test_univariate_sim_only_pred(self):
        """
        Tests pred for univariate sim only model
        """
        print('Testing univariate sim-only Sepia prediction...', flush=True)
        self._run_emulator_predictions(self.univ_sim_only_model, with_x_pred=True)

    def test_univariate_sim_and_obs_pred(self):
        """
        Tests pred for univariate sim and obs model
        """
        print('Testing univariate sim and obs Sepia prediction...', flush=True)
        model = self.univ_sim_and_obs_model
        samples = self._run_emulator_predictions(model)
        self._run_full_predictions(model, samples)

    def test_multivariate_sim_only_pred(self):
        """
        Tests pred for multivariate sim only model
        """
        print('Testing multivariate sim-only Sepia prediction...', flush=True)
        self._run_emulator_predictions(self.multi_sim_only_model, with_x_pred=True)

    def test_multivariate_sim_and_obs_pred(self):
        """
        Tests pred for multivariate sim and obs model
        """
        print('Testing multivariate sim and obs Sepia prediction...', flush=True)
        model = self.multi_sim_and_obs_model
        samples = self._run_emulator_predictions(model)
        self._run_full_predictions(model, samples)

    def test_multivariate_sim_and_obs_noD_pred(self):
        """
        Tests pred for multivariate sim and obs model no discrep
        """
        print('Testing multivariate sim and obs no discrep Sepia prediction...', flush=True)
        model = self.multi_sim_and_obs_noD_model
        samples = self._run_emulator_predictions(model)
        self._run_full_predictions(model, samples)
| 36.305882
| 114
| 0.613739
| 810
| 6,172
| 4.35679
| 0.098765
| 0.045339
| 0.043355
| 0.08841
| 0.862851
| 0.822896
| 0.77019
| 0.732502
| 0.695381
| 0.650609
| 0
| 0.00774
| 0.288237
| 6,172
| 169
| 115
| 36.52071
| 0.795584
| 0.037265
| 0
| 0.675214
| 1
| 0
| 0.064743
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051282
| false
| 0
| 0.051282
| 0
| 0.111111
| 0.042735
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7331941f1a8405d5a701de32416b7bf3852dc785
| 28,801
|
py
|
Python
|
sysinv/sysinv/sysinv/sysinv/tests/api/test_certificate.py
|
albailey/config
|
40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9
|
[
"Apache-2.0"
] | 10
|
2020-02-07T18:57:44.000Z
|
2021-09-11T10:29:34.000Z
|
sysinv/sysinv/sysinv/sysinv/tests/api/test_certificate.py
|
albailey/config
|
40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9
|
[
"Apache-2.0"
] | 1
|
2021-01-14T12:01:55.000Z
|
2021-01-14T12:01:55.000Z
|
sysinv/sysinv/sysinv/sysinv/tests/api/test_certificate.py
|
albailey/config
|
40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9
|
[
"Apache-2.0"
] | 10
|
2020-10-13T08:37:46.000Z
|
2022-02-09T00:21:25.000Z
|
#
# Copyright (c) 2017-2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
"""
Tests for the API /certificate_install/delete methods.
"""
import json
import mock
import os
import sys
import uuid as UUID
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from six.moves import http_client
from sysinv.api.controllers.v1 import certificate as cert_api
from sysinv.common import constants
from sysinv.tests.api import base
from sysinv.tests.db import utils as dbutils
# Python versions whose stdlib already handles the RFC 6125 section 6.4.3
# wildcard-matching improvements (see _dnsname_match), making some SAN
# tests inapplicable.
SKIP_PYTHON_VERSIONS = {'RFC_6125': [(3, 9)]}


def check_skip_test(test_reference):
    """Skip *test_reference* on interpreters with native RFC 6125 handling.

    In Python 3.9 versus Python 3.6 RFC 6125 got handling improvements
    in the STDLIB; check the _dnsname_match implementation.
    """
    skip_versions = SKIP_PYTHON_VERSIONS['RFC_6125']
    major, minor = sys.version_info[:2]
    if (major, minor) in skip_versions:
        test_reference.skipTest("Skipping SAN tests not aligning to RFC 6125, "
                                "section 6.4.3 in Python {}.{}"
                                "".format(major, minor))
class FakeConductorAPI(object):
    """Stand-in for the conductor RPC API used by the certificate tests."""

    def __init__(self):
        # Primed result and flag defaults.
        self.config_certificate_return = None
        self.platcert_k8s_secret_value = False
        # Route config_certificate through the fake so tests can preset
        # the value it hands back; delete is just recorded.
        self.config_certificate = self.fake_config_certificate
        self.delete_certificate = mock.MagicMock()

    def fake_config_certificate(self, context, pem, config_dict):
        """Return whatever the test primed via setup_config_certificate()."""
        return self.config_certificate_return

    def setup_config_certificate(self, data):
        """Prime the value fake_config_certificate() will return."""
        self.config_certificate_return = data

    def update_admin_ep_certificate(self, context):
        """Pretend the admin endpoint certificate renewal always succeeds."""
        return True
class CertificateTestCase(base.FunctionalTest):
    """Unit tests for cert_api._check_cert_dns_name() CN/SAN wildcard matching."""

    def setUp(self):
        super(CertificateTestCase, self).setUp()

    @staticmethod
    def _load_cert(filename):
        """Load an x509 certificate from a PEM file in the test data dir."""
        certfile = os.path.join(os.path.dirname(__file__), "data", filename)
        with open(certfile, 'rb') as f:
            pem_contents = f.read()
        return x509.load_pem_x509_certificate(pem_contents, default_backend())

    def _assert_match(self, cert, dns_name):
        """Assert that dns_name is accepted for the certificate."""
        result = cert_api._check_cert_dns_name(cert, dns_name)
        self.assertTrue(result)

    def _assert_no_match(self, cert, dns_name):
        """Assert that dns_name is rejected with a "doesn't match" error."""
        result = cert_api._check_cert_dns_name(cert, dns_name)
        self.assertIn("doesn't match", str(result))

    def test_check_cert_dns_name_valid_SAN(self):
        # This certificate contains
        # CN: *.vbox.local
        # DNS: *.vbox.local
        cert = self._load_cert('cert-with-key-SAN.pem')
        self._assert_match(cert, 'vbox.local')
        self._assert_no_match(cert, 'domain.org')
        self._assert_no_match(cert, 'lab.vbox.local')

    def test_check_cert_dns_name_invalid_SAN(self):
        # This certificate contains
        # CN: *.vbox.local
        # DNS:*.*.vbox.local, DNS:bad.*.vbox.local
        check_skip_test(self)
        cert = self._load_cert('cert-with-key-invalidDNS.pem')
        self._assert_no_match(cert, 'vbox.local')
        self._assert_no_match(cert, 'a.vbox.local')
        self._assert_no_match(cert, 'a.b.vbox.local')
        self._assert_no_match(cert, 'bad.b.vbox.local')

    def test_check_cert_dns_name_CN_only(self):
        # This certificate contains CN:*.vbox.local
        cert = self._load_cert('cert-with-key-CNnoSAN.pem')
        self._assert_match(cert, 'vbox.local')
        self._assert_no_match(cert, 'a.vbox.local')
        self._assert_no_match(cert, 'a.b.vbox.local')
        self._assert_no_match(cert, 'bad.b.vbox.local')

    def test_check_cert_dns_name_multi_SAN(self):
        # This certificate contains
        # CN: *.vbox.local
        # DNS: *.vbox.local, bad.*.vbox.local, *.example.com
        check_skip_test(self)
        cert = self._load_cert('cert-with-key-multiSAN.pem')
        self._assert_match(cert, 'vbox.local')
        # domain matches one of the DNS names, but not the CN
        self._assert_match(cert, 'example.com')
        self._assert_no_match(cert, 'a.vbox.local')
        self._assert_no_match(cert, 'x.example.com')

    def test_check_cert_dns_name_CN_differ_SAN(self):
        # This certificate contains
        # CN: *.vbox.local
        # DNS: bad.*.vbox.local, *.example.com
        check_skip_test(self)
        cert = self._load_cert('cert-with-key-CNdifferSAN.pem')
        # domain matches CN, but does not match any of the DNS names
        self._assert_no_match(cert, 'vbox.local')
        # domain matches one of the DNS names, but not the CN
        self._assert_match(cert, 'example.com')
        self._assert_no_match(cert, 'a.vbox.local')
        self._assert_no_match(cert, 'x.example.com')
class ApiCertificateTestCaseMixin(object):
    """Shared constants and helpers for the /certificate API test suites."""

    # Generic headers passed to most API calls.
    API_HEADERS = {'User-Agent': 'sysinv-test'}

    # Prefix for the endpoint URLs.
    API_PREFIX = '/certificate'

    # Python table key for the list of results.
    RESULT_KEY = 'certificates'

    # Field known to exist for both inputs and outputs.
    COMMON_FIELD = 'certificates'

    # Attributes that should be populated by an API query.
    expected_api_fields = ['uuid']

    # Attributes that should not be populated by an API query.
    hidden_api_fields = []

    def setUp(self):
        super(ApiCertificateTestCaseMixin, self).setUp()
        # Replace the conductor RPC API with the in-process fake for the
        # duration of each test.
        self.fake_conductor_api = FakeConductorAPI()
        patcher = mock.patch('sysinv.conductor.rpcapi.ConductorAPI')
        self.mock_conductor_api = patcher.start()
        self.mock_conductor_api.return_value = self.fake_conductor_api
        self.addCleanup(patcher.stop)

    def get_single_url(self, uuid):
        """Build the URL of a single certificate resource."""
        return '{}/{}'.format(self.API_PREFIX, uuid)

    def _create_db_object(self, obj_id=None):
        """Create a test ssl_ca certificate row in the database."""
        return dbutils.create_test_certificate(
            id=obj_id, certtype='ssl_ca', signature='ssl_ca_123456789')

    @staticmethod
    def extract_certs_from_pem_file(certfile):
        """ extract certificates from a X509 PEM file
        """
        marker = b'-----BEGIN CERTIFICATE-----'
        with open(certfile, 'rb') as f:
            pem_contents = f.read()
        certs = []
        search_from = 0
        while True:
            index = pem_contents.find(marker, search_from)
            if index < 0:
                break
            certs.append(x509.load_pem_x509_certificate(pem_contents[index:],
                                                        default_backend()))
            search_from = index + len(marker)
        return certs

    @staticmethod
    def get_cert_signature(mode, cert):
        """Return '<mode>_<serial_number>', truncated to 255 characters."""
        signature = '{}_{}'.format(mode, cert.serial_number)
        return signature[:255]
class ApiCertificatePostTestSuite(ApiCertificateTestCaseMixin,
base.FunctionalTest):
""" Certificate post operations
"""
def setUp(self):
super(ApiCertificatePostTestSuite, self).setUp()
self.create_test_isystem()
# Mock the KubeOperator
self.kube_get_secret_result = None
def mock_kube_get_secret(obj, name, namespace):
return self.kube_get_secret_result
self.mocked_kube_get_secret = mock.patch(
'sysinv.common.kubernetes.KubeOperator.kube_get_secret',
mock_kube_get_secret)
self.mocked_kube_get_secret.start()
self.addCleanup(self.mocked_kube_get_secret.stop)
def create_test_isystem(self):
return dbutils.create_test_isystem(capabilities={'https_enabled': True})
# Test successful POST operation to install 1 CA certificate
def test_install_one_CA_certificate(self):
mode = 'ssl_ca'
certfile = os.path.join(os.path.dirname(__file__), "data",
'ca-cert-one-cert.pem')
in_certs = self.extract_certs_from_pem_file(certfile)
fake_config_certificate_return = []
for in_cert in in_certs:
fake_config_certificate_return.append(
{'signature': self.get_cert_signature(mode, in_cert),
'not_valid_before': in_cert.not_valid_before,
'not_valid_after': in_cert.not_valid_after})
self.fake_conductor_api.\
setup_config_certificate(fake_config_certificate_return)
data = {'mode': mode}
files = [('file', certfile)]
response = self.post_with_files('%s/%s' % (self.API_PREFIX, 'certificate_install'),
data,
upload_files=files,
headers=self.API_HEADERS,
expect_errors=False)
self.assertEqual(response.status_code, http_client.OK)
resp = json.loads(response.body)
self.assertIn('certificates', resp)
ret_certs = resp.get('certificates')
self.assertEqual(len(in_certs), len(ret_certs))
for ret_cert in ret_certs:
self.assertIn('certtype', ret_cert)
self.assertEqual(ret_cert.get('certtype'), mode)
self.assertIn('signature', ret_cert)
self.assertIn('start_date', ret_cert)
self.assertIn('expiry_date', ret_cert)
found_match = False
for in_cert in in_certs:
ret_cert_start_date = str(ret_cert.get('start_date'))
ret_cert_start_date = ret_cert_start_date.replace('+00:00', '')
ret_cert_expiry_date = str(ret_cert.get('expiry_date'))
ret_cert_expiry_date = \
ret_cert_expiry_date.replace('+00:00', '')
if ret_cert.get('signature') == \
self.get_cert_signature(mode, in_cert) and \
ret_cert_start_date == \
str(in_cert.not_valid_before) and \
ret_cert_expiry_date == \
str(in_cert.not_valid_after):
found_match = True
self.assertTrue(found_match)
def test_renew_certificate(self):
certtype = constants.CERTIFICATE_TYPE_ADMIN_ENDPOINT
data = {'certtype': certtype}
response = self.post_json('%s/%s' % (self.API_PREFIX, 'renew_certificate'),
data,
headers=self.API_HEADERS,
expect_errors=True)
self.assertTrue(response)
# Test successful POST operation to install 2 CA certificate
def test_install_two_CA_certificate(self):
mode = 'ssl_ca'
certfile = os.path.join(os.path.dirname(__file__), "data",
'ca-cert-two-certs.pem')
in_certs = self.extract_certs_from_pem_file(certfile)
fake_config_certificate_return = []
for in_cert in in_certs:
fake_config_certificate_return.append(
{'signature': self.get_cert_signature(mode, in_cert),
'not_valid_before': in_cert.not_valid_before,
'not_valid_after': in_cert.not_valid_after})
self.fake_conductor_api.\
setup_config_certificate(fake_config_certificate_return)
data = {'mode': mode}
files = [('file', certfile)]
response = self.post_with_files('%s/%s' % (self.API_PREFIX,
'certificate_install'),
data,
upload_files=files,
headers=self.API_HEADERS,
expect_errors=False)
self.assertEqual(response.status_code, http_client.OK)
resp = json.loads(response.body)
self.assertIn('certificates', resp)
ret_certs = resp.get('certificates')
self.assertEqual(len(in_certs), len(ret_certs))
for ret_cert in ret_certs:
self.assertIn('certtype', ret_cert)
self.assertEqual(ret_cert.get('certtype'), mode)
self.assertIn('signature', ret_cert)
self.assertIn('start_date', ret_cert)
self.assertIn('expiry_date', ret_cert)
found_match = False
for in_cert in in_certs:
ret_cert_start_date = str(ret_cert.get('start_date'))
ret_cert_start_date = ret_cert_start_date.replace('+00:00', '')
ret_cert_expiry_date = str(ret_cert.get('expiry_date'))
ret_cert_expiry_date = \
ret_cert_expiry_date.replace('+00:00', '')
if ret_cert.get('signature') == \
self.get_cert_signature(mode, in_cert) and \
ret_cert_start_date == \
str(in_cert.not_valid_before) and \
ret_cert_expiry_date == \
str(in_cert.not_valid_after):
found_match = True
self.assertTrue(found_match)
# Test successful POST operation to install ssl certificate signed by
# intermediate CA
def test_install_2xcert_1xkey_ssl_certificate(self):
    """Install an 'ssl' certificate signed by an intermediate CA.

    The uploaded file carries the server certificate first, then the
    intermediate CA certificate; the API must report only the server
    certificate.
    """
    mode = 'ssl'
    certfile = os.path.join(os.path.dirname(__file__), "data",
                            'ssl-cert-2xcert-1xkey-with-key.pem')
    in_certs = self.extract_certs_from_pem_file(certfile)
    # Fake conductor reply: the first cert is the server cert, every
    # subsequent one is a CA cert.
    fake_config_certificate_return = [
        {'signature': self.get_cert_signature(mode, cert),
         'not_valid_before': cert.not_valid_before,
         'not_valid_after': cert.not_valid_after,
         'is_ca': position != 0}
        for position, cert in enumerate(in_certs)
    ]
    self.fake_conductor_api.setup_config_certificate(
        fake_config_certificate_return)
    response = self.post_with_files(
        '%s/%s' % (self.API_PREFIX, 'certificate_install'),
        {'mode': mode},
        upload_files=[('file', certfile)],
        headers=self.API_HEADERS,
        expect_errors=False)
    self.assertEqual(response.status_code, http_client.OK)
    resp = json.loads(response.body)
    self.assertIn('certificates', resp)
    ret_certs = resp.get('certificates')
    # Only the server cert is returned even though the file also
    # contains the intermediate CA cert; it must match the first cert
    # in the file.
    self.assertEqual(len(ret_certs), 1)
    returned = ret_certs[0]
    expected = in_certs[0]
    self.assertIn('certtype', returned)
    self.assertEqual(returned.get('certtype'), mode)
    self.assertIn('signature', returned)
    self.assertIn('start_date', returned)
    self.assertIn('expiry_date', returned)
    # Strip the '+00:00' UTC suffix before comparing date strings.
    start = str(returned.get('start_date')).replace('+00:00', '')
    expiry = str(returned.get('expiry_date')).replace('+00:00', '')
    found_match = (
        returned.get('signature') == self.get_cert_signature(mode, expected)
        and start == str(expected.not_valid_before)
        and expiry == str(expected.not_valid_after))
    self.assertTrue(found_match)
# Test POST operation to install ssl certificate signed by intermediate CA,
# but the server cert and intermediate cert in the file is in wrong order.
def test_install_2xcert_1xkey_ssl_certificate_wrong_order(self):
    """Reject an 'ssl' bundle whose CA cert precedes the server cert.

    When the intermediate CA certificate appears before the server
    certificate in the file, the install must fail with an explicit
    error message.
    """
    mode = 'ssl'
    certfile = os.path.join(os.path.dirname(__file__), "data",
                            'ssl-cert-2xcert-1xkey-with-key-wrong-order.pem')
    response = self.post_with_files(
        '%s/%s' % (self.API_PREFIX, 'certificate_install'),
        {'mode': mode},
        upload_files=[('file', certfile)],
        headers=self.API_HEADERS,
        expect_errors=True)
    self.assertTrue(response.body)
    resp = json.loads(response.body)
    self.assertTrue(resp.get('error'))
    self.assertIn('The first cert in the file should not be a CA cert',
                  str(resp.get('error')))
# Test successful POST operation to install docker_registry certificate
# signed by intermediate CA
def test_install_2xcert_1xkey_docker_registry_certificate(self):
    """Install a 'docker_registry' certificate signed by an intermediate CA.

    The uploaded file carries the server certificate first, then the
    intermediate CA certificate; the API must report only the server
    certificate.
    """
    mode = 'docker_registry'
    certfile = os.path.join(os.path.dirname(__file__), "data",
                            'docker_registry-cert-2xcert-1xkey-with-key.pem')
    in_certs = self.extract_certs_from_pem_file(certfile)
    # Fake conductor reply: the first cert is the server cert, every
    # subsequent one is a CA cert.
    fake_config_certificate_return = [
        {'signature': self.get_cert_signature(mode, cert),
         'not_valid_before': cert.not_valid_before,
         'not_valid_after': cert.not_valid_after,
         'is_ca': position != 0}
        for position, cert in enumerate(in_certs)
    ]
    self.fake_conductor_api.setup_config_certificate(
        fake_config_certificate_return)
    response = self.post_with_files(
        '%s/%s' % (self.API_PREFIX, 'certificate_install'),
        {'mode': mode},
        upload_files=[('file', certfile)],
        headers=self.API_HEADERS,
        expect_errors=False)
    self.assertEqual(response.status_code, http_client.OK)
    resp = json.loads(response.body)
    self.assertIn('certificates', resp)
    ret_certs = resp.get('certificates')
    # Only the server cert is returned even though the file also
    # contains the intermediate CA cert; it must match the first cert
    # in the file.
    self.assertEqual(len(ret_certs), 1)
    returned = ret_certs[0]
    expected = in_certs[0]
    self.assertIn('certtype', returned)
    self.assertEqual(returned.get('certtype'), mode)
    self.assertIn('signature', returned)
    self.assertIn('start_date', returned)
    self.assertIn('expiry_date', returned)
    # Strip the '+00:00' UTC suffix before comparing date strings.
    start = str(returned.get('start_date')).replace('+00:00', '')
    expiry = str(returned.get('expiry_date')).replace('+00:00', '')
    found_match = (
        returned.get('signature') == self.get_cert_signature(mode, expected)
        and start == str(expected.not_valid_before)
        and expiry == str(expected.not_valid_after))
    self.assertTrue(found_match)
# Test POST operation to install docker_registry certificate signed by
# intermediate CA, but the server cert and intermediate cert in the file
# is in wrong order.
def test_install_2xcert_1xkey_docker_registry_certificate_wrong_order(self):
    """Reject a 'docker_registry' bundle whose CA cert precedes the server cert.

    When the intermediate CA certificate appears before the server
    certificate in the file, the install must fail with an explicit
    error message.
    """
    mode = 'docker_registry'
    certfile = os.path.join(
        os.path.dirname(__file__), "data",
        'docker_registry-cert-2xcert-1xkey-with-key-wrong-order.pem')
    response = self.post_with_files(
        '%s/%s' % (self.API_PREFIX, 'certificate_install'),
        {'mode': mode},
        upload_files=[('file', certfile)],
        headers=self.API_HEADERS,
        expect_errors=True)
    self.assertTrue(response.body)
    resp = json.loads(response.body)
    self.assertTrue(resp.get('error'))
    self.assertIn('The first cert in the file should not be a CA cert',
                  str(resp.get('error')))
# Test failed installation of ssl certificate managed by cert-manager
def test_force_failure_install_ssl_certificate(self):
    """Installing an 'ssl' cert managed by cert-manager (no force) must fail."""
    self.force_failure_install_certificate(constants.CERT_MODE_SSL)
# Test failed installation of docker_registry certificate managed by cert-manager
def test_force_failure_install_docker_registry_certificate(self):
    """Installing a 'docker_registry' cert managed by cert-manager (no force) must fail."""
    self.force_failure_install_certificate(constants.CERT_MODE_DOCKER_REGISTRY)
def force_failure_install_certificate(self, mode):
    """Verify an install is rejected when cert-manager owns the certificate.

    :param mode: certificate mode under test (e.g. ssl, docker_registry)

    Marks the certificate's k8s secret as present (i.e. managed by
    cert-manager) and checks that a plain install — without the
    'force' flag — is refused with an explanatory error.
    """
    certfile = os.path.join(os.path.dirname(__file__), "data",
                            'ssl-cert-2xcert-1xkey-with-key.pem')
    in_certs = self.extract_certs_from_pem_file(certfile)
    # Fake conductor reply: first cert is the server cert, the rest
    # are CA certs.
    fake_config_certificate_return = [
        {'signature': self.get_cert_signature(mode, cert),
         'not_valid_before': cert.not_valid_before,
         'not_valid_after': cert.not_valid_after,
         'is_ca': position != 0}
        for position, cert in enumerate(in_certs)
    ]
    self.fake_conductor_api.setup_config_certificate(
        fake_config_certificate_return)
    # Pretend the certificate's k8s secret exists, i.e. cert-manager
    # is managing it.
    self.kube_get_secret_result = 'true'
    # Default behavior (force not set) is expected to be rejected.
    response = self.post_with_files(
        '%s/%s' % (self.API_PREFIX, 'certificate_install'),
        {'mode': mode},
        upload_files=[('file', certfile)],
        headers=self.API_HEADERS,
        expect_errors=True)
    # The API reports the failure inside a 200 response body rather
    # than via an HTTP error status.
    self.assertEqual(response.status_code, http_client.OK)
    self.assertTrue(response.body)
    resp = json.loads(response.body)
    self.assertTrue(resp.get('error'))
    self.assertIn("Certificate is currently being managed by cert-manager",
                  str(resp.get('error')))
# Test successful forced installation of ssl certificate managed by cert-manager
def test_force_success_install_ssl_certificate(self):
    """Forced install of an 'ssl' cert managed by cert-manager must succeed."""
    self.force_success_install_certificate(constants.CERT_MODE_SSL)
# Test successful forced installation of docker_registry certificate managed by cert-manager
def test_force_success_install_docker_registry_certificate(self):
    """Forced install of a 'docker_registry' cert managed by cert-manager must succeed."""
    self.force_success_install_certificate(constants.CERT_MODE_DOCKER_REGISTRY)
def force_success_install_certificate(self, mode):
    """Verify a forced install succeeds even when cert-manager owns the cert.

    :param mode: certificate mode under test (e.g. ssl, docker_registry)

    Marks the certificate's k8s secret as present (i.e. managed by
    cert-manager) and checks that an install with force='true' is
    accepted and returns the installed certificates.
    """
    certfile = os.path.join(os.path.dirname(__file__), "data",
                            'ssl-cert-2xcert-1xkey-with-key.pem')
    in_certs = self.extract_certs_from_pem_file(certfile)
    # Fake conductor reply: first cert is the server cert, the rest
    # are CA certs.
    fake_config_certificate_return = [
        {'signature': self.get_cert_signature(mode, cert),
         'not_valid_before': cert.not_valid_before,
         'not_valid_after': cert.not_valid_after,
         'is_ca': position != 0}
        for position, cert in enumerate(in_certs)
    ]
    self.fake_conductor_api.setup_config_certificate(
        fake_config_certificate_return)
    # Pretend the certificate's k8s secret exists, i.e. cert-manager
    # is managing it.
    self.kube_get_secret_result = 'true'
    response = self.post_with_files(
        '%s/%s' % (self.API_PREFIX, 'certificate_install'),
        {'mode': mode, 'force': 'true'},
        upload_files=[('file', certfile)],
        headers=self.API_HEADERS,
        expect_errors=True)
    self.assertEqual(response.status_code, http_client.OK)
    resp = json.loads(response.body)
    self.assertIn('certificates', resp)
class ApiCertificateDeleteTestSuite(ApiCertificateTestCaseMixin,
                                    base.FunctionalTest):
    """Certificate delete operations."""

    def setUp(self):
        super(ApiCertificateDeleteTestSuite, self).setUp()
        # Certificate DB object that the tests below delete / look up.
        self.delete_object = self._create_db_object()

    def test_delete_ca_certificate(self):
        """Successful CA certificate DELETE returns the deleted record."""
        # Capture the identifying fields before issuing the delete.
        uuid = self.delete_object.uuid
        certtype = self.delete_object.certtype
        signature = self.delete_object.signature
        response = self.delete(self.get_single_url(uuid),
                               headers=self.API_HEADERS,
                               expect_errors=False)
        self.assertEqual(response.status_code, http_client.OK)
        self.assertTrue(response.body)
        resp = json.loads(response.body)
        # The response must echo back the deleted certificate's fields.
        for field, expected in (('uuid', uuid),
                                ('certtype', certtype),
                                ('signature', signature)):
            self.assertIn(field, resp)
            self.assertEqual(expected, resp.get(field))

    def test_delete_ca_certificate_not_found(self):
        """DELETE of an unknown certificate UUID yields a 400 with a message."""
        uuid = UUID.uuid4()
        response = self.delete(self.get_single_url(uuid),
                               headers=self.API_HEADERS,
                               expect_errors=True)
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertTrue(response.body)
        resp = json.loads(response.body)
        self.assertTrue(resp.get('error_message'))
        self.assertIn('No certificate found for %s' % uuid,
                      str(resp.get('error_message')))
| 43.050822
| 96
| 0.604528
| 3,369
| 28,801
| 4.871772
| 0.092015
| 0.029001
| 0.014257
| 0.022178
| 0.780662
| 0.756474
| 0.732407
| 0.725888
| 0.720344
| 0.712179
| 0
| 0.008344
| 0.300892
| 28,801
| 668
| 97
| 43.115269
| 0.806804
| 0.100135
| 0
| 0.704082
| 0
| 0
| 0.098939
| 0.019006
| 0
| 0
| 0
| 0
| 0.163265
| 1
| 0.071429
| false
| 0
| 0.02449
| 0.012245
| 0.134694
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7338c0391a559bd8286ca85271a0f50ccb994929
| 154
|
py
|
Python
|
src/arcas/__init__.py
|
Nikoleta-v3/Arcas
|
afbc4a35a6e73c9f041e7515b36070bd450a9dd5
|
[
"MIT"
] | 15
|
2017-02-24T21:05:44.000Z
|
2021-07-06T07:49:59.000Z
|
src/arcas/__init__.py
|
Nikoleta-v3/Arcas
|
afbc4a35a6e73c9f041e7515b36070bd450a9dd5
|
[
"MIT"
] | 18
|
2016-11-29T00:10:43.000Z
|
2017-03-28T19:28:03.000Z
|
src/arcas/__init__.py
|
Nikoleta-v3/Arcas
|
afbc4a35a6e73c9f041e7515b36070bd450a9dd5
|
[
"MIT"
] | 1
|
2017-03-28T09:06:57.000Z
|
2017-03-28T09:06:57.000Z
|
from .IEEE.main import Ieee
from .arXiv.main import Arxiv
from .nature.main import Nature
from .Springer.main import Springer
from .PLOS.main import Plos
| 25.666667
| 35
| 0.805195
| 25
| 154
| 4.96
| 0.32
| 0.403226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12987
| 154
| 5
| 36
| 30.8
| 0.925373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b40a7043ff53d9c696a1ef4a0450a037ad688311
| 73
|
py
|
Python
|
nfce/__init__.py
|
ypereirars/nfescrapper
|
9b4c979e6580d0e83c71f7ee869cec60b446ace8
|
[
"MIT"
] | null | null | null |
nfce/__init__.py
|
ypereirars/nfescrapper
|
9b4c979e6580d0e83c71f7ee869cec60b446ace8
|
[
"MIT"
] | null | null | null |
nfce/__init__.py
|
ypereirars/nfescrapper
|
9b4c979e6580d0e83c71f7ee869cec60b446ace8
|
[
"MIT"
] | null | null | null |
from nfce.parser import NFCeParser
from nfce.scrapper import NfeScrapper
| 24.333333
| 37
| 0.863014
| 10
| 73
| 6.3
| 0.7
| 0.253968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109589
| 73
| 3
| 37
| 24.333333
| 0.969231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b40b06d25505a8324197cab81900f127ef6d9f6f
| 4,120
|
py
|
Python
|
gym-env/gym-symbol/gym_symbol/__init__.py
|
tttor/nbwpg
|
271718362cf0cd810c7ea0cd9726e77276947e58
|
[
"MIT"
] | null | null | null |
gym-env/gym-symbol/gym_symbol/__init__.py
|
tttor/nbwpg
|
271718362cf0cd810c7ea0cd9726e77276947e58
|
[
"MIT"
] | null | null | null |
gym-env/gym-symbol/gym_symbol/__init__.py
|
tttor/nbwpg
|
271718362cf0cd810c7ea0cd9726e77276947e58
|
[
"MIT"
] | null | null | null |
from gym.envs.registration import register

# Every environment shares the same entry point; only the YAML config
# file passed through kwargs differs.
_ENTRY_POINT = 'gym_symbol.envs:SymbolicRepresentation'

# (environment id, config filename) pairs, grouped as in the original
# per-domain sections.
_ENV_CONFIGS = [
    # gridnav: square
    ('GridNav_2-v0', 'gridnav_2_v0.yaml'),
    ('GridNav_2-v1', 'gridnav_2_v1.yaml'),
    ('GridNav_3-v0', 'gridnav_3_v0.yaml'),
    ('GridNav_3-v1', 'gridnav_3_v1.yaml'),
    # nchain modified
    # (gym.error.Error: Cannot re-register id: NChain-v0)
    ('NChain_mod-v0', 'nchain_mod_v0.yaml'),
    ('NChain_mod-v1', 'nchain_mod_v1.yaml'),
    # tor
    ('Tor_20201121a-v0', 'tor_20201121a.yaml'),
    ('Tor_20201121a-v1', 'tor_20201121a_v1.yaml'),
    ('hordijk_example-v0', 'hordijk_example_v0.yaml'),
    ('Hordijk_example-v3', 'hordijk_example_v3.yaml'),
    ('Hordijk_example-v4', 'hordijk_example_v4.yaml'),
    ('Tor_20210306-v0', 'tor_20210306_v0.yaml'),
    ('Tor_20210306-v1', 'tor_20210306_v1.yaml'),
    ('Tor_20210307-v0', 'tor_20210307_v0.yaml'),
    ('Tor_20210307-v1', 'tor_20210307_v1.yaml'),
    # feinberg_2002_hmdp
    ('Example_3_1-v0', 'example_3_1.yaml'),
    ('Example_3_3-v0', 'example_3_3.yaml'),
    ('Example_8_1-v0', 'example_8_1.yaml'),
    # puterman_1994_mdp
    ('Example_10_1_1-v0', 'example_10_1_1.yaml'),
    ('Example_10_1_2-v0', 'example_10_1_2.yaml'),
    ('Example_10_1_2-v1', 'example_10_1_2_v1.yaml'),
    ('Example_10_2_2-v0', 'example_10_2_2.yaml'),
    ('Problem_10_7-v0', 'problem_10_7.yaml'),
    ('Problem_10_9-v0', 'problem_10_9.yaml'),
    ('Problem_6_64-v0', 'problem_6_64.yaml'),
]

# Register each environment in the same order as the original module.
for _env_id, _cfg_fname in _ENV_CONFIGS:
    register(
        id=_env_id,
        entry_point=_ENTRY_POINT,
        kwargs={'cfg_fname': _cfg_fname},
    )
| 26.075949
| 80
| 0.664078
| 494
| 4,120
| 5.190283
| 0.09919
| 0.101404
| 0.126755
| 0.185257
| 0.858034
| 0.712949
| 0.712949
| 0.694228
| 0.694228
| 0.694228
| 0
| 0.059425
| 0.121845
| 4,120
| 157
| 81
| 26.242038
| 0.649254
| 0.030583
| 0
| 0.396825
| 0
| 0
| 0.550559
| 0.289452
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.007937
| 0
| 0.007937
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b4541a67d882db4c8b1c00ba32f1b9ffd8c97716
| 41
|
py
|
Python
|
pydl/networks/__init__.py
|
AndreiDavydov/Poisson_Denoiser
|
a0b8f3dce8282b8e50d44cacb7bdc4fc6d4abc22
|
[
"MIT"
] | 4
|
2019-12-24T10:54:40.000Z
|
2021-12-27T14:07:06.000Z
|
pydl/models/__init__.py
|
AndreiDavydov/Poisson_Denoiser
|
a0b8f3dce8282b8e50d44cacb7bdc4fc6d4abc22
|
[
"MIT"
] | null | null | null |
pydl/models/__init__.py
|
AndreiDavydov/Poisson_Denoiser
|
a0b8f3dce8282b8e50d44cacb7bdc4fc6d4abc22
|
[
"MIT"
] | 1
|
2020-09-28T06:04:12.000Z
|
2020-09-28T06:04:12.000Z
|
from . import UDNet
from . import ResDNet
| 20.5
| 21
| 0.780488
| 6
| 41
| 5.333333
| 0.666667
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170732
| 41
| 2
| 21
| 20.5
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
81f31187d6e4c9747b39ee4614f35e44d5302f54
| 12,973
|
py
|
Python
|
excel_functions.py
|
Lol-Hi/cs-feedback-survey
|
bd0ce694f0bafcc7aaf1a28e5b0f6366c57a3690
|
[
"MIT"
] | null | null | null |
excel_functions.py
|
Lol-Hi/cs-feedback-survey
|
bd0ce694f0bafcc7aaf1a28e5b0f6366c57a3690
|
[
"MIT"
] | null | null | null |
excel_functions.py
|
Lol-Hi/cs-feedback-survey
|
bd0ce694f0bafcc7aaf1a28e5b0f6366c57a3690
|
[
"MIT"
] | null | null | null |
#Importing other libraries
import openpyxl #allows me to read from an Excel spreadsheet
def openExcel(filename, sheetname):
    """
    Read survey responses from the given Excel worksheet.

    Loads `filename` with openpyxl, selects the worksheet named
    `sheetname`, and collects the cell values column by column, so each
    inner list holds a question's header cell followed by every response
    to that question.

    :param filename: path of the .xlsx workbook to read
    :param sheetname: name of the worksheet inside the workbook
    :return: 2D list (one inner list per column of the sheet), or the
             string 'ERROR' when the file or the sheet cannot be found

    >>> openExcel("hello.xlsx", "world")
    Error: hello.xlsx not found
    'ERROR'
    >>> openExcel("responses_testing.xlsx", "world")
    Error: 'world' not found in responses_testing.xlsx
    'ERROR'
    """
    try:
        # Opens the required Excel file. Catch Exception rather than a
        # bare except so Ctrl-C / SystemExit still propagate; openpyxl
        # raises FileNotFoundError or InvalidFileException here.
        workbook = openpyxl.load_workbook(filename)
    except Exception:
        print("Error: {} not found".format(filename))
        return "ERROR"
    if sheetname not in workbook.sheetnames:
        print("Error: '{}' not found in {}".format(sheetname, filename))
        return "ERROR"
    sheet = workbook[sheetname]  # opens the required Excel spreadsheet
    # One inner list per column (question): the header cell followed by
    # every response in that column.
    return [[cell.value for cell in column] for column in sheet.columns]
| 341.394737
| 11,362
| 0.66939
| 1,883
| 12,973
| 4.609665
| 0.184811
| 0.276498
| 0.412673
| 0.547465
| 0.277419
| 0.245277
| 0.218088
| 0.188364
| 0.179032
| 0.167051
| 0
| 0.01894
| 0.157558
| 12,973
| 37
| 11,363
| 350.621622
| 0.775277
| 0.948123
| 0
| 0.1
| 0
| 0
| 0.086687
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.05
| 0
| 0.25
| 0.1
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c3215aa473baf704e88963d1163e612b6f4f0fbb
| 49
|
py
|
Python
|
amberelectric/api/__init__.py
|
madpilot/amberelectric.py
|
ffb26389d8022e8cdfa803fd51365c586686bd21
|
[
"Apache-2.0"
] | 3
|
2021-06-22T03:09:21.000Z
|
2022-03-17T03:53:03.000Z
|
amberelectric/api/__init__.py
|
madpilot/amberelectric.py
|
ffb26389d8022e8cdfa803fd51365c586686bd21
|
[
"Apache-2.0"
] | 4
|
2021-09-11T05:44:08.000Z
|
2021-10-02T12:15:38.000Z
|
amberelectric/api/__init__.py
|
madpilot/amberelectric.py
|
ffb26389d8022e8cdfa803fd51365c586686bd21
|
[
"Apache-2.0"
] | 3
|
2021-10-01T12:00:57.000Z
|
2022-03-17T09:55:49.000Z
|
from amberelectric.api.amber_api import AmberApi
| 24.5
| 48
| 0.877551
| 7
| 49
| 6
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 49
| 1
| 49
| 49
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c32fec9d33a5cdae7c8f4ea693608bad321838c2
| 35
|
py
|
Python
|
autoremote/__init__.py
|
wilderjds/autoremote
|
3c9ff08742839be619632256a447aea3c552e24a
|
[
"Apache-2.0"
] | null | null | null |
autoremote/__init__.py
|
wilderjds/autoremote
|
3c9ff08742839be619632256a447aea3c552e24a
|
[
"Apache-2.0"
] | null | null | null |
autoremote/__init__.py
|
wilderjds/autoremote
|
3c9ff08742839be619632256a447aea3c552e24a
|
[
"Apache-2.0"
] | null | null | null |
from .autoremote import Autoremote
| 17.5
| 34
| 0.857143
| 4
| 35
| 7.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5edab31b985b3c3a90d5d2da2e41ab7e945ef42e
| 382
|
py
|
Python
|
terrascript/data/oraclepaas.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
terrascript/data/oraclepaas.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
terrascript/data/oraclepaas.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# terrascript/data/oraclepaas.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:24:00 UTC)
#
# For imports without namespace, e.g.
#
# >>> import terrascript.data.oraclepaas
#
# instead of
#
# >>> import terrascript.data.hashicorp.oraclepaas
#
# This is only available for 'official' and 'partner' providers.
from terrascript.data.hashicorp.oraclepaas import *
| 25.466667
| 73
| 0.746073
| 49
| 382
| 5.816327
| 0.693878
| 0.210526
| 0.175439
| 0.238596
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036254
| 0.133508
| 382
| 14
| 74
| 27.285714
| 0.824773
| 0.795812
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5ef699b95a63d30603b1171e8cfc8d5d71c2e92e
| 302
|
py
|
Python
|
ex108/teste.py
|
Jordemar-D-Bousquet/Exercicios_Python
|
705d4c83720db033841f01aa843e4dbab08f1423
|
[
"MIT"
] | null | null | null |
ex108/teste.py
|
Jordemar-D-Bousquet/Exercicios_Python
|
705d4c83720db033841f01aa843e4dbab08f1423
|
[
"MIT"
] | null | null | null |
ex108/teste.py
|
Jordemar-D-Bousquet/Exercicios_Python
|
705d4c83720db033841f01aa843e4dbab08f1423
|
[
"MIT"
] | null | null | null |
from ex108 import moeda
# Ask the user for a price and print its half, its double and a 10%
# increase, each formatted as currency by the ex108.moeda helper module.
preco = float(input('Digite o preço R$:'))
preco_fmt = moeda.moeda(preco)
metade_fmt = moeda.moeda(moeda.metade(preco))
dobro_fmt = moeda.moeda(moeda.dobro(preco))
aumento_fmt = moeda.moeda(moeda.aumentar(preco, 10))
print(f'A medade de {preco_fmt} é {metade_fmt}')
print(f'O dobro de {preco_fmt} é {dobro_fmt}')
print(f'Aumentando a taxa em 10% de {preco_fmt} temos {aumento_fmt}')
| 50.333333
| 96
| 0.692053
| 57
| 302
| 3.666667
| 0.438596
| 0.430622
| 0.172249
| 0.186603
| 0.277512
| 0.277512
| 0.277512
| 0.277512
| 0
| 0
| 0
| 0.02583
| 0.102649
| 302
| 6
| 96
| 50.333333
| 0.745387
| 0
| 0
| 0
| 0
| 0.6
| 0.739274
| 0.310231
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.6
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
6f4c91b65a33b7c05f38d03310fb2c1a22a7fb0c
| 6
|
py
|
Python
|
demisto_sdk/tests/test_files/Packs/DummyPack/Scripts/DummyScript/DummyScript.py
|
sturmianseq/demisto-sdk
|
67ce7ee70ccd557d661e03a60469301c5cbcb9c0
|
[
"MIT"
] | 42
|
2019-11-07T13:02:00.000Z
|
2022-03-29T03:39:04.000Z
|
demisto_sdk/tests/test_files/Packs/DummyPack/Scripts/DummyScript/DummyScript.py
|
sturmianseq/demisto-sdk
|
67ce7ee70ccd557d661e03a60469301c5cbcb9c0
|
[
"MIT"
] | 1,437
|
2019-11-07T13:02:25.000Z
|
2022-03-31T12:48:11.000Z
|
demisto_sdk/tests/test_files/Packs/DummyPack/Scripts/DummyScript/DummyScript.py
|
sturmianseq/demisto-sdk
|
67ce7ee70ccd557d661e03a60469301c5cbcb9c0
|
[
"MIT"
] | 46
|
2019-12-09T21:44:30.000Z
|
2022-03-24T17:36:45.000Z
|
# Minimal placeholder statement — this script's entire content is this
# single assignment (presumably a dummy fixture; verify against its use).
a = 5
| 3
| 5
| 0.333333
| 2
| 6
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0.5
| 6
| 1
| 6
| 6
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6f5fed9a268d036fc5d10f92e80fbdc81b890a56
| 4,725
|
py
|
Python
|
test/test_types.py
|
sCrypt-Inc/py-scryptlib
|
58aa2d8dca36b42ea032825f1bfc01e2d9a65424
|
[
"MIT"
] | 7
|
2021-11-14T20:10:29.000Z
|
2022-02-26T10:05:07.000Z
|
test/test_types.py
|
sCrypt-Inc/py-scryptlib
|
58aa2d8dca36b42ea032825f1bfc01e2d9a65424
|
[
"MIT"
] | 1
|
2021-08-12T16:50:42.000Z
|
2021-09-08T20:13:26.000Z
|
test/test_types.py
|
sCrypt-Inc/scryptlib-python
|
4df358e89231bf9c9698240d17e06f04b61218d3
|
[
"MIT"
] | 1
|
2021-10-16T23:46:23.000Z
|
2021-10-16T23:46:23.000Z
|
import pytest
from scryptlib.types import *
def test_type_bytes():
    """Bytes must prefix its payload with the correct push opcode."""
    # (constructor argument, expected hex serialisation)
    cases = [
        ('01', '0101'),                               # small direct push
        (b'\x01', '0101'),                            # raw bytes input accepted too
        ('ff' * 100, '4c64' + 'ff' * 100),            # OP_PUSHDATA1 range
        ('ff' * 255, '4cff' + 'ff' * 255),
        ('ff' * 256, '4d0001' + 'ff' * 256),          # OP_PUSHDATA2 range
        ('ff' * 65535, '4dffff' + 'ff' * 65535),
        ('ff' * 65536, '4e00000100' + 'ff' * 65536),  # OP_PUSHDATA4 range
    ]
    for raw, expected in cases:
        assert Bytes(raw).hex == expected
def test_type_int():
    """Int serialises large (and negative) integers to the expected hex."""
    magnitude = 73219837192873198232871937891273981279837198793818
    assert Int(magnitude).hex == '155abc0013a5e275d529dc04d7e2320ae0a60d5b1932'
    # Same magnitude, negated — only the final byte differs (0x32 -> 0xb2).
    assert Int(-magnitude).hex == '155abc0013a5e275d529dc04d7e2320ae0a60d5b19b2'
def test_type_privkey():
    """PrivKey accepts bytes, hex strings and ints, serialising each alike."""
    # "Positive" key: bytes and hex-string constructors must agree.
    key_hex = '7ED697BCE5AEF3F7B09CBD6BBB8EBACF0C53D8B80DD90BACF8644C11648E8784'
    expected_pos = '2084878e64114c64f8ac0bd90db8d8530ccfba8ebb6bbd9cb0f7f3aee5bc97d67e'
    assert PrivKey(bytes.fromhex(key_hex)).hex == expected_pos
    assert PrivKey(key_hex).hex == expected_pos
    # "Negative" key: integer constructors, decimal and hex literal forms.
    expected_neg = '2104421d3fb78c05aba0d68817fce03e2b0cf7d058f74705a7ec76288202b8d09a00'
    assert PrivKey(70024952860251874614749626492917994704208775384514195732065700789540272030212).hex == expected_neg
    assert PrivKey(0xc34039e780c90ec8517a556b379954076b04c792035407802f3e65e61c1cd3c5).hex == '21c5d31c1ce6653e2f8007540392c7046b075499376b557a51c80ec980e73940c300'
def test_type_hashedmap():
    """Each set/delete on a HashedMap updates its .hex serialisation."""
    mapping = HashedMap(Int, Int)
    mapping.set(Int(3), Int(1))
    assert mapping.hex == '084fed08b978af4d7d196a7446a86b58009e636b611db16211b65a9aadff29c54bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459a'
    mapping.set(Int(5), Int(6))
    assert mapping.hex == 'e77b9a9ae9e30b0dbdb6f510a264ef9de781501d7b6b92ae89eb059c5ab743db67586e98fad27da0b9968bc039a1ef34c939b9b8e523a8bef89d478608c5ecf6084fed08b978af4d7d196a7446a86b58009e636b611db16211b65a9aadff29c54bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459a'
    # Plain ints are accepted where the declared key/value type is Int.
    mapping.set(0, 11)
    assert mapping.hex == 'e77b9a9ae9e30b0dbdb6f510a264ef9de781501d7b6b92ae89eb059c5ab743db67586e98fad27da0b9968bc039a1ef34c939b9b8e523a8bef89d478608c5ecf6084fed08b978af4d7d196a7446a86b58009e636b611db16211b65a9aadff29c54bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459ae3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855e7cf46a078fed4fafd0b5e3aff144802b853f8ae459a4f0c14add3314b7cc3a6'
    mapping.set(Int(1), Int(5))
    assert mapping.hex == 'e77b9a9ae9e30b0dbdb6f510a264ef9de781501d7b6b92ae89eb059c5ab743db67586e98fad27da0b9968bc039a1ef34c939b9b8e523a8bef89d478608c5ecf6084fed08b978af4d7d196a7446a86b58009e636b611db16211b65a9aadff29c54bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459a4bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459ae77b9a9ae9e30b0dbdb6f510a264ef9de781501d7b6b92ae89eb059c5ab743dbe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855e7cf46a078fed4fafd0b5e3aff144802b853f8ae459a4f0c14add3314b7cc3a6'
    # Deleting the last two entries restores the earlier serialisation.
    mapping.delete(Int(1))
    mapping.delete(Int(0))
    assert mapping.hex == 'e77b9a9ae9e30b0dbdb6f510a264ef9de781501d7b6b92ae89eb059c5ab743db67586e98fad27da0b9968bc039a1ef34c939b9b8e523a8bef89d478608c5ecf6084fed08b978af4d7d196a7446a86b58009e636b611db16211b65a9aadff29c54bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459a'
    # Key typing is enforced: an Int key is rejected on a Bytes-keyed map.
    mapping = HashedMap(Bytes, Int)
    with pytest.raises(AssertionError):
        mapping.set(Int(0), Int(1))
    mapping.set(Bytes('1234'), Int(11))
def test_type_hashedset():
    """Each add/delete on a HashedSet updates its .hex serialisation."""
    hset = HashedSet(Int)
    # Plain ints are accepted where the declared element type is Int.
    hset.add(3)
    assert hset.hex == '084fed08b978af4d7d196a7446a86b58009e636b611db16211b65a9aadff29c5'
    hset.add(Int(5))
    assert hset.hex == 'e77b9a9ae9e30b0dbdb6f510a264ef9de781501d7b6b92ae89eb059c5ab743db084fed08b978af4d7d196a7446a86b58009e636b611db16211b65a9aadff29c5'
    hset.add(0)
    assert hset.hex == 'e77b9a9ae9e30b0dbdb6f510a264ef9de781501d7b6b92ae89eb059c5ab743db084fed08b978af4d7d196a7446a86b58009e636b611db16211b65a9aadff29c5e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
    hset.add(Int(1))
    assert hset.hex == 'e77b9a9ae9e30b0dbdb6f510a264ef9de781501d7b6b92ae89eb059c5ab743db084fed08b978af4d7d196a7446a86b58009e636b611db16211b65a9aadff29c54bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459ae3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
    # Deleting the last two members restores the earlier serialisation.
    hset.delete(Int(1))
    hset.delete(Int(0))
    assert hset.hex == 'e77b9a9ae9e30b0dbdb6f510a264ef9de781501d7b6b92ae89eb059c5ab743db084fed08b978af4d7d196a7446a86b58009e636b611db16211b65a9aadff29c5'
| 50.806452
| 536
| 0.82328
| 262
| 4,725
| 14.79771
| 0.267176
| 0.010833
| 0.018055
| 0.007222
| 0.244003
| 0.033015
| 0
| 0
| 0
| 0
| 0
| 0.470519
| 0.099048
| 4,725
| 92
| 537
| 51.358696
| 0.440216
| 0.011852
| 0
| 0.131148
| 0
| 0
| 0.612744
| 0.598155
| 0.081967
| 1
| 0.01416
| 0
| 0.393443
| 1
| 0.081967
| false
| 0
| 0.032787
| 0
| 0.114754
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.